repo_name | path | copies | size | content | license
|---|---|---|---|---|---|
ouyanghy/nanopi2 | tools/perf/scripts/python/futex-contention.py | 11261 | 1486 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
process_names = {}
thread_thislock = {}
thread_blocktime = {}
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, uaddr, op, val, utime, uaddr2, val3):
cmd = op & FUTEX_CMD_MASK
if cmd != FUTEX_WAIT:
return # we don't care about originators of WAKE events
process_names[tid] = comm
thread_thislock[tid] = uaddr
thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, ret):
if thread_blocktime.has_key(tid):
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
print "%s[%d] lock %x contended %d times, %d avg ns" % \
(process_names[tid], tid, lock, count, avg)
| gpl-2.0 |
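The script above relies on two helpers imported from perf's bundled `Util` module, `nsecs()` and `add_stats()`. A minimal sketch of what those helpers do, inferred from how the script uses them (the real implementations ship with perf and may differ in detail):

```python
# Illustrative stand-ins for the perf Util helpers used by futex-contention.py.

def nsecs(secs, nsecs):
    # Fold a (seconds, nanoseconds) timestamp pair into a single nanosecond count.
    return secs * 1000000000 + nsecs

def add_stats(stats, key, value):
    # Maintain a (min, max, avg, count) tuple per key, matching how
    # trace_end() unpacks lock_waits[tid, lock] above.
    if key not in stats:
        stats[key] = (value, value, value, 1)
    else:
        lo, hi, avg, count = stats[key]
        stats[key] = (min(lo, value), max(hi, value),
                      (avg * count + value) / (count + 1), count + 1)
```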
rainslytherin/ansible | lib/ansible/runner/connection_plugins/ssh.py | 104 | 19988 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import os
import re
import subprocess
import shlex
import pipes
import random
import select
import fcntl
import hmac
import pwd
import gettext
import pty
from hashlib import sha1
import ansible.constants as C
from ansible.callbacks import vvv
from ansible import errors
from ansible import utils
class Connection(object):
''' ssh based connections '''
def __init__(self, runner, host, port, user, password, private_key_file, *args, **kwargs):
self.runner = runner
self.host = host
self.ipv6 = ':' in self.host
self.port = port
self.user = str(user)
self.password = password
self.private_key_file = private_key_file
self.HASHED_KEY_MAGIC = "|1|"
self.has_pipelining = True
# TODO: add pbrun, pfexec
self.become_methods_supported=['sudo', 'su', 'pbrun']
fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
self.cp_dir = utils.prepare_writeable_dir('$HOME/.ansible/cp',mode=0700)
fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
def connect(self):
''' connect to the remote host '''
vvv("ESTABLISH CONNECTION FOR USER: %s" % self.user, host=self.host)
self.common_args = []
extra_args = C.ANSIBLE_SSH_ARGS
if extra_args is not None:
# make sure there is no empty string added as this can produce weird errors
self.common_args += [x.strip() for x in shlex.split(extra_args) if x.strip()]
else:
self.common_args += ["-o", "ControlMaster=auto",
"-o", "ControlPersist=60s",
"-o", "ControlPath=\"%s\"" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self.cp_dir))]
cp_in_use = False
cp_path_set = False
for arg in self.common_args:
if "ControlPersist" in arg:
cp_in_use = True
if "ControlPath" in arg:
cp_path_set = True
if cp_in_use and not cp_path_set:
self.common_args += ["-o", "ControlPath=\"%s\"" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self.cp_dir))]
if not C.HOST_KEY_CHECKING:
self.common_args += ["-o", "StrictHostKeyChecking=no"]
if self.port is not None:
self.common_args += ["-o", "Port=%d" % (self.port)]
if self.private_key_file is not None:
self.common_args += ["-o", "IdentityFile=\"%s\"" % os.path.expanduser(self.private_key_file)]
elif self.runner.private_key_file is not None:
self.common_args += ["-o", "IdentityFile=\"%s\"" % os.path.expanduser(self.runner.private_key_file)]
if self.password:
self.common_args += ["-o", "GSSAPIAuthentication=no",
"-o", "PubkeyAuthentication=no"]
else:
self.common_args += ["-o", "KbdInteractiveAuthentication=no",
"-o", "PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey",
"-o", "PasswordAuthentication=no"]
if self.user != pwd.getpwuid(os.geteuid())[0]:
self.common_args += ["-o", "User="+self.user]
self.common_args += ["-o", "ConnectTimeout=%d" % self.runner.timeout]
return self
def _run(self, cmd, indata):
if indata:
# do not use pseudo-pty
p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdin = p.stdin
else:
# try to use a pseudo-pty
try:
# Make sure stdin is a proper (pseudo) pty to avoid: tcgetattr errors
master, slave = pty.openpty()
p = subprocess.Popen(cmd, stdin=slave,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdin = os.fdopen(master, 'w', 0)
os.close(slave)
except:
p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdin = p.stdin
return (p, stdin)
def _password_cmd(self):
if self.password:
try:
p = subprocess.Popen(["sshpass"], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
except OSError:
raise errors.AnsibleError("to use the 'ssh' connection type with passwords, you must install the sshpass program")
(self.rfd, self.wfd) = os.pipe()
return ["sshpass", "-d%d" % self.rfd]
return []
def _send_password(self):
if self.password:
os.close(self.rfd)
os.write(self.wfd, "%s\n" % self.password)
os.close(self.wfd)
def _communicate(self, p, stdin, indata, sudoable=False, prompt=None):
fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
# We can't use p.communicate here because the ControlMaster may have stdout open as well
stdout = ''
stderr = ''
rpipes = [p.stdout, p.stderr]
if indata:
try:
stdin.write(indata)
stdin.close()
except:
raise errors.AnsibleError('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
# Read stdout/stderr from process
while True:
rfd, wfd, efd = select.select(rpipes, [], rpipes, 1)
# fail early if the become password is wrong
if self.runner.become and sudoable:
incorrect_password = gettext.dgettext(self.runner.become_method, C.BECOME_ERROR_STRINGS[self.runner.become_method])
if prompt:
if self.runner.become_pass:
if stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)):
raise errors.AnsibleError('Incorrect become password')
if stdout.endswith(prompt):
raise errors.AnsibleError('Missing become password')
elif stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)):
raise errors.AnsibleError('Incorrect become password')
if p.stdout in rfd:
dat = os.read(p.stdout.fileno(), 9000)
stdout += dat
if dat == '':
rpipes.remove(p.stdout)
if p.stderr in rfd:
dat = os.read(p.stderr.fileno(), 9000)
stderr += dat
if dat == '':
rpipes.remove(p.stderr)
# only break out if no pipes are left to read or
# the pipes are completely read and
# the process is terminated
if (not rpipes or not rfd) and p.poll() is not None:
break
# No pipes are left to read but process is not yet terminated
# Only then it is safe to wait for the process to be finished
# NOTE: Actually p.poll() is always None here if rpipes is empty
elif not rpipes and p.poll() == None:
p.wait()
# The process is terminated. Since no pipes to read from are
# left, there is no need to call select() again.
break
# close stdin after process is terminated and stdout/stderr are read
# completely (see also issue #848)
stdin.close()
return (p.returncode, stdout, stderr)
def not_in_host_file(self, host):
if 'USER' in os.environ:
user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
else:
user_host_file = "~/.ssh/known_hosts"
user_host_file = os.path.expanduser(user_host_file)
host_file_list = []
host_file_list.append(user_host_file)
host_file_list.append("/etc/ssh/ssh_known_hosts")
host_file_list.append("/etc/ssh/ssh_known_hosts2")
hfiles_not_found = 0
for hf in host_file_list:
if not os.path.exists(hf):
hfiles_not_found += 1
continue
try:
host_fh = open(hf)
except IOError, e:
hfiles_not_found += 1
continue
else:
data = host_fh.read()
host_fh.close()
for line in data.split("\n"):
line = line.strip()
if line is None or " " not in line:
continue
tokens = line.split()
if not tokens:
continue
if tokens[0].find(self.HASHED_KEY_MAGIC) == 0:
# this is a hashed known host entry
try:
(kn_salt,kn_host) = tokens[0][len(self.HASHED_KEY_MAGIC):].split("|",2)
hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
hash.update(host)
if hash.digest() == kn_host.decode('base64'):
return False
except:
# invalid hashed host key, skip it
continue
else:
# standard host file entry
if host in tokens[0]:
return False
if (hfiles_not_found == len(host_file_list)):
vvv("EXEC previous known host file not found for %s" % host)
return True
def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
''' run a command on the remote host '''
if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
ssh_cmd = self._password_cmd()
ssh_cmd += ["ssh", "-C"]
if not in_data:
# we can only use tty when we are not pipelining the modules. piping data into /usr/bin/python
# inside a tty automatically invokes the python interactive-mode but the modules are not
# compatible with the interactive-mode ("unexpected indent" mainly because of empty lines)
ssh_cmd += ["-tt"]
if utils.VERBOSITY > 3:
ssh_cmd += ["-vvv"]
else:
if self.runner.module_name == 'raw':
ssh_cmd += ["-q"]
else:
ssh_cmd += ["-v"]
ssh_cmd += self.common_args
if self.ipv6:
ssh_cmd += ['-6']
ssh_cmd += [self.host]
if self.runner.become and sudoable:
becomecmd, prompt, success_key = utils.make_become_cmd(cmd, become_user, executable, self.runner.become_method, '', self.runner.become_exe)
ssh_cmd.append(becomecmd)
else:
prompt = None
if executable:
ssh_cmd.append(executable + ' -c ' + pipes.quote(cmd))
else:
ssh_cmd.append(cmd)
vvv("EXEC %s" % ' '.join(ssh_cmd), host=self.host)
not_in_host_file = self.not_in_host_file(self.host)
if C.HOST_KEY_CHECKING and not_in_host_file:
# lock around the initial SSH connectivity so the user prompt about whether to add
# the host to known hosts is not intermingled with multiprocess output.
fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX)
# create process
(p, stdin) = self._run(ssh_cmd, in_data)
self._send_password()
no_prompt_out = ''
no_prompt_err = ''
if sudoable and self.runner.become and self.runner.become_pass:
# several cases are handled for escalated privileges with password
# * NOPASSWD (tty & no-tty): detect success_key on stdout
# * without NOPASSWD:
# * detect prompt on stdout (tty)
# * detect prompt on stderr (no-tty)
fcntl.fcntl(p.stdout, fcntl.F_SETFL,
fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
fcntl.fcntl(p.stderr, fcntl.F_SETFL,
fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
become_output = ''
become_errput = ''
while True:
if success_key in become_output or \
(prompt and become_output.endswith(prompt)) or \
utils.su_prompts.check_su_prompt(become_output):
break
rfd, wfd, efd = select.select([p.stdout, p.stderr], [],
[p.stdout], self.runner.timeout)
if p.stderr in rfd:
chunk = p.stderr.read()
if not chunk:
raise errors.AnsibleError('ssh connection closed waiting for a privilege escalation password prompt')
become_errput += chunk
incorrect_password = gettext.dgettext(
"become", "Sorry, try again.")
if become_errput.strip().endswith("%s%s" % (prompt, incorrect_password)):
raise errors.AnsibleError('Incorrect become password')
elif prompt and become_errput.endswith(prompt):
stdin.write(self.runner.become_pass + '\n')
if p.stdout in rfd:
chunk = p.stdout.read()
if not chunk:
raise errors.AnsibleError('ssh connection closed waiting for %s password prompt' % self.runner.become_method)
become_output += chunk
if not rfd:
# timeout. wrap up process communication
stdout = p.communicate()
raise errors.AnsibleError('ssh connection error while waiting for %s password prompt' % self.runner.become_method)
if success_key in become_output:
no_prompt_out += become_output
no_prompt_err += become_errput
elif sudoable:
stdin.write(self.runner.become_pass + '\n')
(returncode, stdout, stderr) = self._communicate(p, stdin, in_data, sudoable=sudoable, prompt=prompt)
if C.HOST_KEY_CHECKING and not_in_host_file:
# lock around the initial SSH connectivity so the user prompt about whether to add
# the host to known hosts is not intermingled with multiprocess output.
fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN)
fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
controlpersisterror = 'Bad configuration option: ControlPersist' in stderr or \
'unknown configuration option: ControlPersist' in stderr
if C.HOST_KEY_CHECKING:
if ssh_cmd[0] == "sshpass" and p.returncode == 6:
raise errors.AnsibleError('Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support this. Please add this host\'s fingerprint to your known_hosts file to manage this host.')
if p.returncode != 0 and controlpersisterror:
raise errors.AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" (or ssh_args in [ssh_connection] section of the config file) before running again')
if p.returncode == 255 and (in_data or self.runner.module_name == 'raw'):
raise errors.AnsibleError('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
if p.returncode == 255:
ip = None
port = None
for line in stderr.splitlines():
match = re.search(
'Connecting to .*\[(\d+\.\d+\.\d+\.\d+)\] port (\d+)',
line)
if match:
ip = match.group(1)
port = match.group(2)
if 'UNPROTECTED PRIVATE KEY FILE' in stderr:
lines = [line for line in stderr.splitlines()
if 'ignore key:' in line]
else:
lines = stderr.splitlines()[-1:]
if ip and port:
lines.append(' while connecting to %s:%s' % (ip, port))
lines.append(
'It is sometimes useful to re-run the command using -vvvv, '
'which prints SSH debug output to help diagnose the issue.')
raise errors.AnsibleError('SSH Error: %s' % '\n'.join(lines))
return (p.returncode, '', no_prompt_out + stdout, no_prompt_err + stderr)
def put_file(self, in_path, out_path):
''' transfer a file from local to remote '''
vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
if not os.path.exists(in_path):
raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
cmd = self._password_cmd()
host = self.host
if self.ipv6:
host = '[%s]' % host
if C.DEFAULT_SCP_IF_SSH:
cmd += ["scp"] + self.common_args
cmd += [in_path,host + ":" + pipes.quote(out_path)]
indata = None
else:
cmd += ["sftp"] + self.common_args + [host]
indata = "put %s %s\n" % (pipes.quote(in_path), pipes.quote(out_path))
(p, stdin) = self._run(cmd, indata)
self._send_password()
(returncode, stdout, stderr) = self._communicate(p, stdin, indata)
if returncode != 0:
raise errors.AnsibleError("failed to transfer file to %s:\n%s\n%s" % (out_path, stdout, stderr))
def fetch_file(self, in_path, out_path):
''' fetch a file from remote to local '''
vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
cmd = self._password_cmd()
host = self.host
if self.ipv6:
host = '[%s]' % host
if C.DEFAULT_SCP_IF_SSH:
cmd += ["scp"] + self.common_args
cmd += [host + ":" + in_path, out_path]
indata = None
else:
cmd += ["sftp"] + self.common_args + [host]
indata = "get %s %s\n" % (in_path, out_path)
p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self._send_password()
stdout, stderr = p.communicate(indata)
if p.returncode != 0:
raise errors.AnsibleError("failed to transfer file from %s:\n%s\n%s" % (in_path, stdout, stderr))
def close(self):
''' not applicable since we're executing openssh binaries '''
pass
| gpl-3.0 |
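`_communicate()` above deliberately avoids `Popen.communicate()` because the SSH ControlMaster may keep stdout open; instead it drains both pipes itself with `select()`. A self-contained Python 3 sketch of that drain loop, without any of the Ansible-specific prompt handling:

```python
import os
import select
import subprocess

def drain(cmd):
    """Incrementally read a child's stdout/stderr until both streams hit EOF."""
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = b'', b''
    rpipes = [p.stdout, p.stderr]
    while rpipes:
        readable, _, _ = select.select(rpipes, [], [], 1)
        for pipe in readable:
            chunk = os.read(pipe.fileno(), 9000)
            if not chunk:
                rpipes.remove(pipe)        # EOF: stop selecting on this stream
            elif pipe is p.stdout:
                out += chunk
            else:
                err += chunk
    return p.wait(), out, err

# e.g. rc, out, err = drain(['uname', '-a'])
```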
Upward-Spiral-Science/team1 | code/test_assumptions.py | 1 | 1525 | import numpy as np
import matplotlib.pyplot as plt
import urllib2
#%matplotlib inline
sample_size = 1000
np.random.seed(1)
url = ('https://raw.githubusercontent.com/Upward-Spiral-Science'
'/data/master/syn-density/output.csv')
data = urllib2.urlopen(url)
csv = np.genfromtxt(data, delimiter=",")[1:]
csv_rand = None
for i in range (1, sample_size):
#Randomly sample from dataset
a = np.random.permutation(np.arange(csv.shape[0]))[:100]
csv_rand_sample = csv[a]
# Normalize
mean_unmask = np.mean(csv_rand_sample[:,3])
std_unmask = np.std(csv_rand_sample[:,3])
csv_rand_sample[:,3] = (csv_rand_sample[:,3]-mean_unmask)/std_unmask
#Stack matrix
if i == 1:
csv_rand = csv_rand_sample
else:
csv_rand = np.dstack((csv_rand,csv_rand_sample))
#Average across random samples
csv_rand = np.mean(csv_rand,axis=2)
#Independence Assumption
covar = np.cov(csv_rand_sample)
plt.figure(figsize=(7,7))
plt.imshow(covar)
plt.title('Covariance of Synapse Density dataset')
plt.colorbar()
plt.show()
diag = covar.diagonal()*np.eye(covar.shape[0])
hollow = covar-diag
d_det = np.linalg.slogdet(diag)[1]
h_det = np.linalg.slogdet(hollow)[1]
print d_det
print h_det
plt.figure(figsize=(11,8))
plt.subplot(121)
plt.imshow(diag)
plt.clim([0, np.max(covar)])
plt.title('Determinant of on-diagonal: ' + str(d_det))
plt.subplot(122)
plt.imshow(hollow)
plt.clim([0, np.max(covar)])
plt.title('Determinant of off-diagonal: ' + str(h_det))
plt.show()
print "Ratio of on and off-diagonal determinants: " + str(d_det/h_det)
| apache-2.0 |
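The analysis above judges independence by comparing the log-determinant of the diagonal (variances only) against the hollow, off-diagonal part of the covariance matrix. The same computation on synthetic data, as a compact self-contained check (illustrative only; the sampled syn-density data above is unchanged):

```python
import numpy as np

rng = np.random.RandomState(1)
sample = rng.randn(20, 200)              # synthetic stand-in: 20 variables, 200 observations

covar = np.cov(sample)
diag = np.diag(np.diag(covar))           # keep only the variances
hollow = covar - diag                    # keep only the covariances

d_det = np.linalg.slogdet(diag)[1]
h_det = np.linalg.slogdet(hollow)[1]
print("Ratio of on and off-diagonal determinants:", d_det / h_det)
```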
enkripsi/gyp | test/msvs/list_excluded/gyptest-all.py | 347 | 1292 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that msvs_list_excluded_files=0 doesn't list files that would
normally be in _excluded_files, and that if that flag is not set, then they
are still listed.
"""
import os
import TestGyp
test = TestGyp.TestGyp(formats=['msvs'], workdir='workarea_all')
# with the flag set to 0
try:
os.environ['GYP_GENERATOR_FLAGS'] = 'msvs_list_excluded_files=0'
test.run_gyp('hello_exclude.gyp')
finally:
del os.environ['GYP_GENERATOR_FLAGS']
if test.uses_msbuild:
test.must_not_contain('hello.vcxproj', 'hello_mac')
else:
test.must_not_contain('hello.vcproj', 'hello_mac')
# with the flag not set
test.run_gyp('hello_exclude.gyp')
if test.uses_msbuild:
test.must_contain('hello.vcxproj', 'hello_mac')
else:
test.must_contain('hello.vcproj', 'hello_mac')
# with the flag explicitly set to 1
try:
os.environ['GYP_GENERATOR_FLAGS'] = 'msvs_list_excluded_files=1'
test.run_gyp('hello_exclude.gyp')
finally:
del os.environ['GYP_GENERATOR_FLAGS']
if test.uses_msbuild:
test.must_contain('hello.vcxproj', 'hello_mac')
else:
test.must_contain('hello.vcproj', 'hello_mac')
test.pass_test()
| bsd-3-clause |
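The test above wraps each `GYP_GENERATOR_FLAGS` assignment in try/finally so the variable cannot leak into the following runs. That pattern can be packaged once as a context manager; a generic sketch (not part of the gyp test harness):

```python
import os
from contextlib import contextmanager

@contextmanager
def temp_env(name, value):
    """Set an environment variable for the duration of a with-block, then restore it."""
    previous = os.environ.get(name)
    os.environ[name] = value
    try:
        yield
    finally:
        if previous is None:
            del os.environ[name]
        else:
            os.environ[name] = previous

# with temp_env('GYP_GENERATOR_FLAGS', 'msvs_list_excluded_files=0'):
#     test.run_gyp('hello_exclude.gyp')
```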
pombredanne/http-repo.gem5.org-gem5- | tests/configs/base_config.py | 6 | 10647 | # Copyright (c) 2012-2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
# Andreas Hansson
from abc import ABCMeta, abstractmethod
import m5
from m5.objects import *
from m5.proxy import *
m5.util.addToPath('../configs/common')
import FSConfig
from Caches import *
_have_kvm_support = 'BaseKvmCPU' in globals()
class BaseSystem(object):
"""Base system builder.
This class provides some basic functionality for creating an ARM
system with the usual peripherals (caches, GIC, etc.). It allows
customization by defining separate methods for different parts of
the initialization process.
"""
__metaclass__ = ABCMeta
def __init__(self, mem_mode='timing', mem_class=SimpleMemory,
cpu_class=TimingSimpleCPU, num_cpus=1, num_threads=1,
checker=False,
mem_size=None):
"""Initialize a simple base system.
Keyword Arguments:
mem_mode -- String describing the memory mode (timing or atomic)
mem_class -- Memory controller class to use
cpu_class -- CPU class to use
num_cpus -- Number of CPUs to instantiate
checker -- Set to True to add checker CPUs
mem_size -- Override the default memory size
"""
self.mem_mode = mem_mode
self.mem_class = mem_class
self.cpu_class = cpu_class
self.num_cpus = num_cpus
self.num_threads = num_threads
self.checker = checker
def create_cpus(self, cpu_clk_domain):
"""Return a list of CPU objects to add to a system."""
cpus = [ self.cpu_class(clk_domain=cpu_clk_domain,
numThreads=self.num_threads,
cpu_id=i)
for i in range(self.num_cpus) ]
if self.checker:
for c in cpus:
c.addCheckerCpu()
return cpus
def create_caches_private(self, cpu):
"""Add private caches to a CPU.
Arguments:
cpu -- CPU instance to work on.
"""
cpu.addPrivateSplitL1Caches(L1_ICache(size='32kB', assoc=1),
L1_DCache(size='32kB', assoc=4))
def create_caches_shared(self, system):
"""Add shared caches to a system.
Arguments:
system -- System to work on.
Returns:
A bus that CPUs should use to connect to the shared cache.
"""
system.toL2Bus = L2XBar(clk_domain=system.cpu_clk_domain)
system.l2c = L2Cache(clk_domain=system.cpu_clk_domain,
size='4MB', assoc=8)
system.l2c.cpu_side = system.toL2Bus.master
system.l2c.mem_side = system.membus.slave
return system.toL2Bus
def init_cpu(self, system, cpu, sha_bus):
"""Initialize a CPU.
Arguments:
system -- System to work on.
cpu -- CPU to initialize.
"""
if not cpu.switched_out:
self.create_caches_private(cpu)
cpu.createInterruptController()
cpu.connectAllPorts(sha_bus if sha_bus != None else system.membus,
system.membus)
def init_kvm(self, system):
"""Do KVM-specific system initialization.
Arguments:
system -- System to work on.
"""
system.vm = KvmVM()
def init_system(self, system):
"""Initialize a system.
Arguments:
system -- System to initialize.
"""
self.create_clk_src(system)
system.cpu = self.create_cpus(system.cpu_clk_domain)
if _have_kvm_support and \
any([isinstance(c, BaseKvmCPU) for c in system.cpu]):
self.init_kvm(system)
sha_bus = self.create_caches_shared(system)
for cpu in system.cpu:
self.init_cpu(system, cpu, sha_bus)
def create_clk_src(self,system):
# Create system clock domain. This provides clock value to every
# clocked object that lies beneath it unless explicitly overwritten
# by a different clock domain.
system.voltage_domain = VoltageDomain()
system.clk_domain = SrcClockDomain(clock = '1GHz',
voltage_domain =
system.voltage_domain)
# Create a separate clock domain for components that should
# run at CPUs frequency
system.cpu_clk_domain = SrcClockDomain(clock = '2GHz',
voltage_domain =
system.voltage_domain)
@abstractmethod
def create_system(self):
"""Create an return an initialized system."""
pass
@abstractmethod
def create_root(self):
"""Create and return a simulation root using the system
defined by this class."""
pass
class BaseSESystem(BaseSystem):
"""Basic syscall-emulation builder."""
def __init__(self, **kwargs):
BaseSystem.__init__(self, **kwargs)
def init_system(self, system):
BaseSystem.init_system(self, system)
def create_system(self):
system = System(physmem = self.mem_class(),
membus = SystemXBar(),
mem_mode = self.mem_mode,
multi_thread = (self.num_threads > 1))
system.system_port = system.membus.slave
system.physmem.port = system.membus.master
self.init_system(system)
return system
def create_root(self):
system = self.create_system()
m5.ticks.setGlobalFrequency('1THz')
return Root(full_system=False, system=system)
class BaseSESystemUniprocessor(BaseSESystem):
"""Basic syscall-emulation builder for uniprocessor systems.
Note: This class is only really needed to provide backwards
compatibility in existing test cases.
"""
def __init__(self, **kwargs):
BaseSESystem.__init__(self, **kwargs)
def create_caches_private(self, cpu):
# The atomic SE configurations do not use caches
if self.mem_mode == "timing":
# @todo We might want to revisit these rather enthusiastic L1 sizes
cpu.addTwoLevelCacheHierarchy(L1_ICache(size='128kB'),
L1_DCache(size='256kB'),
L2Cache(size='2MB'))
def create_caches_shared(self, system):
return None
class BaseFSSystem(BaseSystem):
"""Basic full system builder."""
def __init__(self, **kwargs):
BaseSystem.__init__(self, **kwargs)
def init_system(self, system):
BaseSystem.init_system(self, system)
# create the memory controllers and connect them, stick with
# the physmem name to avoid bumping all the reference stats
system.physmem = [self.mem_class(range = r)
for r in system.mem_ranges]
for i in xrange(len(system.physmem)):
system.physmem[i].port = system.membus.master
# create the iocache, which by default runs at the system clock
system.iocache = IOCache(addr_ranges=system.mem_ranges)
system.iocache.cpu_side = system.iobus.master
system.iocache.mem_side = system.membus.slave
def create_root(self):
system = self.create_system()
m5.ticks.setGlobalFrequency('1THz')
return Root(full_system=True, system=system)
class BaseFSSystemUniprocessor(BaseFSSystem):
"""Basic full system builder for uniprocessor systems.
Note: This class is only really needed to provide backwards
compatibility in existing test cases.
"""
def __init__(self, **kwargs):
BaseFSSystem.__init__(self, **kwargs)
def create_caches_private(self, cpu):
cpu.addTwoLevelCacheHierarchy(L1_ICache(size='32kB', assoc=1),
L1_DCache(size='32kB', assoc=4),
L2Cache(size='4MB', assoc=8))
def create_caches_shared(self, system):
return None
class BaseFSSwitcheroo(BaseFSSystem):
"""Uniprocessor system prepared for CPU switching"""
def __init__(self, cpu_classes, **kwargs):
BaseFSSystem.__init__(self, **kwargs)
self.cpu_classes = tuple(cpu_classes)
def create_cpus(self, cpu_clk_domain):
cpus = [ cclass(clk_domain = cpu_clk_domain,
cpu_id=0,
switched_out=True)
for cclass in self.cpu_classes ]
cpus[0].switched_out = False
return cpus
| bsd-3-clause |
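The gem5 builders above use `ABCMeta` so shared wiring lives in `BaseSystem` while `create_system()`/`create_root()` must come from a concrete subclass. A minimal, gem5-free illustration of that layering (using Python 3 `metaclass` syntax instead of the Python 2 `__metaclass__` attribute seen above):

```python
from abc import ABCMeta, abstractmethod

class BaseBuilder(metaclass=ABCMeta):
    """Common initialization in the base; leaf classes decide what a 'system' is."""

    def init_system(self, system):
        system['clock'] = '1GHz'          # shared setup every subclass inherits

    @abstractmethod
    def create_system(self):
        """Create and return an initialized system description."""

class SEBuilder(BaseBuilder):
    def create_system(self):
        system = {}
        self.init_system(system)
        return system

print(SEBuilder().create_system())        # {'clock': '1GHz'}
# Instantiating BaseBuilder() directly raises TypeError: create_system is abstract.
```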
crtrott/lammps | python/examples/viz_atomeye.py | 25 | 1913 | #!/usr/bin/env python -i
# preceding line should have path for Python on your machine
# viz_atomeye.py
# Purpose: viz running LAMMPS simulation via AtomEye
# Syntax: viz_atomeye.py in.lammps Nfreq Nsteps
# in.lammps = LAMMPS input script
# Nfreq = dump and viz snapshot every this many steps
# Nsteps = run for this many steps
import sys,os
# set this to point to AtomEye version 3 executable
# first line if want AtomEye output to screen, 2nd line to file
#ATOMEYE3 = "/home/sjplimp/tools/atomeye3/A3.i686-20060530"
ATOMEYE3 = "/home/sjplimp/tools/atomeye3/A3.i686-20060530 > atomeye.out"
# parse command line
argv = sys.argv
if len(argv) != 4:
print "Syntax: viz_atomeye.py in.lammps Nfreq Nsteps"
sys.exit()
infile = sys.argv[1]
nfreq = int(sys.argv[2])
nsteps = int(sys.argv[3])
me = 0
# uncomment if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()
from lammps import lammps
lmp = lammps()
# run infile all at once
# assumed to have no run command in it
# dump a file in extended CFG format for AtomEye
lmp.file(infile)
lmp.command("thermo %d" % nfreq)
lmp.command("dump python all cfg %d tmp.cfg.* id type xs ys zs" % nfreq)
# initial 0-step run to generate dump file and image
lmp.command("run 0 pre yes post no")
ntimestep = 0
# wrapper on GL window via Pizza.py gl tool
# just proc 0 handles reading of dump file and viz
if me == 0:
a = os.popen(ATOMEYE3,'w')
a.write("load_config tmp.cfg.0\n")
a.flush()
# run nfreq steps at a time w/out pre/post, read dump snapshot, display it
while ntimestep < nsteps:
lmp.command("run %d pre no post no" % nfreq)
ntimestep += nfreq
if me == 0:
a.write("load_config tmp.cfg.%d\n" % ntimestep)
a.flush()
lmp.command("run 0 pre no post yes")
# uncomment if running in parallel via Pypar
#print "Proc %d out of %d procs has" % (me,nprocs), lmp
#pypar.finalize()
| gpl-2.0 |
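The driver above steers AtomEye by writing commands to a pipe opened with `os.popen(..., 'w')` and flushing after each one. The same drive-a-viewer-over-stdin pattern with the `subprocess` module (the executable path is a placeholder, not the AtomEye path used above):

```python
import subprocess

VIEWER = "/path/to/viewer"                 # placeholder executable

proc = subprocess.Popen([VIEWER], stdin=subprocess.PIPE, universal_newlines=True)

def send(command):
    """Push one command line to the viewer and flush so it acts immediately."""
    proc.stdin.write(command + "\n")
    proc.stdin.flush()

send("load_config tmp.cfg.0")
# ... advance the simulation, write the next snapshot, then:
send("load_config tmp.cfg.100")
proc.stdin.close()
proc.wait()
```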
neraliu/tainted-phantomjs | src/breakpad/src/tools/gyp/gyptest.py | 137 | 7245 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__doc__ = """
gyptest.py -- test runner for GYP tests.
"""
import os
import optparse
import subprocess
import shlex
import sys
class CommandRunner:
"""
Executor class for commands, including "commands" implemented by
Python functions.
"""
verbose = True
active = True
def __init__(self, dictionary={}):
self.subst_dictionary(dictionary)
def subst_dictionary(self, dictionary):
self._subst_dictionary = dictionary
def subst(self, string, dictionary=None):
"""
Substitutes (via the format operator) the values in the specified
dictionary into the specified command.
The command can be an (action, string) tuple. In all cases, we
perform substitution on strings and don't worry if something isn't
a string. (It's probably a Python function to be executed.)
"""
if dictionary is None:
dictionary = self._subst_dictionary
if dictionary:
try:
string = string % dictionary
except TypeError:
pass
return string
def display(self, command, stdout=None, stderr=None):
if not self.verbose:
return
if type(command) == type(()):
func = command[0]
args = command[1:]
s = '%s(%s)' % (func.__name__, ', '.join(map(repr, args)))
elif type(command) == type([]):
# TODO: quote arguments containing spaces
# TODO: handle meta characters?
s = ' '.join(command)
else:
s = self.subst(command)
if not s.endswith('\n'):
s += '\n'
sys.stdout.write(s)
sys.stdout.flush()
def execute(self, command, stdout=None, stderr=None):
"""
Executes a single command.
"""
if not self.active:
return 0
if type(command) == type(''):
command = self.subst(command)
cmdargs = shlex.split(command)
if cmdargs[0] == 'cd':
command = (os.chdir,) + tuple(cmdargs[1:])
if type(command) == type(()):
func = command[0]
args = command[1:]
return func(*args)
else:
if stdout is sys.stdout:
# Same as passing sys.stdout, except python2.4 doesn't fail on it.
subout = None
else:
# Open pipe for anything else so Popen works on python2.4.
subout = subprocess.PIPE
if stderr is sys.stderr:
# Same as passing sys.stderr, except python2.4 doesn't fail on it.
suberr = None
elif stderr is None:
# Merge with stdout if stderr isn't specified.
suberr = subprocess.STDOUT
else:
# Open pipe for anything else so Popen works on python2.4.
suberr = subprocess.PIPE
p = subprocess.Popen(command,
shell=(sys.platform == 'win32'),
stdout=subout,
stderr=suberr)
p.wait()
if stdout is None:
self.stdout = p.stdout.read()
elif stdout is not sys.stdout:
stdout.write(p.stdout.read())
if stderr not in (None, sys.stderr):
stderr.write(p.stderr.read())
return p.returncode
def run(self, command, display=None, stdout=None, stderr=None):
"""
Runs a single command, displaying it first.
"""
if display is None:
display = command
self.display(display)
return self.execute(command, stdout, stderr)
class Unbuffered:
def __init__(self, fp):
self.fp = fp
def write(self, arg):
self.fp.write(arg)
self.fp.flush()
def __getattr__(self, attr):
return getattr(self.fp, attr)
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
def find_all_gyptest_files(directory):
result = []
for root, dirs, files in os.walk(directory):
if '.svn' in dirs:
dirs.remove('.svn')
result.extend([ os.path.join(root, f) for f in files
if f.startswith('gyptest') and f.endswith('.py') ])
result.sort()
return result
def main(argv=None):
if argv is None:
argv = sys.argv
usage = "gyptest.py [-ahlnq] [-f formats] [test ...]"
parser = optparse.OptionParser(usage=usage)
parser.add_option("-a", "--all", action="store_true",
help="run all tests")
parser.add_option("-C", "--chdir", action="store", default=None,
help="chdir to the specified directory")
parser.add_option("-f", "--format", action="store", default='',
help="run tests with the specified formats")
parser.add_option("-l", "--list", action="store_true",
help="list available tests and exit")
parser.add_option("-n", "--no-exec", action="store_true",
help="no execute, just print the command line")
parser.add_option("--passed", action="store_true",
help="report passed tests")
parser.add_option("--path", action="append", default=[],
help="additional $PATH directory")
parser.add_option("-q", "--quiet", action="store_true",
help="quiet, don't print test command lines")
opts, args = parser.parse_args(argv[1:])
if opts.chdir:
os.chdir(opts.chdir)
if opts.path:
os.environ['PATH'] += ':' + ':'.join(opts.path)
if not args:
if not opts.all:
sys.stderr.write('Specify -a to get all tests.\n')
return 1
args = ['test']
tests = []
for arg in args:
if os.path.isdir(arg):
tests.extend(find_all_gyptest_files(os.path.normpath(arg)))
else:
tests.append(arg)
if opts.list:
for test in tests:
print test
sys.exit(0)
CommandRunner.verbose = not opts.quiet
CommandRunner.active = not opts.no_exec
cr = CommandRunner()
os.environ['PYTHONPATH'] = os.path.abspath('test/lib')
if not opts.quiet:
sys.stdout.write('PYTHONPATH=%s\n' % os.environ['PYTHONPATH'])
passed = []
failed = []
no_result = []
if opts.format:
format_list = opts.format.split(',')
else:
# TODO: not duplicate this mapping from pylib/gyp/__init__.py
format_list = [ {
'freebsd7': 'make',
'freebsd8': 'make',
'cygwin': 'msvs',
'win32': 'msvs',
'linux2': 'scons',
'darwin': 'xcode',
}[sys.platform] ]
for format in format_list:
os.environ['TESTGYP_FORMAT'] = format
if not opts.quiet:
sys.stdout.write('TESTGYP_FORMAT=%s\n' % format)
for test in tests:
status = cr.run([sys.executable, test],
stdout=sys.stdout,
stderr=sys.stderr)
if status == 2:
no_result.append(test)
elif status:
failed.append(test)
else:
passed.append(test)
if not opts.quiet:
def report(description, tests):
if tests:
if len(tests) == 1:
sys.stdout.write("\n%s the following test:\n" % description)
else:
fmt = "\n%s the following %d tests:\n"
sys.stdout.write(fmt % (description, len(tests)))
sys.stdout.write("\t" + "\n\t".join(tests) + "\n")
if opts.passed:
report("Passed", passed)
report("Failed", failed)
report("No result from", no_result)
if failed:
return 1
else:
return 0
if __name__ == "__main__":
sys.exit(main())
| bsd-3-clause |
ar7z1/ansible | lib/ansible/modules/commands/expect.py | 21 | 7569 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: expect
version_added: '2.0'
short_description: Executes a command and responds to prompts.
description:
- The C(expect) module executes a command and responds to prompts.
- The given command will be executed on all selected nodes. It will not be
processed through the shell, so variables like C($HOME) and operations
like C("<"), C(">"), C("|"), and C("&") will not work.
options:
command:
description:
- The command module takes command to run.
required: true
creates:
description:
- A filename, when it already exists, this step will B(not) be run.
removes:
description:
- A filename, when it does not exist, this step will B(not) be run.
chdir:
description:
- Change into this directory before running the command.
responses:
description:
- Mapping of expected string/regex and string to respond with. If the
response is a list, successive matches return successive
responses. List functionality is new in 2.1.
required: true
timeout:
description:
- Amount of time in seconds to wait for the expected strings. Use
C(null) to disable timeout.
default: 30
echo:
description:
- Whether or not to echo out your response strings.
default: false
requirements:
- python >= 2.6
- pexpect >= 3.3
notes:
- If you want to run a command through the shell (say you are using C(<),
C(>), C(|), etc), you must specify a shell in the command such as
C(/bin/bash -c "/path/to/something | grep else").
- The question, or key, under I(responses) is a python regex match. Case
insensitive searches are indicated with a prefix of C(?i).
- By default, if a question is encountered multiple times, its string
response will be repeated. If you need different responses for successive
question matches, instead of a string response, use a list of strings as
the response. The list functionality is new in 2.1.
- The M(expect) module is designed for simple scenarios. For more complex
needs, consider the use of expect code with the M(shell) or M(script)
modules. (An example is part of the M(shell) module documentation)
author: "Matt Martz (@sivel)"
'''
EXAMPLES = r'''
- name: Case insensitive password string match
expect:
command: passwd username
responses:
(?i)password: "MySekretPa$$word"
# you don't want to show passwords in your logs
no_log: true
- name: Generic question with multiple different responses
expect:
command: /path/to/custom/command
responses:
Question:
- response1
- response2
- response3
'''
import datetime
import os
import traceback
try:
import pexpect
HAS_PEXPECT = True
except ImportError:
HAS_PEXPECT = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native, to_text
def response_closure(module, question, responses):
resp_gen = (u'%s\n' % to_text(r).rstrip(u'\n') for r in responses)
def wrapped(info):
try:
return next(resp_gen)
except StopIteration:
module.fail_json(msg="No remaining responses for '%s', "
"output was '%s'" %
(question,
info['child_result_list'][-1]))
return wrapped
def main():
module = AnsibleModule(
argument_spec=dict(
command=dict(required=True),
chdir=dict(type='path'),
creates=dict(type='path'),
removes=dict(type='path'),
responses=dict(type='dict', required=True),
timeout=dict(type='int', default=30),
echo=dict(type='bool', default=False),
)
)
if not HAS_PEXPECT:
module.fail_json(msg='The pexpect python module is required')
chdir = module.params['chdir']
args = module.params['command']
creates = module.params['creates']
removes = module.params['removes']
responses = module.params['responses']
timeout = module.params['timeout']
echo = module.params['echo']
events = dict()
for key, value in responses.items():
if isinstance(value, list):
response = response_closure(module, key, value)
else:
response = u'%s\n' % to_text(value).rstrip(u'\n')
events[to_text(key)] = response
if args.strip() == '':
module.fail_json(rc=256, msg="no command given")
if chdir:
chdir = os.path.abspath(chdir)
os.chdir(chdir)
if creates:
# do not run the command if the line contains creates=filename
# and the filename already exists. This allows idempotence
# of command executions.
if os.path.exists(creates):
module.exit_json(
cmd=args,
stdout="skipped, since %s exists" % creates,
changed=False,
rc=0
)
if removes:
# do not run the command if the line contains removes=filename
# and the filename does not exist. This allows idempotence
# of command executions.
if not os.path.exists(removes):
module.exit_json(
cmd=args,
stdout="skipped, since %s does not exist" % removes,
changed=False,
rc=0
)
startd = datetime.datetime.now()
try:
try:
# Prefer pexpect.run from pexpect>=4
out, rc = pexpect.run(args, timeout=timeout, withexitstatus=True,
events=events, cwd=chdir, echo=echo,
encoding='utf-8')
except TypeError:
# Use pexpect.runu in pexpect>=3.3,<4
out, rc = pexpect.runu(args, timeout=timeout, withexitstatus=True,
events=events, cwd=chdir, echo=echo)
except (TypeError, AttributeError) as e:
# This should catch all insufficient versions of pexpect
# We deem them insufficient for their lack of ability to specify
# to not echo responses via the run/runu functions, which would
# potentially leak sensitive information
module.fail_json(msg='Insufficient version of pexpect installed '
'(%s), this module requires pexpect>=3.3. '
'Error was %s' % (pexpect.__version__, to_native(e)))
except pexpect.ExceptionPexpect as e:
module.fail_json(msg='%s' % to_native(e), exception=traceback.format_exc())
endd = datetime.datetime.now()
delta = endd - startd
if out is None:
out = ''
result = dict(
cmd=args,
stdout=out.rstrip('\r\n'),
rc=rc,
start=str(startd),
end=str(endd),
delta=str(delta),
changed=True,
)
if rc is None:
module.fail_json(msg='command exceeded timeout', **result)
elif rc != 0:
module.fail_json(msg='non-zero return code', **result)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
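`response_closure()` above gives pexpect a callable that walks through a list of canned answers, one per successive prompt match. The same generator-backed closure in isolation, runnable without Ansible or pexpect (hypothetical helper name and error type):

```python
def make_responder(question, responses):
    """Return a callable yielding the next answer each time `question` matches."""
    resp_gen = ('%s\n' % r.rstrip('\n') for r in responses)

    def wrapped(info=None):
        try:
            return next(resp_gen)
        except StopIteration:
            raise RuntimeError("No remaining responses for %r" % question)

    return wrapped

answer = make_responder('Question', ['response1', 'response2'])
print(repr(answer()))   # 'response1\n'
print(repr(answer()))   # 'response2\n'
answer()                # raises RuntimeError: no responses left
```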
tmerrick1/spack | lib/spack/spack/test/cmd/view.py | 4 | 6749 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack.main import SpackCommand
import os.path
import pytest
activate = SpackCommand('activate')
extensions = SpackCommand('extensions')
install = SpackCommand('install')
view = SpackCommand('view')
@pytest.mark.parametrize('cmd', ['hardlink', 'symlink', 'hard', 'add'])
def test_view_link_type(
tmpdir, mock_packages, mock_archive, mock_fetch, config,
install_mockery, cmd):
install('libdwarf')
viewpath = str(tmpdir.mkdir('view_{0}'.format(cmd)))
view(cmd, viewpath, 'libdwarf')
package_prefix = os.path.join(viewpath, 'libdwarf')
assert os.path.exists(package_prefix)
assert os.path.islink(package_prefix) == (not cmd.startswith('hard'))
def test_view_external(
tmpdir, mock_packages, mock_archive, mock_fetch, config,
install_mockery):
install('externaltool')
viewpath = str(tmpdir.mkdir('view'))
output = view('symlink', viewpath, 'externaltool')
assert 'Skipping external package: externaltool' in output
def test_view_extension(
tmpdir, mock_packages, mock_archive, mock_fetch, config,
install_mockery):
install('extendee')
install('extension1@1.0')
install('extension1@2.0')
install('extension2@1.0')
viewpath = str(tmpdir.mkdir('view'))
view('symlink', viewpath, 'extension1@1.0')
all_installed = extensions('--show', 'installed', 'extendee')
assert 'extension1@1.0' in all_installed
assert 'extension1@2.0' in all_installed
assert 'extension2@1.0' in all_installed
global_activated = extensions('--show', 'activated', 'extendee')
assert 'extension1@1.0' not in global_activated
assert 'extension1@2.0' not in global_activated
assert 'extension2@1.0' not in global_activated
view_activated = extensions('--show', 'activated',
'-v', viewpath,
'extendee')
assert 'extension1@1.0' in view_activated
assert 'extension1@2.0' not in view_activated
assert 'extension2@1.0' not in view_activated
assert os.path.exists(os.path.join(viewpath, 'bin', 'extension1'))
def test_view_extension_remove(
tmpdir, mock_packages, mock_archive, mock_fetch, config,
install_mockery):
install('extendee')
install('extension1@1.0')
viewpath = str(tmpdir.mkdir('view'))
view('symlink', viewpath, 'extension1@1.0')
view('remove', viewpath, 'extension1@1.0')
all_installed = extensions('--show', 'installed', 'extendee')
assert 'extension1@1.0' in all_installed
global_activated = extensions('--show', 'activated', 'extendee')
assert 'extension1@1.0' not in global_activated
view_activated = extensions('--show', 'activated',
'-v', viewpath,
'extendee')
assert 'extension1@1.0' not in view_activated
assert not os.path.exists(os.path.join(viewpath, 'bin', 'extension1'))
def test_view_extension_conflict(
tmpdir, mock_packages, mock_archive, mock_fetch, config,
install_mockery):
install('extendee')
install('extension1@1.0')
install('extension1@2.0')
viewpath = str(tmpdir.mkdir('view'))
view('symlink', viewpath, 'extension1@1.0')
output = view('symlink', viewpath, 'extension1@2.0')
assert 'Package conflict detected' in output
def test_view_extension_conflict_ignored(
tmpdir, mock_packages, mock_archive, mock_fetch, config,
install_mockery):
install('extendee')
install('extension1@1.0')
install('extension1@2.0')
viewpath = str(tmpdir.mkdir('view'))
view('symlink', viewpath, 'extension1@1.0')
view('symlink', viewpath, '-i', 'extension1@2.0')
with open(os.path.join(viewpath, 'bin', 'extension1'), 'r') as fin:
assert fin.read() == '1.0'
def test_view_extension_global_activation(
tmpdir, mock_packages, mock_archive, mock_fetch, config,
install_mockery):
install('extendee')
install('extension1@1.0')
install('extension1@2.0')
install('extension2@1.0')
viewpath = str(tmpdir.mkdir('view'))
view('symlink', viewpath, 'extension1@1.0')
activate('extension1@2.0')
activate('extension2@1.0')
all_installed = extensions('--show', 'installed', 'extendee')
assert 'extension1@1.0' in all_installed
assert 'extension1@2.0' in all_installed
assert 'extension2@1.0' in all_installed
global_activated = extensions('--show', 'activated', 'extendee')
assert 'extension1@1.0' not in global_activated
assert 'extension1@2.0' in global_activated
assert 'extension2@1.0' in global_activated
view_activated = extensions('--show', 'activated',
'-v', viewpath,
'extendee')
assert 'extension1@1.0' in view_activated
assert 'extension1@2.0' not in view_activated
assert 'extension2@1.0' not in view_activated
assert os.path.exists(os.path.join(viewpath, 'bin', 'extension1'))
assert not os.path.exists(os.path.join(viewpath, 'bin', 'extension2'))
def test_view_extendee_with_global_activations(
tmpdir, mock_packages, mock_archive, mock_fetch, config,
install_mockery):
install('extendee')
install('extension1@1.0')
install('extension1@2.0')
install('extension2@1.0')
viewpath = str(tmpdir.mkdir('view'))
activate('extension1@2.0')
output = view('symlink', viewpath, 'extension1@1.0')
assert 'Error: Globally activated extensions cannot be used' in output
| lgpl-2.1 |
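The first test above combines `pytest.mark.parametrize` with the `tmpdir` fixture so one test body covers every link type. A Spack-free sketch of the same shape, exercising plain `os.link`/`os.symlink` instead of the `view` command:

```python
import os
import pytest

@pytest.mark.parametrize('cmd', ['hardlink', 'symlink', 'hard', 'add'])
def test_link_type(tmpdir, cmd):
    target = tmpdir.join('payload.txt')
    target.write('data')
    link_path = str(tmpdir.join('view_{0}'.format(cmd)))
    if cmd.startswith('hard'):
        os.link(str(target), link_path)    # hard link: not a symlink on disk
    else:
        os.symlink(str(target), link_path)
    assert os.path.exists(link_path)
    assert os.path.islink(link_path) == (not cmd.startswith('hard'))
```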
SUSE/kiwi | kiwi/storage/raid_device.py | 1 | 4198 | # Copyright (c) 2015 SUSE Linux GmbH. All rights reserved.
#
# This file is part of kiwi.
#
# kiwi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# kiwi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kiwi. If not, see <http://www.gnu.org/licenses/>
#
import os
import logging
# project
from kiwi.command import Command
from kiwi.storage.device_provider import DeviceProvider
from kiwi.storage.mapped_device import MappedDevice
from kiwi.exceptions import (
KiwiRaidSetupError
)
log = logging.getLogger('kiwi')
class RaidDevice(DeviceProvider):
"""
**Implement raid setup on a storage device**
:param object storage_provider: Instance of class based on DeviceProvider
"""
def __init__(self, storage_provider):
# bind the underlying block device providing class instance
# to this object (e.g loop) if present. This is done to guarantee
# the correct destructor order when the device should be released.
self.storage_provider = storage_provider
self.raid_level_map = {
'mirroring': '1',
'striping': '0'
}
self.raid_device = None
def get_device(self):
"""
Instance of MappedDevice providing the raid device
:return: mapped raid device
:rtype: MappedDevice
"""
if self.raid_device:
return MappedDevice(
device=self.raid_device, device_provider=self
)
def create_degraded_raid(self, raid_level):
"""
Create a raid array in degraded mode with one device missing.
This only works in the raid levels 0(striping) and 1(mirroring)
:param string raid_level: raid level name
"""
if raid_level not in self.raid_level_map:
raise KiwiRaidSetupError(
'Only raid levels 0(striping) and 1(mirroring) are supported'
)
raid_device = None
for raid_id in range(9):
raid_device = '/dev/md' + format(raid_id)
if os.path.exists(raid_device):
raid_device = None
else:
break
if not raid_device:
raise KiwiRaidSetupError(
'Could not find free raid device in range md0-8'
)
log.info(
'Creating raid array in %s mode as %s',
raid_level, raid_device
)
Command.run(
[
'mdadm', '--create', '--run', raid_device,
'--level', self.raid_level_map[raid_level],
'--raid-disks', '2',
self.storage_provider.get_device(), 'missing'
]
)
self.raid_device = raid_device
def create_raid_config(self, filename):
"""
Create mdadm config file from mdadm request
:param string filename: config file name
"""
mdadm_call = Command.run(
['mdadm', '-Db', self.raid_device]
)
with open(filename, 'w') as mdadmconf:
mdadmconf.write(mdadm_call.output)
def is_loop(self):
"""
Check if storage provider is loop based
Return loop status from base storage provider
:return: True or False
:rtype: bool
"""
return self.storage_provider.is_loop()
def __del__(self):
if self.raid_device:
log.info('Cleaning up %s instance', type(self).__name__)
try:
Command.run(
['mdadm', '--stop', self.raid_device]
)
except Exception:
log.warning(
'Shutdown of raid device failed, %s still busy',
self.raid_device
)
| gpl-3.0 |
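`create_degraded_raid()` above probes `/dev/md0` through `/dev/md8` and takes the first node that does not exist yet. That scan can be expressed as a small standalone helper (a sketch mirroring the range used above):

```python
import os

def first_free_md_device(max_id=9):
    """Return the first /dev/mdN (N < max_id) not present on this system, or None."""
    for raid_id in range(max_id):
        candidate = '/dev/md{0}'.format(raid_id)
        if not os.path.exists(candidate):
            return candidate
    return None

device = first_free_md_device()
if device is None:
    raise RuntimeError('Could not find free raid device in range md0-8')
print(device)
```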
kisna72/django | django/db/models/query_utils.py | 184 | 11574 | """
Various data structures used in query construction.
Factored out from django.db.models.query to avoid making the main module very
large and/or so that they can be used by other modules without getting into
circular import difficulties.
"""
from __future__ import unicode_literals
from collections import namedtuple
from django.apps import apps
from django.core.exceptions import FieldDoesNotExist
from django.db.backends import utils
from django.db.models.constants import LOOKUP_SEP
from django.utils import tree
# PathInfo is used when converting lookups (fk__somecol). The contents
# describe the relation in Model terms (model Options and Fields for both
# sides of the relation. The join_field is the field backing the relation.
PathInfo = namedtuple('PathInfo', 'from_opts to_opts target_fields join_field m2m direct')
class InvalidQuery(Exception):
"""
The query passed to raw isn't a safe query to use with raw.
"""
pass
class QueryWrapper(object):
"""
A type that indicates the contents are an SQL fragment and the associated
parameters. Can be used to pass opaque data to a where-clause, for example.
"""
contains_aggregate = False
def __init__(self, sql, params):
self.data = sql, list(params)
def as_sql(self, compiler=None, connection=None):
return self.data
class Q(tree.Node):
"""
Encapsulates filters as objects that can then be combined logically (using
& and |).
"""
# Connection types
AND = 'AND'
OR = 'OR'
default = AND
def __init__(self, *args, **kwargs):
super(Q, self).__init__(children=list(args) + list(kwargs.items()))
def _combine(self, other, conn):
if not isinstance(other, Q):
raise TypeError(other)
obj = type(self)()
obj.connector = conn
obj.add(self, conn)
obj.add(other, conn)
return obj
def __or__(self, other):
return self._combine(other, self.OR)
def __and__(self, other):
return self._combine(other, self.AND)
def __invert__(self):
obj = type(self)()
obj.add(self, self.AND)
obj.negate()
return obj
def clone(self):
clone = self.__class__._new_instance(
children=[], connector=self.connector, negated=self.negated)
for child in self.children:
if hasattr(child, 'clone'):
clone.children.append(child.clone())
else:
clone.children.append(child)
return clone
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
# We must promote any new joins to left outer joins so that when Q is
# used as an expression, rows aren't filtered due to joins.
clause, joins = query._add_q(self, reuse, allow_joins=allow_joins, split_subq=False)
query.promote_joins(joins)
return clause
@classmethod
def _refs_aggregate(cls, obj, existing_aggregates):
if not isinstance(obj, tree.Node):
aggregate, aggregate_lookups = refs_aggregate(obj[0].split(LOOKUP_SEP), existing_aggregates)
if not aggregate and hasattr(obj[1], 'refs_aggregate'):
return obj[1].refs_aggregate(existing_aggregates)
return aggregate, aggregate_lookups
for c in obj.children:
aggregate, aggregate_lookups = cls._refs_aggregate(c, existing_aggregates)
if aggregate:
return aggregate, aggregate_lookups
return False, ()
def refs_aggregate(self, existing_aggregates):
if not existing_aggregates:
return False
return self._refs_aggregate(self, existing_aggregates)
class DeferredAttribute(object):
"""
A wrapper for a deferred-loading field. When the value is read from this
object the first time, the query is executed.
"""
def __init__(self, field_name, model):
self.field_name = field_name
def __get__(self, instance, owner):
"""
Retrieves and caches the value from the datastore on the first lookup.
Returns the cached value.
"""
non_deferred_model = instance._meta.proxy_for_model
opts = non_deferred_model._meta
assert instance is not None
data = instance.__dict__
if data.get(self.field_name, self) is self:
# self.field_name is the attname of the field, but only() takes the
# actual name, so we need to translate it here.
try:
f = opts.get_field(self.field_name)
except FieldDoesNotExist:
f = [f for f in opts.fields if f.attname == self.field_name][0]
name = f.name
# Let's see if the field is part of the parent chain. If so we
# might be able to reuse the already loaded value. Refs #18343.
val = self._check_parent_chain(instance, name)
if val is None:
instance.refresh_from_db(fields=[self.field_name])
val = getattr(instance, self.field_name)
data[self.field_name] = val
return data[self.field_name]
def __set__(self, instance, value):
"""
Deferred loading attributes can be set normally (which means there will
never be a database lookup involved).
"""
instance.__dict__[self.field_name] = value
def _check_parent_chain(self, instance, name):
"""
Check if the field value can be fetched from a parent field already
loaded in the instance. This can be done if the to-be fetched
field is a primary key field.
"""
opts = instance._meta
f = opts.get_field(name)
link_field = opts.get_ancestor_link(f.model)
if f.primary_key and f != link_field:
return getattr(instance, link_field.attname)
return None
def select_related_descend(field, restricted, requested, load_fields, reverse=False):
"""
Returns True if this field should be used to descend deeper for
select_related() purposes. Used by both the query construction code
(sql.query.fill_related_selections()) and the model instance creation code
(query.get_klass_info()).
Arguments:
* field - the field to be checked
* restricted - a boolean, indicating if the field list has been
manually restricted using a requested clause
* requested - The select_related() dictionary.
* load_fields - the set of fields to be loaded on this model
* reverse - boolean, True if we are checking a reverse select related
"""
if not field.remote_field:
return False
if field.remote_field.parent_link and not reverse:
return False
if restricted:
if reverse and field.related_query_name() not in requested:
return False
if not reverse and field.name not in requested:
return False
if not restricted and field.null:
return False
if load_fields:
if field.attname not in load_fields:
if restricted and field.name in requested:
raise InvalidQuery("Field %s.%s cannot be both deferred"
" and traversed using select_related"
" at the same time." %
(field.model._meta.object_name, field.name))
return False
return True
# This function is needed because data descriptors must be defined on a class
# object, not an instance, to have any effect.
def deferred_class_factory(model, attrs):
"""
Returns a class object that is a copy of "model" with the specified "attrs"
being replaced with DeferredAttribute objects.
"""
if not attrs:
return model
# Never create deferred models based on deferred model
if model._deferred:
# Deferred models are proxies for the non-deferred model. We never
# create chains of defers => proxy_for_model is the non-deferred
# model.
model = model._meta.proxy_for_model
# The app registry wants a unique name for each model, otherwise the new
# class won't be created (we get an exception). Therefore, we generate
# the name using the passed in attrs. It's OK to reuse an existing class
# object if the attrs are identical.
name = "%s_Deferred_%s" % (model.__name__, '_'.join(sorted(attrs)))
name = utils.truncate_name(name, 80, 32)
try:
return apps.get_model(model._meta.app_label, name)
except LookupError:
class Meta:
proxy = True
app_label = model._meta.app_label
overrides = {attr: DeferredAttribute(attr, model) for attr in attrs}
overrides["Meta"] = Meta
overrides["__module__"] = model.__module__
overrides["_deferred"] = True
return type(str(name), (model,), overrides)
# The above function is also used to unpickle model instances with deferred
# fields.
deferred_class_factory.__safe_for_unpickling__ = True
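# Illustrative sketch (hypothetical model class and attribute names): the
# factory returns a proxy subclass whose listed attributes are
# DeferredAttribute descriptors; an empty attrs set returns the original
# model unchanged.
def _deferred_class_example(model, deferred_attnames):
    deferred_cls = deferred_class_factory(model, set(deferred_attnames))
    if deferred_attnames:
        assert deferred_cls._deferred
        assert isinstance(deferred_cls.__dict__[next(iter(deferred_attnames))],
                          DeferredAttribute)
    return deferred_cls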
def refs_aggregate(lookup_parts, aggregates):
"""
A helper method to check if the lookup_parts contains references
to the given aggregates set. Because the LOOKUP_SEP is contained in the
default annotation names we must check each prefix of the lookup_parts
for a match.
"""
for n in range(len(lookup_parts) + 1):
level_n_lookup = LOOKUP_SEP.join(lookup_parts[0:n])
if level_n_lookup in aggregates and aggregates[level_n_lookup].contains_aggregate:
return aggregates[level_n_lookup], lookup_parts[n:]
return False, ()
def refs_expression(lookup_parts, annotations):
"""
A helper method to check if the lookup_parts contains references
to the given annotations set. Because the LOOKUP_SEP is contained in the
default annotation names we must check each prefix of the lookup_parts
for a match.
"""
for n in range(len(lookup_parts) + 1):
level_n_lookup = LOOKUP_SEP.join(lookup_parts[0:n])
if level_n_lookup in annotations and annotations[level_n_lookup]:
return annotations[level_n_lookup], lookup_parts[n:]
return False, ()
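# Illustrative sketch: the prefix walk above joins progressively longer
# prefixes of lookup_parts until one matches a key in the annotations dict,
# returning the matched annotation plus the remaining lookup parts. The
# annotation object below is a hypothetical stand-in.
def _refs_expression_example():
    class _FakeAnnotation(object):
        contains_aggregate = True
    annotations = {'total': _FakeAnnotation()}
    found, remainder = refs_expression(['total', 'gt'], annotations)
    assert isinstance(found, _FakeAnnotation)
    assert remainder == ['gt']
    return found, remainder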
def check_rel_lookup_compatibility(model, target_opts, field):
"""
Check that self.model is compatible with target_opts. Compatibility
is OK if:
1) model and opts match (where proxy inheritance is removed)
2) model is parent of opts' model or the other way around
"""
def check(opts):
return (
model._meta.concrete_model == opts.concrete_model or
opts.concrete_model in model._meta.get_parent_list() or
model in opts.get_parent_list()
)
# If the field is a primary key, then doing a query against the field's
# model is ok, too. Consider the case:
# class Restaurant(models.Model):
# place = OnetoOneField(Place, primary_key=True):
# Restaurant.objects.filter(pk__in=Restaurant.objects.all()).
# If we didn't have the primary key check, then pk__in (== place__in) would
# give Place's opts as the target opts, but Restaurant isn't compatible
# with that. This logic applies only to primary keys, as when doing __in=qs,
# we are going to turn this into __in=qs.values('pk') later on.
return (
check(target_opts) or
(getattr(field, 'primary_key', False) and check(field.model._meta))
)
| bsd-3-clause |
samba-team/samba | python/samba/provision/__init__.py | 1 | 99121 | # Unix SMB/CIFS implementation.
# backend code for provisioning a Samba AD server
# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2012
# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2008-2009
# Copyright (C) Oliver Liebel <oliver@itc.li> 2008-2009
#
# Based on the original in EJS:
# Copyright (C) Andrew Tridgell <tridge@samba.org> 2005
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Functions for setting up a Samba configuration."""
__docformat__ = "restructuredText"
from base64 import b64encode
import errno
import os
import stat
import re
import pwd
import grp
import logging
import time
import uuid
import socket
import tempfile
import samba.dsdb
import ldb
from samba.auth import system_session, admin_session
from samba.auth_util import system_session_unix
import samba
from samba import auth
from samba.samba3 import smbd, passdb
from samba.samba3 import param as s3param
from samba.dsdb import DS_DOMAIN_FUNCTION_2000
from samba import (
Ldb,
MAX_NETBIOS_NAME_LEN,
check_all_substituted,
is_valid_netbios_char,
setup_file,
substitute_var,
valid_netbios_name,
version,
is_heimdal_built,
)
from samba.dcerpc import security, misc
from samba.dcerpc.misc import (
SEC_CHAN_BDC,
SEC_CHAN_WKSTA,
)
from samba.dsdb import (
DS_DOMAIN_FUNCTION_2003,
DS_DOMAIN_FUNCTION_2008_R2,
ENC_ALL_TYPES,
)
from samba.idmap import IDmapDB
from samba.ms_display_specifiers import read_ms_ldif
from samba.ntacls import setntacl, getntacl, dsacl2fsacl
from samba.ndr import ndr_pack, ndr_unpack
from samba.provision.backend import (
LDBBackend,
)
from samba.descriptor import (
get_empty_descriptor,
get_config_descriptor,
get_config_partitions_descriptor,
get_config_sites_descriptor,
get_config_ntds_quotas_descriptor,
get_config_delete_protected1_descriptor,
get_config_delete_protected1wd_descriptor,
get_config_delete_protected2_descriptor,
get_domain_descriptor,
get_domain_infrastructure_descriptor,
get_domain_builtin_descriptor,
get_domain_computers_descriptor,
get_domain_users_descriptor,
get_domain_controllers_descriptor,
get_domain_delete_protected1_descriptor,
get_domain_delete_protected2_descriptor,
get_dns_partition_descriptor,
get_dns_forest_microsoft_dns_descriptor,
get_dns_domain_microsoft_dns_descriptor,
get_managed_service_accounts_descriptor,
)
from samba.provision.common import (
setup_path,
setup_add_ldif,
setup_modify_ldif,
FILL_FULL,
FILL_SUBDOMAIN,
FILL_NT4SYNC,
FILL_DRS
)
from samba.provision.sambadns import (
get_dnsadmins_sid,
setup_ad_dns,
create_dns_dir_keytab_link,
create_dns_update_list
)
import samba.param
import samba.registry
from samba.schema import Schema
from samba.samdb import SamDB
from samba.dbchecker import dbcheck
from samba.provision.kerberos import create_kdc_conf
from samba.samdb import get_default_backend_store
DEFAULT_POLICY_GUID = "31B2F340-016D-11D2-945F-00C04FB984F9"
DEFAULT_DC_POLICY_GUID = "6AC1786C-016F-11D2-945F-00C04FB984F9"
DEFAULTSITE = "Default-First-Site-Name"
LAST_PROVISION_USN_ATTRIBUTE = "lastProvisionUSN"
DEFAULT_MIN_PWD_LENGTH = 7
class ProvisionPaths(object):
def __init__(self):
self.shareconf = None
self.hklm = None
self.hkcu = None
self.hkcr = None
self.hku = None
self.hkpd = None
self.hkpt = None
self.samdb = None
self.idmapdb = None
self.secrets = None
self.keytab = None
self.dns_keytab = None
self.dns = None
self.winsdb = None
self.private_dir = None
self.binddns_dir = None
self.state_dir = None
class ProvisionNames(object):
def __init__(self):
self.ncs = None
self.rootdn = None
self.domaindn = None
self.configdn = None
self.schemadn = None
self.dnsforestdn = None
self.dnsdomaindn = None
self.ldapmanagerdn = None
self.dnsdomain = None
self.realm = None
self.netbiosname = None
self.domain = None
self.hostname = None
self.sitename = None
self.smbconf = None
self.domainsid = None
self.forestsid = None
self.domainguid = None
self.name_map = {}
def find_provision_key_parameters(samdb, secretsdb, idmapdb, paths, smbconf,
lp):
"""Get key provision parameters (realm, domain, ...) from a given provision
:param samdb: An LDB object connected to the sam.ldb file
:param secretsdb: An LDB object connected to the secrets.ldb file
:param idmapdb: An LDB object connected to the idmap.ldb file
:param paths: A ProvisionPaths object for the provision
:param smbconf: Path to the smb.conf file
:param lp: A LoadParm object
:return: A ProvisionNames object holding the key provision parameters
"""
names = ProvisionNames()
names.adminpass = None
# NT domain, kerberos realm, root dn, domain dn, domain dns name
names.domain = lp.get("workgroup").upper()
names.realm = lp.get("realm")
names.dnsdomain = names.realm.lower()
basedn = samba.dn_from_dns_name(names.dnsdomain)
names.realm = names.realm.upper()
# netbiosname
# Get the netbiosname first (could be obtained from smb.conf in theory)
res = secretsdb.search(expression="(flatname=%s)" %
names.domain, base="CN=Primary Domains",
scope=ldb.SCOPE_SUBTREE, attrs=["sAMAccountName"])
names.netbiosname = str(res[0]["sAMAccountName"]).replace("$", "")
names.smbconf = smbconf
# That's a bit simplistic but it's ok as long as we have only 3
# partitions
current = samdb.search(expression="(objectClass=*)",
base="", scope=ldb.SCOPE_BASE,
attrs=["defaultNamingContext", "schemaNamingContext",
"configurationNamingContext", "rootDomainNamingContext",
"namingContexts"])
names.configdn = str(current[0]["configurationNamingContext"][0])
names.schemadn = str(current[0]["schemaNamingContext"][0])
if not (ldb.Dn(samdb, basedn) == (ldb.Dn(samdb,
current[0]["defaultNamingContext"][0].decode('utf8')))):
raise ProvisioningError(("basedn in %s (%s) and from %s (%s)"
" is not the same ..." % (paths.samdb,
str(current[0]["defaultNamingContext"][0].decode('utf8')),
paths.smbconf, basedn)))
names.domaindn = str(current[0]["defaultNamingContext"][0])
names.rootdn = str(current[0]["rootDomainNamingContext"][0])
names.ncs = current[0]["namingContexts"]
names.dnsforestdn = None
names.dnsdomaindn = None
for i in range(0, len(names.ncs)):
nc = str(names.ncs[i])
dnsforestdn = "DC=ForestDnsZones,%s" % (str(names.rootdn))
if nc == dnsforestdn:
names.dnsforestdn = dnsforestdn
continue
dnsdomaindn = "DC=DomainDnsZones,%s" % (str(names.domaindn))
if nc == dnsdomaindn:
names.dnsdomaindn = dnsdomaindn
continue
# default site name
res3 = samdb.search(expression="(objectClass=site)",
base="CN=Sites," + str(names.configdn), scope=ldb.SCOPE_ONELEVEL, attrs=["cn"])
names.sitename = str(res3[0]["cn"])
# dns hostname and server dn
res4 = samdb.search(expression="(CN=%s)" % names.netbiosname,
base="OU=Domain Controllers,%s" % basedn,
scope=ldb.SCOPE_ONELEVEL, attrs=["dNSHostName"])
if len(res4) == 0:
raise ProvisioningError("Unable to find DC called CN=%s under OU=Domain Controllers,%s" % (names.netbiosname, basedn))
names.hostname = str(res4[0]["dNSHostName"]).replace("." + names.dnsdomain, "")
server_res = samdb.search(expression="serverReference=%s" % res4[0].dn,
attrs=[], base=names.configdn)
names.serverdn = str(server_res[0].dn)
# invocation id/objectguid
res5 = samdb.search(expression="(objectClass=*)",
base="CN=NTDS Settings,%s" % str(names.serverdn),
scope=ldb.SCOPE_BASE,
attrs=["invocationID", "objectGUID"])
names.invocation = str(ndr_unpack(misc.GUID, res5[0]["invocationId"][0]))
names.ntdsguid = str(ndr_unpack(misc.GUID, res5[0]["objectGUID"][0]))
# domain guid/sid
res6 = samdb.search(expression="(objectClass=*)", base=basedn,
scope=ldb.SCOPE_BASE, attrs=["objectGUID",
"objectSid", "msDS-Behavior-Version"])
names.domainguid = str(ndr_unpack(misc.GUID, res6[0]["objectGUID"][0]))
names.domainsid = ndr_unpack(security.dom_sid, res6[0]["objectSid"][0])
names.forestsid = ndr_unpack(security.dom_sid, res6[0]["objectSid"][0])
if res6[0].get("msDS-Behavior-Version") is None or \
int(res6[0]["msDS-Behavior-Version"][0]) < DS_DOMAIN_FUNCTION_2000:
names.domainlevel = DS_DOMAIN_FUNCTION_2000
else:
names.domainlevel = int(res6[0]["msDS-Behavior-Version"][0])
# policy guid
res7 = samdb.search(expression="(name={%s})" % DEFAULT_POLICY_GUID,
base="CN=Policies,CN=System," + basedn,
scope=ldb.SCOPE_ONELEVEL, attrs=["cn", "displayName"])
names.policyid = str(res7[0]["cn"]).replace("{", "").replace("}", "")
# dc policy guid
res8 = samdb.search(expression="(name={%s})" % DEFAULT_DC_POLICY_GUID,
base="CN=Policies,CN=System," + basedn,
scope=ldb.SCOPE_ONELEVEL,
attrs=["cn", "displayName"])
if len(res8) == 1:
names.policyid_dc = str(res8[0]["cn"]).replace("{", "").replace("}", "")
else:
names.policyid_dc = None
res9 = idmapdb.search(expression="(cn=%s-%s)" %
(str(names.domainsid), security.DOMAIN_RID_ADMINISTRATOR),
attrs=["xidNumber", "type"])
if len(res9) != 1:
raise ProvisioningError("Unable to find uid/gid for Domain Admins rid (%s-%s)" % (str(names.domainsid), security.DOMAIN_RID_ADMINISTRATOR))
if str(res9[0]["type"][0]) == "ID_TYPE_BOTH":
names.root_gid = int(res9[0]["xidNumber"][0])
else:
names.root_gid = pwd.getpwuid(int(res9[0]["xidNumber"][0])).pw_gid
res10 = samdb.search(expression="(samaccountname=dns)",
scope=ldb.SCOPE_SUBTREE, attrs=["dn"],
controls=["search_options:1:2"])
if (len(res10) > 0):
has_legacy_dns_account = True
else:
has_legacy_dns_account = False
res11 = samdb.search(expression="(samaccountname=dns-%s)" % names.netbiosname,
scope=ldb.SCOPE_SUBTREE, attrs=["dn"],
controls=["search_options:1:2"])
if (len(res11) > 0):
has_dns_account = True
else:
has_dns_account = False
if names.dnsdomaindn is not None:
if has_dns_account:
names.dns_backend = 'BIND9_DLZ'
else:
names.dns_backend = 'SAMBA_INTERNAL'
elif has_dns_account or has_legacy_dns_account:
names.dns_backend = 'BIND9_FLATFILE'
else:
names.dns_backend = 'NONE'
dns_admins_sid = get_dnsadmins_sid(samdb, names.domaindn)
names.name_map['DnsAdmins'] = str(dns_admins_sid)
return names
def update_provision_usn(samdb, low, high, id, replace=False):
"""Update the field provisionUSN in sam.ldb
This field is used to track the ranges of USNs modified by provision and
upgradeprovision.
This value is used afterwards by the next provision to figure out whether
the field has been modified since the last provision.
:param samdb: An LDB object connect to sam.ldb
:param low: The lowest USN modified by this upgrade
:param high: The highest USN modified by this upgrade
:param id: The invocation id of the samba's dc
:param replace: A boolean indicating whether the range should replace any
existing one or be appended to it (the default)
"""
tab = []
if not replace:
entry = samdb.search(base="@PROVISION",
scope=ldb.SCOPE_BASE,
attrs=[LAST_PROVISION_USN_ATTRIBUTE, "dn"])
for e in entry[0][LAST_PROVISION_USN_ATTRIBUTE]:
if not re.search(';', str(e)):
e = "%s;%s" % (str(e), id)
tab.append(str(e))
tab.append("%s-%s;%s" % (low, high, id))
delta = ldb.Message()
delta.dn = ldb.Dn(samdb, "@PROVISION")
delta[LAST_PROVISION_USN_ATTRIBUTE] = \
ldb.MessageElement(tab,
ldb.FLAG_MOD_REPLACE,
LAST_PROVISION_USN_ATTRIBUTE)
entry = samdb.search(expression='provisionnerID=*',
base="@PROVISION", scope=ldb.SCOPE_BASE,
attrs=["provisionnerID"])
if len(entry) == 0 or len(entry[0]) == 0:
delta["provisionnerID"] = ldb.MessageElement(id, ldb.FLAG_MOD_ADD, "provisionnerID")
samdb.modify(delta)
def set_provision_usn(samdb, low, high, id):
"""Set the field provisionUSN in sam.ldb
This field is used to track the ranges of USNs modified by provision and
upgradeprovision.
This value is used afterwards by the next provision to figure out whether
the field has been modified since the last provision.
:param samdb: An LDB object connect to sam.ldb
:param low: The lowest USN modified by this upgrade
:param high: The highest USN modified by this upgrade
:param id: The invocationId of the provision"""
tab = []
tab.append("%s-%s;%s" % (low, high, id))
delta = ldb.Message()
delta.dn = ldb.Dn(samdb, "@PROVISION")
delta[LAST_PROVISION_USN_ATTRIBUTE] = \
ldb.MessageElement(tab,
ldb.FLAG_MOD_ADD,
LAST_PROVISION_USN_ATTRIBUTE)
samdb.add(delta)
def get_max_usn(samdb, basedn):
"""This function returns the biggest USN present in the provision
:param samdb: An LDB object pointing to the sam.ldb
:param basedn: A string containing the base DN of the provision
(ie. DC=foo, DC=bar)
:return: The biggest USN in the provision"""
res = samdb.search(expression="objectClass=*", base=basedn,
scope=ldb.SCOPE_SUBTREE, attrs=["uSNChanged"],
controls=["search_options:1:2",
"server_sort:1:1:uSNChanged",
"paged_results:1:1"])
return res[0]["uSNChanged"]
def get_last_provision_usn(sam):
"""Get the USN ranges modified by a provision or an upgradeprovision
:param sam: An LDB object pointing to the sam.ldb
:return: a dictionary whose keys are invocation ids and whose values are
arrays of integers representing the different ranges
"""
try:
entry = sam.search(expression="%s=*" % LAST_PROVISION_USN_ATTRIBUTE,
base="@PROVISION", scope=ldb.SCOPE_BASE,
attrs=[LAST_PROVISION_USN_ATTRIBUTE, "provisionnerID"])
except ldb.LdbError as e1:
(ecode, emsg) = e1.args
if ecode == ldb.ERR_NO_SUCH_OBJECT:
return None
raise
if len(entry) > 0:
myids = []
range = {}
p = re.compile(r'-')
if entry[0].get("provisionnerID"):
for e in entry[0]["provisionnerID"]:
myids.append(str(e))
for r in entry[0][LAST_PROVISION_USN_ATTRIBUTE]:
tab1 = str(r).split(';')
if len(tab1) == 2:
id = tab1[1]
else:
id = "default"
if (len(myids) > 0 and id not in myids):
continue
tab2 = p.split(tab1[0])
if range.get(id) is None:
range[id] = []
range[id].append(tab2[0])
range[id].append(tab2[1])
return range
else:
return None
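# Illustrative sketch (hypothetical value): each lastProvisionUSN entry parsed
# above has the form "<low>-<high>;<invocation id>", and the function returns
# a dict mapping invocation ids to flat [low, high, low, high, ...] lists.
def _parse_provision_usn_range(value="3000-3999;fe0544c8-0000-0000-0000-000000000000"):
    usn_range, _, invocation = value.partition(';')
    low, high = usn_range.split('-')
    return invocation or "default", low, high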
class ProvisionResult(object):
"""Result of a provision.
:ivar server_role: The server role
:ivar paths: ProvisionPaths instance
:ivar domaindn: The domain dn, as string
"""
def __init__(self):
self.server_role = None
self.paths = None
self.domaindn = None
self.lp = None
self.samdb = None
self.idmap = None
self.names = None
self.domainsid = None
self.adminpass_generated = None
self.adminpass = None
self.backend_result = None
def report_logger(self, logger):
"""Report this provision result to a logger."""
logger.info(
"Once the above files are installed, your Samba AD server will "
"be ready to use")
if self.adminpass_generated:
logger.info("Admin password: %s", self.adminpass)
logger.info("Server Role: %s", self.server_role)
logger.info("Hostname: %s", self.names.hostname)
logger.info("NetBIOS Domain: %s", self.names.domain)
logger.info("DNS Domain: %s", self.names.dnsdomain)
logger.info("DOMAIN SID: %s", self.domainsid)
if self.backend_result:
self.backend_result.report_logger(logger)
def findnss(nssfn, names):
"""Find a user or group from a list of possibilities.
:param nssfn: NSS Function to try (should raise KeyError if not found)
:param names: Names to check.
:return: Value returned for the first name in the list that resolves.
"""
for name in names:
try:
return nssfn(name)
except KeyError:
pass
raise KeyError("Unable to find user/group in %r" % names)
def findnss_uid(names):
return findnss(pwd.getpwnam, names)[2]
def findnss_gid(names):
return findnss(grp.getgrnam, names)[2]
def get_root_uid(root, logger):
try:
root_uid = findnss_uid(root)
except KeyError as e:
logger.info(e)
logger.info("Assuming root user has UID zero")
root_uid = 0
return root_uid
def provision_paths_from_lp(lp, dnsdomain):
"""Set the default paths for provisioning.
:param lp: Loadparm context.
:param dnsdomain: DNS Domain name
"""
paths = ProvisionPaths()
paths.private_dir = lp.get("private dir")
paths.binddns_dir = lp.get("binddns dir")
paths.state_dir = lp.get("state directory")
# This is stored without path prefix for the "privateKeytab" attribute in
# "secrets_dns.ldif".
paths.dns_keytab = "dns.keytab"
paths.keytab = "secrets.keytab"
paths.shareconf = os.path.join(paths.private_dir, "share.ldb")
paths.samdb = os.path.join(paths.private_dir, "sam.ldb")
paths.idmapdb = os.path.join(paths.private_dir, "idmap.ldb")
paths.secrets = os.path.join(paths.private_dir, "secrets.ldb")
paths.privilege = os.path.join(paths.private_dir, "privilege.ldb")
paths.dns_update_list = os.path.join(paths.private_dir, "dns_update_list")
paths.spn_update_list = os.path.join(paths.private_dir, "spn_update_list")
paths.krb5conf = os.path.join(paths.private_dir, "krb5.conf")
paths.kdcconf = os.path.join(paths.private_dir, "kdc.conf")
paths.winsdb = os.path.join(paths.private_dir, "wins.ldb")
paths.s4_ldapi_path = os.path.join(paths.private_dir, "ldapi")
paths.encrypted_secrets_key_path = os.path.join(
paths.private_dir,
"encrypted_secrets.key")
paths.dns = os.path.join(paths.binddns_dir, "dns", dnsdomain + ".zone")
paths.namedconf = os.path.join(paths.binddns_dir, "named.conf")
paths.namedconf_update = os.path.join(paths.binddns_dir, "named.conf.update")
paths.namedtxt = os.path.join(paths.binddns_dir, "named.txt")
paths.hklm = "hklm.ldb"
paths.hkcr = "hkcr.ldb"
paths.hkcu = "hkcu.ldb"
paths.hku = "hku.ldb"
paths.hkpd = "hkpd.ldb"
paths.hkpt = "hkpt.ldb"
paths.sysvol = lp.get("path", "sysvol")
paths.netlogon = lp.get("path", "netlogon")
paths.smbconf = lp.configfile
return paths
def determine_netbios_name(hostname):
"""Determine a netbios name from a hostname."""
# remove forbidden chars and force the length to be <16
netbiosname = "".join([x for x in hostname if is_valid_netbios_char(x)])
return netbiosname[:MAX_NETBIOS_NAME_LEN].upper()
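# Illustrative sketch (hypothetical hostname): characters that are not valid
# in a NetBIOS name are dropped, and the result is truncated to
# MAX_NETBIOS_NAME_LEN characters and upper-cased.
def _netbios_name_example(hostname="dc1-longhostname-for-the-lab"):
    netbiosname = determine_netbios_name(hostname)
    assert len(netbiosname) <= MAX_NETBIOS_NAME_LEN
    assert netbiosname == netbiosname.upper()
    return netbiosname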
def guess_names(lp=None, hostname=None, domain=None, dnsdomain=None,
serverrole=None, rootdn=None, domaindn=None, configdn=None,
schemadn=None, serverdn=None, sitename=None,
domain_names_forced=False):
"""Guess configuration settings to use."""
if hostname is None:
hostname = socket.gethostname().split(".")[0]
netbiosname = lp.get("netbios name")
if netbiosname is None:
netbiosname = determine_netbios_name(hostname)
netbiosname = netbiosname.upper()
if not valid_netbios_name(netbiosname):
raise InvalidNetbiosName(netbiosname)
if dnsdomain is None:
dnsdomain = lp.get("realm")
if dnsdomain is None or dnsdomain == "":
raise ProvisioningError(
"guess_names: 'realm' not specified in supplied %s!" %
lp.configfile)
dnsdomain = dnsdomain.lower()
if serverrole is None:
serverrole = lp.get("server role")
if serverrole is None:
raise ProvisioningError("guess_names: 'server role' not specified in supplied %s!" % lp.configfile)
serverrole = serverrole.lower()
realm = dnsdomain.upper()
if lp.get("realm") == "":
raise ProvisioningError("guess_names: 'realm =' was not specified in supplied %s. Please remove the smb.conf file and let provision generate it" % lp.configfile)
if lp.get("realm").upper() != realm:
raise ProvisioningError("guess_names: 'realm=%s' in %s must match chosen realm '%s'! Please remove the smb.conf file and let provision generate it" % (lp.get("realm").upper(), lp.configfile, realm))
if lp.get("server role").lower() != serverrole:
raise ProvisioningError("guess_names: 'server role=%s' in %s must match chosen server role '%s'! Please remove the smb.conf file and let provision generate it" % (lp.get("server role"), lp.configfile, serverrole))
if serverrole == "active directory domain controller":
if domain is None:
# This will, for better or worse, default to 'WORKGROUP'
domain = lp.get("workgroup")
domain = domain.upper()
if lp.get("workgroup").upper() != domain:
raise ProvisioningError("guess_names: Workgroup '%s' in smb.conf must match chosen domain '%s'! Please remove the %s file and let provision generate it" % (lp.get("workgroup").upper(), domain, lp.configfile))
if domaindn is None:
domaindn = samba.dn_from_dns_name(dnsdomain)
if domain == netbiosname:
raise ProvisioningError("guess_names: Domain '%s' must not be equal to short host name '%s'!" % (domain, netbiosname))
else:
domain = netbiosname
if domaindn is None:
domaindn = "DC=" + netbiosname
if not valid_netbios_name(domain):
raise InvalidNetbiosName(domain)
if hostname.upper() == realm:
raise ProvisioningError("guess_names: Realm '%s' must not be equal to hostname '%s'!" % (realm, hostname))
if netbiosname.upper() == realm:
raise ProvisioningError("guess_names: Realm '%s' must not be equal to NetBIOS hostname '%s'!" % (realm, netbiosname))
if domain == realm and not domain_names_forced:
raise ProvisioningError("guess_names: Realm '%s' must not be equal to short domain name '%s'!" % (realm, domain))
if serverrole != "active directory domain controller":
#
# This is the code path for a domain member
# where we provision the database as if we were
# on a domain controller, so we should not use
# the same dnsdomain as the domain controllers
# of our primary domain.
#
# This will be important if we start doing
# SID/name filtering and reject the local
# sid and names if they come from a domain
# controller.
#
realm = netbiosname
dnsdomain = netbiosname.lower()
if rootdn is None:
rootdn = domaindn
if configdn is None:
configdn = "CN=Configuration," + rootdn
if schemadn is None:
schemadn = "CN=Schema," + configdn
if sitename is None:
sitename = DEFAULTSITE
names = ProvisionNames()
names.rootdn = rootdn
names.domaindn = domaindn
names.configdn = configdn
names.schemadn = schemadn
names.ldapmanagerdn = "CN=Manager," + rootdn
names.dnsdomain = dnsdomain
names.domain = domain
names.realm = realm
names.netbiosname = netbiosname
names.hostname = hostname
names.sitename = sitename
names.serverdn = "CN=%s,CN=Servers,CN=%s,CN=Sites,%s" % (
netbiosname, sitename, configdn)
return names
def make_smbconf(smbconf, hostname, domain, realm, targetdir,
serverrole=None, eadb=False, use_ntvfs=False, lp=None,
global_param=None):
"""Create a new smb.conf file based on a couple of basic settings.
"""
assert smbconf is not None
if hostname is None:
hostname = socket.gethostname().split(".")[0]
netbiosname = determine_netbios_name(hostname)
if serverrole is None:
serverrole = "standalone server"
assert domain is not None
domain = domain.upper()
assert realm is not None
realm = realm.upper()
global_settings = {
"netbios name": netbiosname,
"workgroup": domain,
"realm": realm,
"server role": serverrole,
}
if lp is None:
lp = samba.param.LoadParm()
# Load the existing smb.conf file, if there is one
if os.path.exists(smbconf):
lp.load(smbconf)
if global_param is not None:
for ent in global_param:
if global_param[ent] is not None:
global_settings[ent] = " ".join(global_param[ent])
if targetdir is not None:
global_settings["private dir"] = os.path.abspath(os.path.join(targetdir, "private"))
global_settings["lock dir"] = os.path.abspath(targetdir)
global_settings["state directory"] = os.path.abspath(os.path.join(targetdir, "state"))
global_settings["cache directory"] = os.path.abspath(os.path.join(targetdir, "cache"))
global_settings["binddns dir"] = os.path.abspath(os.path.join(targetdir, "bind-dns"))
lp.set("lock dir", os.path.abspath(targetdir))
lp.set("state directory", global_settings["state directory"])
lp.set("cache directory", global_settings["cache directory"])
lp.set("binddns dir", global_settings["binddns dir"])
if eadb:
if use_ntvfs:
if targetdir is not None:
privdir = os.path.join(targetdir, "private")
lp.set("posix:eadb",
os.path.abspath(os.path.join(privdir, "eadb.tdb")))
elif not lp.get("posix:eadb"):
privdir = lp.get("private dir")
lp.set("posix:eadb",
os.path.abspath(os.path.join(privdir, "eadb.tdb")))
else:
if targetdir is not None:
statedir = os.path.join(targetdir, "state")
lp.set("xattr_tdb:file",
os.path.abspath(os.path.join(statedir, "xattr.tdb")))
elif not lp.get("xattr_tdb:file"):
statedir = lp.get("state directory")
lp.set("xattr_tdb:file",
os.path.abspath(os.path.join(statedir, "xattr.tdb")))
shares = {}
if serverrole == "active directory domain controller":
shares["sysvol"] = os.path.join(lp.get("state directory"), "sysvol")
shares["netlogon"] = os.path.join(shares["sysvol"], realm.lower(),
"scripts")
else:
global_settings["passdb backend"] = "samba_dsdb"
f = open(smbconf, 'w')
try:
f.write("[globals]\n")
for key, val in global_settings.items():
f.write("\t%s = %s\n" % (key, val))
f.write("\n")
for name, path in shares.items():
f.write("[%s]\n" % name)
f.write("\tpath = %s\n" % path)
f.write("\tread only = no\n")
f.write("\n")
finally:
f.close()
# reload the smb.conf
lp.load(smbconf)
# and dump it without any values that are the default
# this ensures that any smb.conf parameters that were set
# on the provision/join command line are set in the resulting smb.conf
lp.dump(False, smbconf)
def setup_name_mappings(idmap, sid, root_uid, nobody_uid,
users_gid, root_gid):
"""Setup reasonable name mappings from SAM names to unix names.
:param idmap: IDmap db object.
:param sid: The domain sid.
:param root_uid: uid of the UNIX root user.
:param nobody_uid: uid of the UNIX nobody user.
:param users_gid: gid of the UNIX users group.
:param root_gid: gid of the UNIX root group.
"""
idmap.setup_name_mapping("S-1-5-7", idmap.TYPE_UID, nobody_uid)
idmap.setup_name_mapping(sid + "-500", idmap.TYPE_UID, root_uid)
idmap.setup_name_mapping(sid + "-513", idmap.TYPE_GID, users_gid)
def setup_samdb_partitions(samdb_path, logger, lp, session_info,
provision_backend, names, serverrole,
erase=False, plaintext_secrets=False,
backend_store=None, backend_store_size=None):
"""Setup the partitions for the SAM database.
Alternatively, provision() may call this, and then populate the database.
:note: This will wipe the Sam Database!
:note: This function always removes the local SAM LDB file. The erase
parameter controls whether to erase the existing data, which
may not be stored locally but in LDAP.
"""
assert session_info is not None
# We use options=["modules:"] to stop the modules loading - we
# just want to wipe and re-initialise the database, not start it up
try:
os.unlink(samdb_path)
except OSError:
pass
samdb = Ldb(url=samdb_path, session_info=session_info,
lp=lp, options=["modules:"])
ldap_backend_line = "# No LDAP backend"
if provision_backend.type != "ldb":
ldap_backend_line = "ldapBackend: %s" % provision_backend.ldap_uri
required_features = None
if not plaintext_secrets:
required_features = "requiredFeatures: encryptedSecrets"
if backend_store is None:
backend_store = get_default_backend_store()
backend_store_line = "backendStore: %s" % backend_store
if backend_store == "mdb":
if required_features is not None:
required_features += "\n"
else:
required_features = ""
required_features += "requiredFeatures: lmdbLevelOne"
if required_features is None:
required_features = "# No required features"
samdb.transaction_start()
try:
logger.info("Setting up sam.ldb partitions and settings")
setup_add_ldif(samdb, setup_path("provision_partitions.ldif"), {
"LDAP_BACKEND_LINE": ldap_backend_line,
"BACKEND_STORE": backend_store_line
})
setup_add_ldif(samdb, setup_path("provision_init.ldif"), {
"BACKEND_TYPE": provision_backend.type,
"SERVER_ROLE": serverrole,
"REQUIRED_FEATURES": required_features
})
logger.info("Setting up sam.ldb rootDSE")
setup_samdb_rootdse(samdb, names)
except:
samdb.transaction_cancel()
raise
else:
samdb.transaction_commit()
def secretsdb_self_join(secretsdb, domain,
netbiosname, machinepass, domainsid=None,
realm=None, dnsdomain=None,
keytab_path=None,
key_version_number=1,
secure_channel_type=SEC_CHAN_WKSTA):
"""Add domain join-specific bits to a secrets database.
:param secretsdb: Ldb Handle to the secrets database
:param machinepass: Machine password
"""
attrs = ["whenChanged",
"secret",
"priorSecret",
"priorChanged",
"krb5Keytab",
"privateKeytab"]
if realm is not None:
if dnsdomain is None:
dnsdomain = realm.lower()
dnsname = '%s.%s' % (netbiosname.lower(), dnsdomain.lower())
else:
dnsname = None
shortname = netbiosname.lower()
# We don't need to set msg["flatname"] here, because rdn_name will handle
# it, and it causes problems for modifies anyway
msg = ldb.Message(ldb.Dn(secretsdb, "flatname=%s,cn=Primary Domains" % domain))
msg["secureChannelType"] = [str(secure_channel_type)]
msg["objectClass"] = ["top", "primaryDomain"]
if dnsname is not None:
msg["objectClass"] = ["top", "primaryDomain", "kerberosSecret"]
msg["realm"] = [realm]
msg["saltPrincipal"] = ["host/%s@%s" % (dnsname, realm.upper())]
msg["msDS-KeyVersionNumber"] = [str(key_version_number)]
msg["privateKeytab"] = ["secrets.keytab"]
msg["secret"] = [machinepass.encode('utf-8')]
msg["samAccountName"] = ["%s$" % netbiosname]
msg["secureChannelType"] = [str(secure_channel_type)]
if domainsid is not None:
msg["objectSid"] = [ndr_pack(domainsid)]
# This complex expression tries to ensure that we don't have more
# than one record for this SID, realm or netbios domain at a time,
# but we don't delete the old record that we are about to modify,
# because that would delete the keytab and previous password.
res = secretsdb.search(base="cn=Primary Domains", attrs=attrs,
expression=("(&(|(flatname=%s)(realm=%s)(objectSid=%s))(objectclass=primaryDomain)(!(distinguishedName=%s)))" % (domain, realm, str(domainsid), str(msg.dn))),
scope=ldb.SCOPE_ONELEVEL)
for del_msg in res:
secretsdb.delete(del_msg.dn)
res = secretsdb.search(base=msg.dn, attrs=attrs, scope=ldb.SCOPE_BASE)
if len(res) == 1:
msg["priorSecret"] = [res[0]["secret"][0]]
try:
msg["priorWhenChanged"] = [res[0]["whenChanged"][0]]
except KeyError:
pass
try:
msg["privateKeytab"] = [res[0]["privateKeytab"][0]]
except KeyError:
pass
try:
msg["krb5Keytab"] = [res[0]["krb5Keytab"][0]]
except KeyError:
pass
for el in msg:
if el != 'dn':
msg[el].set_flags(ldb.FLAG_MOD_REPLACE)
secretsdb.modify(msg)
secretsdb.rename(res[0].dn, msg.dn)
else:
spn = ['HOST/%s' % shortname]
if secure_channel_type == SEC_CHAN_BDC and dnsname is not None:
# if we are a domain controller, add servicePrincipalName
# entries for the keytab code to update.
spn.extend(['HOST/%s' % dnsname])
msg["servicePrincipalName"] = spn
secretsdb.add(msg)
def setup_secretsdb(paths, session_info, lp):
"""Setup the secrets database.
:note: This function does not handle exceptions or transactions on purpose;
it is up to the caller to do this job.
:param paths: ProvisionPaths object providing the secrets database path.
:param session_info: Session info.
:param lp: Loadparm context
:return: LDB handle for the created secrets database
"""
if os.path.exists(paths.secrets):
os.unlink(paths.secrets)
keytab_path = os.path.join(paths.private_dir, paths.keytab)
if os.path.exists(keytab_path):
os.unlink(keytab_path)
bind_dns_keytab_path = os.path.join(paths.binddns_dir, paths.dns_keytab)
if os.path.exists(bind_dns_keytab_path):
os.unlink(bind_dns_keytab_path)
dns_keytab_path = os.path.join(paths.private_dir, paths.dns_keytab)
if os.path.exists(dns_keytab_path):
os.unlink(dns_keytab_path)
path = paths.secrets
secrets_ldb = Ldb(path, session_info=session_info, lp=lp)
secrets_ldb.erase()
secrets_ldb.load_ldif_file_add(setup_path("secrets_init.ldif"))
secrets_ldb = Ldb(path, session_info=session_info, lp=lp)
secrets_ldb.transaction_start()
try:
secrets_ldb.load_ldif_file_add(setup_path("secrets.ldif"))
except:
secrets_ldb.transaction_cancel()
raise
return secrets_ldb
def setup_privileges(path, session_info, lp):
"""Setup the privileges database.
:param path: Path to the privileges database.
:param session_info: Session info.
:param lp: Loadparm context
"""
if os.path.exists(path):
os.unlink(path)
privilege_ldb = Ldb(path, session_info=session_info, lp=lp)
privilege_ldb.erase()
privilege_ldb.load_ldif_file_add(setup_path("provision_privilege.ldif"))
def setup_encrypted_secrets_key(path):
"""Setup the encrypted secrets key file.
Any existing key file will be deleted and a new random key generated.
:param path: Path to the secrets key file.
"""
if os.path.exists(path):
os.unlink(path)
flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL
mode = stat.S_IRUSR | stat.S_IWUSR
umask_original = os.umask(0)
try:
fd = os.open(path, flags, mode)
finally:
os.umask(umask_original)
with os.fdopen(fd, 'wb') as f:
key = samba.generate_random_bytes(16)
f.write(key)
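# Illustrative sketch: the key written above is 16 random bytes in a file
# created with owner-only permissions (0600). A read-back helper, assuming
# the caller is the owning user, could look like this.
def _read_encrypted_secrets_key(path):
    with open(path, 'rb') as f:
        key = f.read()
    assert len(key) == 16
    return key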
def setup_registry(path, session_info, lp):
"""Setup the registry.
:param path: Path to the registry database
:param session_info: Session information
:param lp: Loadparm context
"""
reg = samba.registry.Registry()
hive = samba.registry.open_ldb(path, session_info=session_info, lp_ctx=lp)
reg.mount_hive(hive, samba.registry.HKEY_LOCAL_MACHINE)
provision_reg = setup_path("provision.reg")
assert os.path.exists(provision_reg)
reg.diff_apply(provision_reg)
def setup_idmapdb(path, session_info, lp):
"""Setup the idmap database.
:param path: path to the idmap database
:param session_info: Session information
:param lp: Loadparm context
"""
if os.path.exists(path):
os.unlink(path)
idmap_ldb = IDmapDB(path, session_info=session_info, lp=lp)
idmap_ldb.erase()
idmap_ldb.load_ldif_file_add(setup_path("idmap_init.ldif"))
return idmap_ldb
def setup_samdb_rootdse(samdb, names):
"""Setup the SamDB rootdse.
:param samdb: Sam Database handle
"""
setup_add_ldif(samdb, setup_path("provision_rootdse_add.ldif"), {
"SCHEMADN": names.schemadn,
"DOMAINDN": names.domaindn,
"ROOTDN": names.rootdn,
"CONFIGDN": names.configdn,
"SERVERDN": names.serverdn,
})
def setup_self_join(samdb, admin_session_info, names, fill, machinepass,
dns_backend, dnspass, domainsid, next_rid, invocationid,
policyguid, policyguid_dc,
domainControllerFunctionality, ntdsguid=None, dc_rid=None):
"""Join a host to its own domain."""
assert isinstance(invocationid, str)
if ntdsguid is not None:
ntdsguid_line = "objectGUID: %s\n" % ntdsguid
else:
ntdsguid_line = ""
if dc_rid is None:
dc_rid = next_rid
setup_add_ldif(samdb, setup_path("provision_self_join.ldif"), {
"CONFIGDN": names.configdn,
"SCHEMADN": names.schemadn,
"DOMAINDN": names.domaindn,
"SERVERDN": names.serverdn,
"INVOCATIONID": invocationid,
"NETBIOSNAME": names.netbiosname,
"DNSNAME": "%s.%s" % (names.hostname, names.dnsdomain),
"MACHINEPASS_B64": b64encode(machinepass.encode('utf-16-le')).decode('utf8'),
"DOMAINSID": str(domainsid),
"DCRID": str(dc_rid),
"SAMBA_VERSION_STRING": version,
"NTDSGUID": ntdsguid_line,
"DOMAIN_CONTROLLER_FUNCTIONALITY": str(
domainControllerFunctionality),
"RIDALLOCATIONSTART": str(next_rid + 100),
"RIDALLOCATIONEND": str(next_rid + 100 + 499)})
setup_add_ldif(samdb, setup_path("provision_group_policy.ldif"), {
"POLICYGUID": policyguid,
"POLICYGUID_DC": policyguid_dc,
"DNSDOMAIN": names.dnsdomain,
"DOMAINDN": names.domaindn})
# If we are setting up a subdomain, then this has been replicated in, so we
# don't need to add it
if fill == FILL_FULL:
setup_add_ldif(samdb, setup_path("provision_self_join_config.ldif"), {
"CONFIGDN": names.configdn,
"SCHEMADN": names.schemadn,
"DOMAINDN": names.domaindn,
"SERVERDN": names.serverdn,
"INVOCATIONID": invocationid,
"NETBIOSNAME": names.netbiosname,
"DNSNAME": "%s.%s" % (names.hostname, names.dnsdomain),
"MACHINEPASS_B64": b64encode(machinepass.encode('utf-16-le')).decode('utf8'),
"DOMAINSID": str(domainsid),
"DCRID": str(dc_rid),
"SAMBA_VERSION_STRING": version,
"NTDSGUID": ntdsguid_line,
"DOMAIN_CONTROLLER_FUNCTIONALITY": str(
domainControllerFunctionality)})
# Setup fSMORoleOwner entries to point at the newly created DC entry
setup_modify_ldif(samdb,
setup_path("provision_self_join_modify_schema.ldif"), {
"SCHEMADN": names.schemadn,
"SERVERDN": names.serverdn,
},
controls=["provision:0", "relax:0"])
setup_modify_ldif(samdb,
setup_path("provision_self_join_modify_config.ldif"), {
"CONFIGDN": names.configdn,
"DEFAULTSITE": names.sitename,
"NETBIOSNAME": names.netbiosname,
"SERVERDN": names.serverdn,
})
system_session_info = system_session()
samdb.set_session_info(system_session_info)
# Setup fSMORoleOwner entries to point at the newly created DC entry. To
# modify a serverReference under cn=config when we are a subdomain, we must
# be system due to ACLs.
setup_modify_ldif(samdb, setup_path("provision_self_join_modify.ldif"), {
"DOMAINDN": names.domaindn,
"SERVERDN": names.serverdn,
"NETBIOSNAME": names.netbiosname,
})
samdb.set_session_info(admin_session_info)
if dns_backend != "SAMBA_INTERNAL":
# This is Samba4 specific and should be replaced by the correct
# DNS AD-style setup
setup_add_ldif(samdb, setup_path("provision_dns_add_samba.ldif"), {
"DNSDOMAIN": names.dnsdomain,
"DOMAINDN": names.domaindn,
"DNSPASS_B64": b64encode(dnspass.encode('utf-16-le')).decode('utf8'),
"HOSTNAME": names.hostname,
"DNSNAME": '%s.%s' % (
names.netbiosname.lower(), names.dnsdomain.lower())
})
def getpolicypath(sysvolpath, dnsdomain, guid):
"""Return the physical path of policy given its guid.
:param sysvolpath: Path to the sysvol folder
:param dnsdomain: DNS name of the AD domain
:param guid: The GUID of the policy
:return: A string with the complete path to the policy folder
"""
if guid[0] != "{":
guid = "{%s}" % guid
policy_path = os.path.join(sysvolpath, dnsdomain, "Policies", guid)
return policy_path
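# Illustrative sketch (hypothetical sysvol path): the GUID is wrapped in
# braces when the braces are missing, so the default domain policy for
# example.com ends up under
# <sysvol>/example.com/Policies/{31B2F340-016D-11D2-945F-00C04FB984F9}.
def _policy_path_example(sysvolpath="/var/lib/samba/sysvol"):
    return getpolicypath(sysvolpath, "example.com", DEFAULT_POLICY_GUID)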
def create_gpo_struct(policy_path):
if not os.path.exists(policy_path):
os.makedirs(policy_path, 0o775)
f = open(os.path.join(policy_path, "GPT.INI"), 'w')
try:
f.write("[General]\r\nVersion=0")
finally:
f.close()
p = os.path.join(policy_path, "MACHINE")
if not os.path.exists(p):
os.makedirs(p, 0o775)
p = os.path.join(policy_path, "USER")
if not os.path.exists(p):
os.makedirs(p, 0o775)
def create_default_gpo(sysvolpath, dnsdomain, policyguid, policyguid_dc):
"""Create the default GPO for a domain
:param sysvolpath: Physical path for the sysvol folder
:param dnsdomain: DNS domain name of the AD domain
:param policyguid: GUID of the default domain policy
:param policyguid_dc: GUID of the default domain controller policy
"""
policy_path = getpolicypath(sysvolpath, dnsdomain, policyguid)
create_gpo_struct(policy_path)
policy_path = getpolicypath(sysvolpath, dnsdomain, policyguid_dc)
create_gpo_struct(policy_path)
# Default the database size to 8GB
DEFAULT_BACKEND_SIZE = 8 * 1024 * 1024 * 1024
def setup_samdb(path, session_info, provision_backend, lp, names,
logger, fill, serverrole, schema, am_rodc=False,
plaintext_secrets=False, backend_store=None,
backend_store_size=None, batch_mode=False):
"""Setup a complete SAM Database.
:note: This will wipe the main SAM database file!
"""
# Also wipes the database
setup_samdb_partitions(path, logger=logger, lp=lp,
provision_backend=provision_backend, session_info=session_info,
names=names, serverrole=serverrole, plaintext_secrets=plaintext_secrets,
backend_store=backend_store,
backend_store_size=backend_store_size)
store_size = DEFAULT_BACKEND_SIZE
if backend_store_size:
store_size = backend_store_size
options = []
if backend_store == "mdb":
options.append("lmdb_env_size:" + str(store_size))
if batch_mode:
options.append("batch_mode:1")
if batch_mode:
# Estimate the number of index records in the transaction_index_cache
# Numbers chosen give the prime 202481 for the default backend size,
# which works well for a 100,000 user database
cache_size = int(store_size / 42423) + 1
options.append("transaction_index_cache_size:" + str(cache_size))
# Load the database, but don't load the global schema and don't connect
# quite yet
samdb = SamDB(session_info=session_info, url=None, auto_connect=False,
lp=lp,
global_schema=False, am_rodc=am_rodc, options=options)
logger.info("Pre-loading the Samba 4 and AD schema")
# Load the schema from the one we computed earlier
samdb.set_schema(schema, write_indices_and_attributes=False)
# Set the NTDS settings DN manually - in order to have it already around
# before the provisioned tree exists and we connect
samdb.set_ntds_settings_dn("CN=NTDS Settings,%s" % names.serverdn)
# And now we can connect to the DB - the schema won't be loaded from the
# DB
try:
samdb.connect(path, options=options)
except ldb.LdbError as e2:
(num, string_error) = e2.args
if (num == ldb.ERR_INSUFFICIENT_ACCESS_RIGHTS):
raise ProvisioningError("Permission denied connecting to %s, are you running as root?" % path)
else:
raise
# But we have to give it one more kick to have it use the schema
# during provision - it needs, now that it is connected, to write
# the schema @ATTRIBUTES and @INDEXLIST records to the database.
samdb.set_schema(schema, write_indices_and_attributes=True)
return samdb
def fill_samdb(samdb, lp, names, logger, policyguid,
policyguid_dc, fill, adminpass, krbtgtpass, machinepass, dns_backend,
dnspass, invocationid, ntdsguid, serverrole, am_rodc=False,
dom_for_fun_level=None, schema=None, next_rid=None, dc_rid=None,
backend_store=None,
backend_store_size=None):
if next_rid is None:
next_rid = 1000
# Provision does not make much sense with values larger than 1000000000
# as the upper range of the rIDAvailablePool is 1073741823 and
# we don't want to create a domain that cannot allocate rids.
if next_rid < 1000 or next_rid > 1000000000:
error = "You want to run SAMBA 4 with a next_rid of %u, " % (next_rid)
error += "the valid range is %u-%u. The default is %u." % (
1000, 1000000000, 1000)
raise ProvisioningError(error)
# ATTENTION: Do NOT change these default values without discussion with the
# team and/or release manager. They have a big impact on the whole program!
domainControllerFunctionality = DS_DOMAIN_FUNCTION_2008_R2
if dom_for_fun_level is None:
dom_for_fun_level = DS_DOMAIN_FUNCTION_2008_R2
if dom_for_fun_level > domainControllerFunctionality:
raise ProvisioningError("You want to run SAMBA 4 on a domain and forest function level which itself is higher than its actual DC function level (2008_R2). This won't work!")
domainFunctionality = dom_for_fun_level
forestFunctionality = dom_for_fun_level
# Set the NTDS settings DN manually - in order to have it already around
# before the provisioned tree exists and we connect
samdb.set_ntds_settings_dn("CN=NTDS Settings,%s" % names.serverdn)
# Set the domain functionality levels onto the database.
# Various module (the password_hash module in particular) need
# to know what level of AD we are emulating.
# These will be fixed into the database via the database
# modifications below, but we need them set from the start.
samdb.set_opaque_integer("domainFunctionality", domainFunctionality)
samdb.set_opaque_integer("forestFunctionality", forestFunctionality)
samdb.set_opaque_integer("domainControllerFunctionality",
domainControllerFunctionality)
samdb.set_domain_sid(str(names.domainsid))
samdb.set_invocation_id(invocationid)
logger.info("Adding DomainDN: %s" % names.domaindn)
# impersonate domain admin
admin_session_info = admin_session(lp, str(names.domainsid))
samdb.set_session_info(admin_session_info)
if names.domainguid is not None:
domainguid_line = "objectGUID: %s\n-" % names.domainguid
else:
domainguid_line = ""
descr = b64encode(get_domain_descriptor(names.domainsid)).decode('utf8')
setup_add_ldif(samdb, setup_path("provision_basedn.ldif"), {
"DOMAINDN": names.domaindn,
"DOMAINSID": str(names.domainsid),
"DESCRIPTOR": descr,
"DOMAINGUID": domainguid_line
})
setup_modify_ldif(samdb, setup_path("provision_basedn_modify.ldif"), {
"DOMAINDN": names.domaindn,
"CREATTIME": str(samba.unix2nttime(int(time.time()))),
"NEXTRID": str(next_rid),
"DEFAULTSITE": names.sitename,
"CONFIGDN": names.configdn,
"POLICYGUID": policyguid,
"DOMAIN_FUNCTIONALITY": str(domainFunctionality),
"SAMBA_VERSION_STRING": version,
"MIN_PWD_LENGTH": str(DEFAULT_MIN_PWD_LENGTH)
})
# If we are setting up a subdomain, then this has been replicated in, so we don't need to add it
if fill == FILL_FULL:
logger.info("Adding configuration container")
descr = b64encode(get_config_descriptor(names.domainsid)).decode('utf8')
setup_add_ldif(samdb, setup_path("provision_configuration_basedn.ldif"), {
"CONFIGDN": names.configdn,
"DESCRIPTOR": descr,
})
# The LDIF here was created when the Schema object was constructed
ignore_checks_oid = "local_oid:%s:0" % samba.dsdb.DSDB_CONTROL_SKIP_DUPLICATES_CHECK_OID
schema_controls = [
"provision:0",
"relax:0",
ignore_checks_oid
]
logger.info("Setting up sam.ldb schema")
samdb.add_ldif(schema.schema_dn_add, controls=schema_controls)
samdb.modify_ldif(schema.schema_dn_modify, controls=schema_controls)
samdb.write_prefixes_from_schema()
samdb.add_ldif(schema.schema_data, controls=schema_controls)
setup_add_ldif(samdb, setup_path("aggregate_schema.ldif"),
{"SCHEMADN": names.schemadn},
controls=schema_controls)
# Now register this container in the root of the forest
msg = ldb.Message(ldb.Dn(samdb, names.domaindn))
msg["subRefs"] = ldb.MessageElement(names.configdn, ldb.FLAG_MOD_ADD,
"subRefs")
samdb.invocation_id = invocationid
# If we are setting up a subdomain, then this has been replicated in, so we don't need to add it
if fill == FILL_FULL:
logger.info("Setting up sam.ldb configuration data")
partitions_descr = b64encode(get_config_partitions_descriptor(names.domainsid)).decode('utf8')
sites_descr = b64encode(get_config_sites_descriptor(names.domainsid)).decode('utf8')
ntdsquotas_descr = b64encode(get_config_ntds_quotas_descriptor(names.domainsid)).decode('utf8')
protected1_descr = b64encode(get_config_delete_protected1_descriptor(names.domainsid)).decode('utf8')
protected1wd_descr = b64encode(get_config_delete_protected1wd_descriptor(names.domainsid)).decode('utf8')
protected2_descr = b64encode(get_config_delete_protected2_descriptor(names.domainsid)).decode('utf8')
if "2008" in schema.base_schema:
# exclude 2012-specific changes if we're using a 2008 schema
incl_2012 = "#"
else:
incl_2012 = ""
setup_add_ldif(samdb, setup_path("provision_configuration.ldif"), {
"CONFIGDN": names.configdn,
"NETBIOSNAME": names.netbiosname,
"DEFAULTSITE": names.sitename,
"DNSDOMAIN": names.dnsdomain,
"DOMAIN": names.domain,
"SCHEMADN": names.schemadn,
"DOMAINDN": names.domaindn,
"SERVERDN": names.serverdn,
"FOREST_FUNCTIONALITY": str(forestFunctionality),
"DOMAIN_FUNCTIONALITY": str(domainFunctionality),
"NTDSQUOTAS_DESCRIPTOR": ntdsquotas_descr,
"LOSTANDFOUND_DESCRIPTOR": protected1wd_descr,
"SERVICES_DESCRIPTOR": protected1_descr,
"PHYSICALLOCATIONS_DESCRIPTOR": protected1wd_descr,
"FORESTUPDATES_DESCRIPTOR": protected1wd_descr,
"EXTENDEDRIGHTS_DESCRIPTOR": protected2_descr,
"PARTITIONS_DESCRIPTOR": partitions_descr,
"SITES_DESCRIPTOR": sites_descr,
})
setup_add_ldif(samdb, setup_path("extended-rights.ldif"), {
"CONFIGDN": names.configdn,
"INC2012": incl_2012,
})
logger.info("Setting up display specifiers")
display_specifiers_ldif = read_ms_ldif(
setup_path('display-specifiers/DisplaySpecifiers-Win2k8R2.txt'))
display_specifiers_ldif = substitute_var(display_specifiers_ldif,
{"CONFIGDN": names.configdn})
check_all_substituted(display_specifiers_ldif)
samdb.add_ldif(display_specifiers_ldif)
logger.info("Modifying display specifiers and extended rights")
setup_modify_ldif(samdb,
setup_path("provision_configuration_modify.ldif"), {
"CONFIGDN": names.configdn,
"DISPLAYSPECIFIERS_DESCRIPTOR": protected2_descr
})
logger.info("Adding users container")
users_desc = b64encode(get_domain_users_descriptor(names.domainsid)).decode('utf8')
setup_add_ldif(samdb, setup_path("provision_users_add.ldif"), {
"DOMAINDN": names.domaindn,
"USERS_DESCRIPTOR": users_desc
})
logger.info("Modifying users container")
setup_modify_ldif(samdb, setup_path("provision_users_modify.ldif"), {
"DOMAINDN": names.domaindn})
logger.info("Adding computers container")
computers_desc = b64encode(get_domain_computers_descriptor(names.domainsid)).decode('utf8')
setup_add_ldif(samdb, setup_path("provision_computers_add.ldif"), {
"DOMAINDN": names.domaindn,
"COMPUTERS_DESCRIPTOR": computers_desc
})
logger.info("Modifying computers container")
setup_modify_ldif(samdb,
setup_path("provision_computers_modify.ldif"), {
"DOMAINDN": names.domaindn})
logger.info("Setting up sam.ldb data")
infrastructure_desc = b64encode(get_domain_infrastructure_descriptor(names.domainsid)).decode('utf8')
lostandfound_desc = b64encode(get_domain_delete_protected2_descriptor(names.domainsid)).decode('utf8')
system_desc = b64encode(get_domain_delete_protected1_descriptor(names.domainsid)).decode('utf8')
builtin_desc = b64encode(get_domain_builtin_descriptor(names.domainsid)).decode('utf8')
controllers_desc = b64encode(get_domain_controllers_descriptor(names.domainsid)).decode('utf8')
setup_add_ldif(samdb, setup_path("provision.ldif"), {
"CREATTIME": str(samba.unix2nttime(int(time.time()))),
"DOMAINDN": names.domaindn,
"NETBIOSNAME": names.netbiosname,
"DEFAULTSITE": names.sitename,
"CONFIGDN": names.configdn,
"SERVERDN": names.serverdn,
"RIDAVAILABLESTART": str(next_rid + 600),
"POLICYGUID_DC": policyguid_dc,
"INFRASTRUCTURE_DESCRIPTOR": infrastructure_desc,
"LOSTANDFOUND_DESCRIPTOR": lostandfound_desc,
"SYSTEM_DESCRIPTOR": system_desc,
"BUILTIN_DESCRIPTOR": builtin_desc,
"DOMAIN_CONTROLLERS_DESCRIPTOR": controllers_desc,
})
# If we are setting up a subdomain, then this has been replicated in, so we don't need to add it
if fill == FILL_FULL:
managedservice_descr = b64encode(get_managed_service_accounts_descriptor(names.domainsid)).decode('utf8')
setup_modify_ldif(samdb,
setup_path("provision_configuration_references.ldif"), {
"CONFIGDN": names.configdn,
"SCHEMADN": names.schemadn})
logger.info("Setting up well known security principals")
protected1wd_descr = b64encode(get_config_delete_protected1wd_descriptor(names.domainsid)).decode('utf8')
setup_add_ldif(samdb, setup_path("provision_well_known_sec_princ.ldif"), {
"CONFIGDN": names.configdn,
"WELLKNOWNPRINCIPALS_DESCRIPTOR": protected1wd_descr,
}, controls=["relax:0", "provision:0"])
if fill == FILL_FULL or fill == FILL_SUBDOMAIN:
setup_modify_ldif(samdb,
setup_path("provision_basedn_references.ldif"), {
"DOMAINDN": names.domaindn,
"MANAGEDSERVICE_DESCRIPTOR": managedservice_descr
})
logger.info("Setting up sam.ldb users and groups")
setup_add_ldif(samdb, setup_path("provision_users.ldif"), {
"DOMAINDN": names.domaindn,
"DOMAINSID": str(names.domainsid),
"ADMINPASS_B64": b64encode(adminpass.encode('utf-16-le')).decode('utf8'),
"KRBTGTPASS_B64": b64encode(krbtgtpass.encode('utf-16-le')).decode('utf8')
}, controls=["relax:0", "provision:0"])
logger.info("Setting up self join")
setup_self_join(samdb, admin_session_info, names=names, fill=fill,
invocationid=invocationid,
dns_backend=dns_backend,
dnspass=dnspass,
machinepass=machinepass,
domainsid=names.domainsid,
next_rid=next_rid,
dc_rid=dc_rid,
policyguid=policyguid,
policyguid_dc=policyguid_dc,
domainControllerFunctionality=domainControllerFunctionality,
ntdsguid=ntdsguid)
ntds_dn = "CN=NTDS Settings,%s" % names.serverdn
names.ntdsguid = samdb.searchone(basedn=ntds_dn,
attribute="objectGUID", expression="", scope=ldb.SCOPE_BASE).decode('utf8')
assert isinstance(names.ntdsguid, str)
return samdb
SYSVOL_ACL = "O:LAG:BAD:P(A;OICI;0x001f01ff;;;BA)(A;OICI;0x001200a9;;;SO)(A;OICI;0x001f01ff;;;SY)(A;OICI;0x001200a9;;;AU)"
POLICIES_ACL = "O:LAG:BAD:P(A;OICI;0x001f01ff;;;BA)(A;OICI;0x001200a9;;;SO)(A;OICI;0x001f01ff;;;SY)(A;OICI;0x001200a9;;;AU)(A;OICI;0x001301bf;;;PA)"
SYSVOL_SERVICE = "sysvol"
def set_dir_acl(path, acl, lp, domsid, use_ntvfs, passdb, service=SYSVOL_SERVICE):
session_info = system_session_unix()
setntacl(lp, path, acl, domsid, session_info, use_ntvfs=use_ntvfs, skip_invalid_chown=True, passdb=passdb, service=service)
for root, dirs, files in os.walk(path, topdown=False):
for name in files:
setntacl(lp, os.path.join(root, name), acl, domsid, session_info,
use_ntvfs=use_ntvfs, skip_invalid_chown=True, passdb=passdb, service=service)
for name in dirs:
setntacl(lp, os.path.join(root, name), acl, domsid, session_info,
use_ntvfs=use_ntvfs, skip_invalid_chown=True, passdb=passdb, service=service)
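# Editor's illustrative sketch (not part of the original module): set_dir_acl()
# applies one SDDL ACL to a path and then to every file and directory beneath
# it. The path and the surrounding provision variables (lp, names, s4_passdb)
# below are assumptions made for the sake of the example.
#
#   set_dir_acl("/usr/local/samba/var/locks/sysvol/samdom.example.com/scripts",
#               SYSVOL_ACL, lp, str(names.domainsid),
#               use_ntvfs=False, passdb=s4_passdb)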
def set_gpos_acl(sysvol, dnsdomain, domainsid, domaindn, samdb, lp, use_ntvfs, passdb):
"""Set ACL on the sysvol/<dnsname>/Policies folder and the policy
folders beneath.
:param sysvol: Physical path for the sysvol folder
:param dnsdomain: The DNS name of the domain
:param domainsid: The SID of the domain
:param domaindn: The DN of the domain (ie. DC=...)
:param samdb: An LDB object on the SAM db
:param lp: an LP object
"""
# Set ACL for GPO root folder
root_policy_path = os.path.join(sysvol, dnsdomain, "Policies")
session_info = system_session_unix()
setntacl(lp, root_policy_path, POLICIES_ACL, str(domainsid), session_info,
use_ntvfs=use_ntvfs, skip_invalid_chown=True, passdb=passdb, service=SYSVOL_SERVICE)
res = samdb.search(base="CN=Policies,CN=System,%s" %(domaindn),
attrs=["cn", "nTSecurityDescriptor"],
expression="", scope=ldb.SCOPE_ONELEVEL)
for policy in res:
acl = ndr_unpack(security.descriptor,
policy["nTSecurityDescriptor"][0]).as_sddl()
policy_path = getpolicypath(sysvol, dnsdomain, str(policy["cn"]))
set_dir_acl(policy_path, dsacl2fsacl(acl, domainsid), lp,
str(domainsid), use_ntvfs,
passdb=passdb)
def setsysvolacl(samdb, netlogon, sysvol, uid, gid, domainsid, dnsdomain,
domaindn, lp, use_ntvfs):
"""Set the ACL for the sysvol share and the subfolders
:param samdb: An LDB object on the SAM db
:param netlogon: Physical path for the netlogon folder
:param sysvol: Physical path for the sysvol folder
:param uid: The UID of the "Administrator" user
:param gid: The GID of the "Domain administrators" group
:param domainsid: The SID of the domain
:param dnsdomain: The DNS name of the domain
:param domaindn: The DN of the domain (ie. DC=...)
"""
s4_passdb = None
if not use_ntvfs:
s3conf = s3param.get_context()
s3conf.load(lp.configfile)
file = tempfile.NamedTemporaryFile(dir=os.path.abspath(sysvol))
try:
try:
smbd.set_simple_acl(file.name, 0o755, system_session_unix(), gid)
except OSError:
if not smbd.have_posix_acls():
# This hint is only strictly correct for RPM and
# Debian-like Linux systems, but it should point other
# users in the right direction.
raise ProvisioningError("Samba was compiled without the posix ACL support that s3fs requires. "
"Try installing libacl1-dev or libacl-devel, then re-run configure and make.")
raise ProvisioningError("Your filesystem or build does not support posix ACLs, which s3fs requires. "
"Try the mounting the filesystem with the 'acl' option.")
try:
smbd.chown(file.name, uid, gid, system_session_unix())
except OSError:
raise ProvisioningError("Unable to chown a file on your filesystem. "
"You may not be running provision as root.")
finally:
file.close()
# This will ensure that the smbd code we are running when setting ACLs
# is initialised with the smb.conf
s3conf = s3param.get_context()
s3conf.load(lp.configfile)
# ensure we are using the right samba_dsdb passdb backend, no matter what
s3conf.set("passdb backend", "samba_dsdb:%s" % samdb.url)
passdb.reload_static_pdb()
# ensure that we init the samba_dsdb backend, so the domain sid is
# marked in secrets.tdb
s4_passdb = passdb.PDB(s3conf.get("passdb backend"))
# now ensure everything matches correctly, to avoid weird issues
if passdb.get_global_sam_sid() != domainsid:
raise ProvisioningError('SID as seen by smbd [%s] does not match SID as seen by the provision script [%s]!' % (passdb.get_global_sam_sid(), domainsid))
domain_info = s4_passdb.domain_info()
if domain_info["dom_sid"] != domainsid:
raise ProvisioningError('SID as seen by pdb_samba_dsdb [%s] does not match SID as seen by the provision script [%s]!' % (domain_info["dom_sid"], domainsid))
if domain_info["dns_domain"].upper() != dnsdomain.upper():
raise ProvisioningError('Realm as seen by pdb_samba_dsdb [%s] does not match Realm as seen by the provision script [%s]!' % (domain_info["dns_domain"].upper(), dnsdomain.upper()))
try:
if use_ntvfs:
os.chown(sysvol, -1, gid)
except OSError:
canchown = False
else:
canchown = True
# Use the admin SID DN as the user DN; since admin should own most of the
# files, the operation will be much faster.
userdn = '<SID={}-{}>'.format(domainsid, security.DOMAIN_RID_ADMINISTRATOR)
flags = (auth.AUTH_SESSION_INFO_DEFAULT_GROUPS |
auth.AUTH_SESSION_INFO_AUTHENTICATED |
auth.AUTH_SESSION_INFO_SIMPLE_PRIVILEGES)
session_info = auth.user_session(samdb, lp_ctx=lp, dn=userdn,
session_info_flags=flags)
auth.session_info_set_unix(session_info,
lp_ctx=lp,
user_name="Administrator",
uid=uid,
gid=gid)
def _setntacl(path):
"""A helper to reuse args"""
return setntacl(
lp, path, SYSVOL_ACL, str(domainsid), session_info,
use_ntvfs=use_ntvfs, skip_invalid_chown=True, passdb=s4_passdb,
service=SYSVOL_SERVICE)
# Set the SYSVOL_ACL on the sysvol folder and subfolder (first level)
_setntacl(sysvol)
for root, dirs, files in os.walk(sysvol, topdown=False):
for name in files:
if use_ntvfs and canchown:
os.chown(os.path.join(root, name), -1, gid)
_setntacl(os.path.join(root, name))
for name in dirs:
if use_ntvfs and canchown:
os.chown(os.path.join(root, name), -1, gid)
_setntacl(os.path.join(root, name))
# Set acls on Policy folder and policies folders
set_gpos_acl(sysvol, dnsdomain, domainsid, domaindn, samdb, lp, use_ntvfs, passdb=s4_passdb)
def acl_type(direct_db_access):
if direct_db_access:
return "DB"
else:
return "VFS"
def check_dir_acl(path, acl, lp, domainsid, direct_db_access):
session_info = system_session_unix()
fsacl = getntacl(lp, path, session_info, direct_db_access=direct_db_access, service=SYSVOL_SERVICE)
fsacl_sddl = fsacl.as_sddl(domainsid)
if fsacl_sddl != acl:
raise ProvisioningError('%s ACL on GPO directory %s %s does not match expected value %s from GPO object' % (acl_type(direct_db_access), path, fsacl_sddl, acl))
for root, dirs, files in os.walk(path, topdown=False):
for name in files:
fsacl = getntacl(lp, os.path.join(root, name), session_info,
direct_db_access=direct_db_access, service=SYSVOL_SERVICE)
if fsacl is None:
raise ProvisioningError('%s ACL on GPO file %s not found!' %
(acl_type(direct_db_access),
os.path.join(root, name)))
fsacl_sddl = fsacl.as_sddl(domainsid)
if fsacl_sddl != acl:
raise ProvisioningError('%s ACL on GPO file %s %s does not match expected value %s from GPO object' % (acl_type(direct_db_access), os.path.join(root, name), fsacl_sddl, acl))
for name in dirs:
fsacl = getntacl(lp, os.path.join(root, name), session_info,
direct_db_access=direct_db_access, service=SYSVOL_SERVICE)
if fsacl is None:
raise ProvisioningError('%s ACL on GPO directory %s not found!'
% (acl_type(direct_db_access),
os.path.join(root, name)))
fsacl_sddl = fsacl.as_sddl(domainsid)
if fsacl_sddl != acl:
raise ProvisioningError('%s ACL on GPO directory %s %s does not match expected value %s from GPO object' % (acl_type(direct_db_access), os.path.join(root, name), fsacl_sddl, acl))
def check_gpos_acl(sysvol, dnsdomain, domainsid, domaindn, samdb, lp,
direct_db_access):
"""Set ACL on the sysvol/<dnsname>/Policies folder and the policy
folders beneath.
:param sysvol: Physical path for the sysvol folder
:param dnsdomain: The DNS name of the domain
:param domainsid: The SID of the domain
:param domaindn: The DN of the domain (ie. DC=...)
:param samdb: An LDB object on the SAM db
:param lp: an LP object
"""
# Check the ACL on the GPO root folder
root_policy_path = os.path.join(sysvol, dnsdomain, "Policies")
session_info = system_session_unix()
fsacl = getntacl(lp, root_policy_path, session_info,
direct_db_access=direct_db_access, service=SYSVOL_SERVICE)
if fsacl is None:
raise ProvisioningError('DB ACL on policy root %s %s not found!' % (acl_type(direct_db_access), root_policy_path))
fsacl_sddl = fsacl.as_sddl(domainsid)
if fsacl_sddl != POLICIES_ACL:
raise ProvisioningError('%s ACL on policy root %s %s does not match expected value %s from provision' % (acl_type(direct_db_access), root_policy_path, fsacl_sddl, fsacl))
res = samdb.search(base="CN=Policies,CN=System,%s" %(domaindn),
attrs=["cn", "nTSecurityDescriptor"],
expression="", scope=ldb.SCOPE_ONELEVEL)
for policy in res:
acl = ndr_unpack(security.descriptor,
policy["nTSecurityDescriptor"][0]).as_sddl()
policy_path = getpolicypath(sysvol, dnsdomain, str(policy["cn"]))
check_dir_acl(policy_path, dsacl2fsacl(acl, domainsid), lp,
domainsid, direct_db_access)
def checksysvolacl(samdb, netlogon, sysvol, domainsid, dnsdomain, domaindn,
lp):
"""Set the ACL for the sysvol share and the subfolders
:param samdb: An LDB object on the SAM db
:param netlogon: Physical path for the netlogon folder
:param sysvol: Physical path for the sysvol folder
:param uid: The UID of the "Administrator" user
:param gid: The GID of the "Domain adminstrators" group
:param domainsid: The SID of the domain
:param dnsdomain: The DNS name of the domain
:param domaindn: The DN of the domain (ie. DC=...)
"""
# This will ensure that the smbd code we are running when setting ACLs is initialised with the smb.conf
s3conf = s3param.get_context()
s3conf.load(lp.configfile)
# ensure we are using the right samba_dsdb passdb backend, no matter what
s3conf.set("passdb backend", "samba_dsdb:%s" % samdb.url)
# ensure that we init the samba_dsdb backend, so the domain sid is marked in secrets.tdb
s4_passdb = passdb.PDB(s3conf.get("passdb backend"))
# now ensure everything matches correctly, to avoid weird issues
if passdb.get_global_sam_sid() != domainsid:
raise ProvisioningError('SID as seen by smbd [%s] does not match SID as seen by the provision script [%s]!' % (passdb.get_global_sam_sid(), domainsid))
domain_info = s4_passdb.domain_info()
if domain_info["dom_sid"] != domainsid:
raise ProvisioningError('SID as seen by pdb_samba_dsdb [%s] does not match SID as seen by the provision script [%s]!' % (domain_info["dom_sid"], domainsid))
if domain_info["dns_domain"].upper() != dnsdomain.upper():
raise ProvisioningError('Realm as seen by pdb_samba_dsdb [%s] does not match Realm as seen by the provision script [%s]!' % (domain_info["dns_domain"].upper(), dnsdomain.upper()))
# Ensure we can read this directly, and via the smbd VFS
session_info = system_session_unix()
for direct_db_access in [True, False]:
# Check the SYSVOL_ACL on the sysvol folder and subfolder (first level)
for dir_path in [os.path.join(sysvol, dnsdomain), netlogon]:
fsacl = getntacl(lp, dir_path, session_info, direct_db_access=direct_db_access, service=SYSVOL_SERVICE)
if fsacl is None:
raise ProvisioningError('%s ACL on sysvol directory %s not found!' % (acl_type(direct_db_access), dir_path))
fsacl_sddl = fsacl.as_sddl(domainsid)
if fsacl_sddl != SYSVOL_ACL:
raise ProvisioningError('%s ACL on sysvol directory %s %s does not match expected value %s from provision' % (acl_type(direct_db_access), dir_path, fsacl_sddl, SYSVOL_ACL))
# Check acls on Policy folder and policies folders
check_gpos_acl(sysvol, dnsdomain, domainsid, domaindn, samdb, lp,
direct_db_access)
def interface_ips_v4(lp, all_interfaces=False):
"""return only IPv4 IPs"""
ips = samba.interface_ips(lp, all_interfaces)
ret = []
for i in ips:
if i.find(':') == -1:
ret.append(i)
return ret
def interface_ips_v6(lp):
"""return only IPv6 IPs"""
ips = samba.interface_ips(lp, False)
ret = []
for i in ips:
if i.find(':') != -1:
ret.append(i)
return ret
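# Editor's illustrative sketch (not part of the original module): the two
# helpers above simply split interface addresses by family, assuming
# samba.interface_ips() returns plain strings such as "192.168.1.10".
#
#   ips = ["192.168.1.10", "fe80::1", "10.0.0.2"]
#   [i for i in ips if ':' not in i]   # IPv4 -> ["192.168.1.10", "10.0.0.2"]
#   [i for i in ips if ':' in i]       # IPv6 -> ["fe80::1"]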
def provision_fill(samdb, secrets_ldb, logger, names, paths,
schema=None,
targetdir=None, samdb_fill=FILL_FULL,
hostip=None, hostip6=None,
next_rid=1000, dc_rid=None, adminpass=None, krbtgtpass=None,
domainguid=None, policyguid=None, policyguid_dc=None,
invocationid=None, machinepass=None, ntdsguid=None,
dns_backend=None, dnspass=None,
serverrole=None, dom_for_fun_level=None,
am_rodc=False, lp=None, use_ntvfs=False,
skip_sysvolacl=False, backend_store=None,
backend_store_size=None):
# create/adapt the group policy GUIDs
# Default GUID for default policy are described at
# "How Core Group Policy Works"
# http://technet.microsoft.com/en-us/library/cc784268%28WS.10%29.aspx
if policyguid is None:
policyguid = DEFAULT_POLICY_GUID
policyguid = policyguid.upper()
if policyguid_dc is None:
policyguid_dc = DEFAULT_DC_POLICY_GUID
policyguid_dc = policyguid_dc.upper()
if invocationid is None:
invocationid = str(uuid.uuid4())
if krbtgtpass is None:
krbtgtpass = samba.generate_random_machine_password(128, 255)
if machinepass is None:
machinepass = samba.generate_random_machine_password(120, 120)
if dnspass is None:
dnspass = samba.generate_random_password(128, 255)
samdb.transaction_start()
try:
samdb = fill_samdb(samdb, lp, names, logger=logger,
schema=schema,
policyguid=policyguid, policyguid_dc=policyguid_dc,
fill=samdb_fill, adminpass=adminpass, krbtgtpass=krbtgtpass,
invocationid=invocationid, machinepass=machinepass,
dns_backend=dns_backend, dnspass=dnspass,
ntdsguid=ntdsguid, serverrole=serverrole,
dom_for_fun_level=dom_for_fun_level, am_rodc=am_rodc,
next_rid=next_rid, dc_rid=dc_rid,
backend_store=backend_store,
backend_store_size=backend_store_size)
# Set up group policies (domain policy and domain controller
# policy)
if serverrole == "active directory domain controller":
create_default_gpo(paths.sysvol, names.dnsdomain, policyguid,
policyguid_dc)
except:
samdb.transaction_cancel()
raise
else:
samdb.transaction_commit()
if serverrole == "active directory domain controller":
# Continue setting up sysvol for GPO. This appears to require being
# outside a transaction.
if not skip_sysvolacl:
setsysvolacl(samdb, paths.netlogon, paths.sysvol, paths.root_uid,
paths.root_gid, names.domainsid, names.dnsdomain,
names.domaindn, lp, use_ntvfs)
else:
logger.info("Setting acl on sysvol skipped")
secretsdb_self_join(secrets_ldb, domain=names.domain,
realm=names.realm, dnsdomain=names.dnsdomain,
netbiosname=names.netbiosname, domainsid=names.domainsid,
machinepass=machinepass, secure_channel_type=SEC_CHAN_BDC)
# Now set up the right msDS-SupportedEncryptionTypes into the DB
# In future, this might be determined from some configuration
kerberos_enctypes = str(ENC_ALL_TYPES)
try:
msg = ldb.Message(ldb.Dn(samdb,
samdb.searchone("distinguishedName",
expression="samAccountName=%s$" % names.netbiosname,
scope=ldb.SCOPE_SUBTREE).decode('utf8')))
msg["msDS-SupportedEncryptionTypes"] = ldb.MessageElement(
elements=kerberos_enctypes, flags=ldb.FLAG_MOD_REPLACE,
name="msDS-SupportedEncryptionTypes")
samdb.modify(msg)
except ldb.LdbError as e:
(enum, estr) = e.args
if enum != ldb.ERR_NO_SUCH_ATTRIBUTE:
# It might be that this attribute does not exist in this schema
raise
setup_ad_dns(samdb, secrets_ldb, names, paths, lp, logger,
hostip=hostip, hostip6=hostip6, dns_backend=dns_backend,
dnspass=dnspass, os_level=dom_for_fun_level,
targetdir=targetdir, fill_level=samdb_fill,
backend_store=backend_store)
domainguid = samdb.searchone(basedn=samdb.get_default_basedn(),
attribute="objectGUID").decode('utf8')
assert isinstance(domainguid, str)
lastProvisionUSNs = get_last_provision_usn(samdb)
maxUSN = get_max_usn(samdb, str(names.rootdn))
if lastProvisionUSNs is not None:
update_provision_usn(samdb, 0, maxUSN, invocationid, 1)
else:
set_provision_usn(samdb, 0, maxUSN, invocationid)
logger.info("Setting up sam.ldb rootDSE marking as synchronized")
setup_modify_ldif(samdb, setup_path("provision_rootdse_modify.ldif"),
{'NTDSGUID': names.ntdsguid})
# fix any dangling GUIDs from the provision
logger.info("Fixing provision GUIDs")
chk = dbcheck(samdb, samdb_schema=samdb, verbose=False, fix=True, yes=True,
quiet=True)
samdb.transaction_start()
try:
# a small number of GUIDs are missing because of ordering issues in the
# provision code
for schema_obj in ['CN=Domain', 'CN=Organizational-Person', 'CN=Contact', 'CN=inetOrgPerson']:
chk.check_database(DN="%s,%s" % (schema_obj, names.schemadn),
scope=ldb.SCOPE_BASE,
attrs=['defaultObjectCategory'])
chk.check_database(DN="CN=IP Security,CN=System,%s" % names.domaindn,
scope=ldb.SCOPE_ONELEVEL,
attrs=['ipsecOwnersReference',
'ipsecFilterReference',
'ipsecISAKMPReference',
'ipsecNegotiationPolicyReference',
'ipsecNFAReference'])
if chk.check_database(DN=names.schemadn, scope=ldb.SCOPE_SUBTREE,
attrs=['attributeId', 'governsId']) != 0:
raise ProvisioningError("Duplicate attributeId or governsId in schema. Must be fixed manually!!")
except:
samdb.transaction_cancel()
raise
else:
samdb.transaction_commit()
_ROLES_MAP = {
"ROLE_STANDALONE": "standalone server",
"ROLE_DOMAIN_MEMBER": "member server",
"ROLE_DOMAIN_BDC": "active directory domain controller",
"ROLE_DOMAIN_PDC": "active directory domain controller",
"dc": "active directory domain controller",
"member": "member server",
"domain controller": "active directory domain controller",
"active directory domain controller": "active directory domain controller",
"member server": "member server",
"standalone": "standalone server",
"standalone server": "standalone server",
}
def sanitize_server_role(role):
"""Sanitize a server role name.
:param role: Server role
:raise ValueError: If the role cannot be interpreted
:return: Sanitized server role (one of "member server",
"active directory domain controller", "standalone server")
"""
try:
return _ROLES_MAP[role]
except KeyError:
raise ValueError(role)
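# Editor's illustrative sketch (not part of the original module):
# sanitize_server_role() maps any accepted spelling onto one of the three
# canonical role strings and raises ValueError for anything else.
#
#   sanitize_server_role("dc")          # -> "active directory domain controller"
#   sanitize_server_role("standalone")  # -> "standalone server"
#   sanitize_server_role("bogus")       # raises ValueError("bogus")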
def provision_fake_ypserver(logger, samdb, domaindn, netbiosname, nisdomain,
maxuid, maxgid):
"""Create AD entries for the fake ypserver.
This is needed to be able to manipulate posix attributes via ADUC.
"""
samdb.transaction_start()
try:
logger.info("Setting up fake yp server settings")
setup_add_ldif(samdb, setup_path("ypServ30.ldif"), {
"DOMAINDN": domaindn,
"NETBIOSNAME": netbiosname,
"NISDOMAIN": nisdomain,
})
except:
samdb.transaction_cancel()
raise
else:
samdb.transaction_commit()
def directory_create_or_exists(path, mode=0o755):
if not os.path.exists(path):
try:
os.mkdir(path, mode)
except OSError as e:
if e.errno in [errno.EEXIST]:
pass
else:
raise ProvisioningError("Failed to create directory %s: %s" % (path, e.strerror))
def determine_host_ip(logger, lp, hostip=None):
if hostip is None:
logger.info("Looking up IPv4 addresses")
hostips = interface_ips_v4(lp)
if len(hostips) > 0:
hostip = hostips[0]
if len(hostips) > 1:
logger.warning("More than one IPv4 address found. Using %s",
hostip)
if hostip == "127.0.0.1":
hostip = None
if hostip is None:
logger.warning("No IPv4 address will be assigned")
return hostip
def determine_host_ip6(logger, lp, hostip6=None):
if hostip6 is None:
logger.info("Looking up IPv6 addresses")
hostips = interface_ips_v6(lp)
if hostips:
hostip6 = hostips[0]
if len(hostips) > 1:
logger.warning("More than one IPv6 address found. Using %s", hostip6)
if hostip6 is None:
logger.warning("No IPv6 address will be assigned")
return hostip6
def provision(logger, session_info, smbconf=None,
targetdir=None, samdb_fill=FILL_FULL, realm=None, rootdn=None,
domaindn=None, schemadn=None, configdn=None, serverdn=None,
domain=None, hostname=None, hostip=None, hostip6=None, domainsid=None,
next_rid=1000, dc_rid=None, adminpass=None, ldapadminpass=None,
krbtgtpass=None, domainguid=None, policyguid=None, policyguid_dc=None,
dns_backend=None, dns_forwarder=None, dnspass=None,
invocationid=None, machinepass=None, ntdsguid=None,
root=None, nobody=None, users=None, backup=None,
sitename=None, serverrole=None, dom_for_fun_level=None,
useeadb=False, am_rodc=False, lp=None, use_ntvfs=False,
use_rfc2307=False, maxuid=None, maxgid=None, skip_sysvolacl=True,
base_schema="2012_R2",
plaintext_secrets=False, backend_store=None,
backend_store_size=None, batch_mode=False):
"""Provision samba4
:note: caution, this wipes all existing data!
"""
try:
serverrole = sanitize_server_role(serverrole)
except ValueError:
raise ProvisioningError('server role (%s) should be one of "active directory domain controller", "member server", "standalone server"' % serverrole)
if ldapadminpass is None:
# Make a new, random password between Samba and its LDAP server
ldapadminpass = samba.generate_random_password(128, 255)
if backend_store is None:
backend_store = get_default_backend_store()
if domainsid is None:
domainsid = security.random_sid()
root_uid = get_root_uid([root or "root"], logger)
nobody_uid = findnss_uid([nobody or "nobody"])
users_gid = findnss_gid([users or "users", 'users', 'other', 'staff'])
root_gid = pwd.getpwuid(root_uid).pw_gid
try:
bind_gid = findnss_gid(["bind", "named"])
except KeyError:
bind_gid = None
if targetdir is not None:
smbconf = os.path.join(targetdir, "etc", "smb.conf")
elif smbconf is None:
smbconf = samba.param.default_path()
if not os.path.exists(os.path.dirname(smbconf)):
os.makedirs(os.path.dirname(smbconf))
server_services = []
global_param = {}
if use_rfc2307:
global_param["idmap_ldb:use rfc2307"] = ["yes"]
if dns_backend != "SAMBA_INTERNAL":
server_services.append("-dns")
else:
if dns_forwarder is not None:
global_param["dns forwarder"] = [dns_forwarder]
if use_ntvfs:
server_services.append("+smb")
server_services.append("-s3fs")
global_param["dcerpc endpoint servers"] = ["+winreg", "+srvsvc"]
if len(server_services) > 0:
global_param["server services"] = server_services
# only install a new smb.conf if there isn't one there already
if os.path.exists(smbconf):
# if Samba Team members can't figure out the weird errors
# loading an empty smb.conf gives, then we need to be smarter.
# Pretend it just didn't exist --abartlet
f = open(smbconf, 'r')
try:
data = f.read().lstrip()
finally:
f.close()
if data is None or data == "":
make_smbconf(smbconf, hostname, domain, realm,
targetdir, serverrole=serverrole,
eadb=useeadb, use_ntvfs=use_ntvfs,
lp=lp, global_param=global_param)
else:
make_smbconf(smbconf, hostname, domain, realm, targetdir,
serverrole=serverrole,
eadb=useeadb, use_ntvfs=use_ntvfs, lp=lp, global_param=global_param)
if lp is None:
lp = samba.param.LoadParm()
lp.load(smbconf)
names = guess_names(lp=lp, hostname=hostname, domain=domain,
dnsdomain=realm, serverrole=serverrole, domaindn=domaindn,
configdn=configdn, schemadn=schemadn, serverdn=serverdn,
sitename=sitename, rootdn=rootdn, domain_names_forced=(samdb_fill == FILL_DRS))
paths = provision_paths_from_lp(lp, names.dnsdomain)
paths.bind_gid = bind_gid
paths.root_uid = root_uid
paths.root_gid = root_gid
hostip = determine_host_ip(logger, lp, hostip)
hostip6 = determine_host_ip6(logger, lp, hostip6)
names.hostip = hostip
names.hostip6 = hostip6
names.domainguid = domainguid
names.domainsid = domainsid
names.forestsid = domainsid
if serverrole is None:
serverrole = lp.get("server role")
directory_create_or_exists(paths.private_dir, 0o700)
directory_create_or_exists(paths.binddns_dir, 0o770)
directory_create_or_exists(os.path.join(paths.private_dir, "tls"))
directory_create_or_exists(paths.state_dir)
if not plaintext_secrets:
setup_encrypted_secrets_key(paths.encrypted_secrets_key_path)
if paths.sysvol and not os.path.exists(paths.sysvol):
os.makedirs(paths.sysvol, 0o775)
schema = Schema(domainsid, invocationid=invocationid,
schemadn=names.schemadn, base_schema=base_schema)
provision_backend = LDBBackend(paths=paths,
lp=lp,
names=names, logger=logger)
provision_backend.init()
provision_backend.start()
# only install a new shares config db if there is none
if not os.path.exists(paths.shareconf):
logger.info("Setting up share.ldb")
share_ldb = Ldb(paths.shareconf, session_info=session_info, lp=lp)
share_ldb.load_ldif_file_add(setup_path("share.ldif"))
logger.info("Setting up secrets.ldb")
secrets_ldb = setup_secretsdb(paths,
session_info=session_info, lp=lp)
try:
logger.info("Setting up the registry")
setup_registry(paths.hklm, session_info, lp=lp)
logger.info("Setting up the privileges database")
setup_privileges(paths.privilege, session_info, lp=lp)
logger.info("Setting up idmap db")
idmap = setup_idmapdb(paths.idmapdb, session_info=session_info, lp=lp)
setup_name_mappings(idmap, sid=str(domainsid),
root_uid=root_uid, nobody_uid=nobody_uid,
users_gid=users_gid, root_gid=root_gid)
logger.info("Setting up SAM db")
samdb = setup_samdb(paths.samdb, session_info,
provision_backend, lp, names, logger=logger,
serverrole=serverrole,
schema=schema, fill=samdb_fill, am_rodc=am_rodc,
plaintext_secrets=plaintext_secrets,
backend_store=backend_store,
backend_store_size=backend_store_size,
batch_mode=batch_mode)
if serverrole == "active directory domain controller":
if paths.netlogon is None:
raise MissingShareError("netlogon", paths.smbconf)
if paths.sysvol is None:
raise MissingShareError("sysvol", paths.smbconf)
if not os.path.isdir(paths.netlogon):
os.makedirs(paths.netlogon, 0o755)
if adminpass is None:
adminpass = samba.generate_random_password(12, 32)
adminpass_generated = True
else:
if isinstance(adminpass, bytes):
adminpass = adminpass.decode('utf-8')
adminpass_generated = False
if samdb_fill == FILL_FULL:
provision_fill(samdb, secrets_ldb, logger, names, paths,
schema=schema, targetdir=targetdir, samdb_fill=samdb_fill,
hostip=hostip, hostip6=hostip6,
next_rid=next_rid, dc_rid=dc_rid, adminpass=adminpass,
krbtgtpass=krbtgtpass,
policyguid=policyguid, policyguid_dc=policyguid_dc,
invocationid=invocationid, machinepass=machinepass,
ntdsguid=ntdsguid, dns_backend=dns_backend,
dnspass=dnspass, serverrole=serverrole,
dom_for_fun_level=dom_for_fun_level, am_rodc=am_rodc,
lp=lp, use_ntvfs=use_ntvfs,
skip_sysvolacl=skip_sysvolacl,
backend_store=backend_store,
backend_store_size=backend_store_size)
if not is_heimdal_built():
create_kdc_conf(paths.kdcconf, realm, domain, os.path.dirname(lp.get("log file")))
logger.info("The Kerberos KDC configuration for Samba AD is "
"located at %s", paths.kdcconf)
create_krb5_conf(paths.krb5conf,
dnsdomain=names.dnsdomain, hostname=names.hostname,
realm=names.realm)
logger.info("A Kerberos configuration suitable for Samba AD has been "
"generated at %s", paths.krb5conf)
logger.info("Merge the contents of this file with your system "
"krb5.conf or replace it with this one. Do not create a "
"symlink!")
if serverrole == "active directory domain controller":
create_dns_update_list(lp, logger, paths)
backend_result = provision_backend.post_setup()
provision_backend.shutdown()
except:
secrets_ldb.transaction_cancel()
raise
# Now commit the secrets.ldb to disk
secrets_ldb.transaction_commit()
# the commit creates the dns.keytab in the private directory
create_dns_dir_keytab_link(logger, paths)
result = ProvisionResult()
result.server_role = serverrole
result.domaindn = domaindn
result.paths = paths
result.names = names
result.lp = lp
result.samdb = samdb
result.idmap = idmap
result.domainsid = str(domainsid)
if samdb_fill == FILL_FULL:
result.adminpass_generated = adminpass_generated
result.adminpass = adminpass
else:
result.adminpass_generated = False
result.adminpass = None
result.backend_result = backend_result
if use_rfc2307:
provision_fake_ypserver(logger=logger, samdb=samdb,
domaindn=names.domaindn, netbiosname=names.netbiosname,
nisdomain=names.domain.lower(), maxuid=maxuid, maxgid=maxgid)
return result
def provision_become_dc(smbconf=None, targetdir=None, realm=None,
rootdn=None, domaindn=None, schemadn=None,
configdn=None, serverdn=None, domain=None,
hostname=None, domainsid=None,
machinepass=None, dnspass=None,
dns_backend=None, sitename=None, debuglevel=1,
use_ntvfs=False):
logger = logging.getLogger("provision")
samba.set_debug_level(debuglevel)
res = provision(logger, system_session(),
smbconf=smbconf, targetdir=targetdir, samdb_fill=FILL_DRS,
realm=realm, rootdn=rootdn, domaindn=domaindn, schemadn=schemadn,
configdn=configdn, serverdn=serverdn, domain=domain,
hostname=hostname, hostip=None, domainsid=domainsid,
machinepass=machinepass,
serverrole="active directory domain controller",
sitename=sitename, dns_backend=dns_backend, dnspass=dnspass,
use_ntvfs=use_ntvfs)
res.lp.set("debuglevel", str(debuglevel))
return res
def create_krb5_conf(path, dnsdomain, hostname, realm):
"""Write out a file containing a valid krb5.conf file
:param path: Path of the new krb5.conf file.
:param dnsdomain: DNS Domain name
:param hostname: Local hostname
:param realm: Realm name
"""
setup_file(setup_path("krb5.conf"), path, {
"DNSDOMAIN": dnsdomain,
"HOSTNAME": hostname,
"REALM": realm,
})
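# Editor's illustrative sketch (not part of the original module): a
# hypothetical call writing a krb5.conf for an example realm; the path and
# names are assumptions, not values used by the provision code itself.
#
#   create_krb5_conf("/usr/local/samba/private/krb5.conf",
#                    dnsdomain="samdom.example.com",
#                    hostname="dc1",
#                    realm="SAMDOM.EXAMPLE.COM")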
class ProvisioningError(Exception):
"""A generic provision error."""
def __init__(self, value):
self.value = value
def __str__(self):
return "ProvisioningError: " + self.value
class InvalidNetbiosName(Exception):
"""A specified name was not a valid NetBIOS name."""
def __init__(self, name):
super(InvalidNetbiosName, self).__init__(
"The name '%r' is not a valid NetBIOS name" % name)
class MissingShareError(ProvisioningError):
def __init__(self, name, smbconf):
super(MissingShareError, self).__init__(
"Existing smb.conf does not have a [%s] share, but you are "
"configuring a DC. Please remove %s or add the share manually." %
(name, smbconf))
| gpl-3.0 |
fvpolpeta/devide | modules/writers/DICOMWriter.py | 7 | 6671 | # Copyright (c) Charl P. Botha, TU Delft
# All rights reserved.
# See COPYRIGHT for details.
# Random development notes:
# * SetFileDimensionality(2) if you want multiple slices written from
# a single volume
# * just generate im%05d.dcm filenames, as many as there are slices
# * study / series UIDs are auto generated
from module_base import ModuleBase
from module_mixins import \
ScriptedConfigModuleMixin
import module_utils
import os
import vtk
import vtkgdcm
import wx # need this for wx.SAVE
RADIOBOX_IDX, DIR_IDX, FILE_IDX = 0, 1, 2
class DICOMWriter(ScriptedConfigModuleMixin, ModuleBase):
def __init__(self, module_manager):
ModuleBase.__init__(self, module_manager)
self._writer = vtkgdcm.vtkGDCMImageWriter()
# NB NB NB: for now we're SWITCHING off the VTK-compatible
# Y-flip, until the X-mirror issues can be solved.
self._writer.SetFileLowerLeft(1)
module_utils.setup_vtk_object_progress(self, self._writer,
'Writing DICOM data')
self._caster = vtk.vtkImageCast()
self._caster.SetOutputScalarTypeToShort()
module_utils.setup_vtk_object_progress(self, self._caster,
'Casting DICOM data to short')
self._input_data = None
self._input_metadata = None
self._config.output_mode = 0
self._config.output_directory = ''
self._config.output_filename = ''
self._config.cast_to_short = True
config_list = [
('Output mode:', 'output_mode', 'base:int',
'radiobox', 'Output mode',
['Slice-per-file (directory)',
'Multi-slice per file (file)']),
('Output directory:', 'output_directory', 'base:str',
'dirbrowser',
'Directory that takes slice-per-file output'),
('Output filename:', 'output_filename', 'base:str',
'filebrowser',
'Output filename for multi-slice per file output.',
{'fileMode' : wx.SAVE,
'fileMask' : 'DICOM file (*.dcm)|*.dcm|'
'All files (*.*)|*.*'}
),
('Cast to short:', 'cast_to_short', 'base:bool',
'checkbox',
'Should the data be cast to signed 16-bit (short), '
'common for DICOM.')
]
ScriptedConfigModuleMixin.__init__(self, config_list,
{'Module (self)' : self})
self.sync_module_logic_with_config()
def close(self):
ScriptedConfigModuleMixin.close(self)
ModuleBase.close(self)
del self._writer
def get_input_descriptions(self):
return ('VTK image data', 'Medical Meta Data')
def set_input(self, idx, input_stream):
if idx == 0:
self._input_data = input_stream
if input_stream is None:
# we explicitly disconnect our filters too
self._caster.SetInput(None)
self._writer.SetInput(None)
else:
self._input_metadata = input_stream
def get_output_descriptions(self):
return ()
def get_output(self, idx):
raise RuntimeError
def execute_module(self):
if self._config.output_mode == 0:
# slice-per-file mode
if not os.path.isdir(self._config.output_directory):
raise RuntimeError(
'Please specify a valid output directory.')
# generate filenamelist with as many entries as there are
# z-slices
self._input_data.UpdateInformation() # shouldn't be necessary
z_len = self._input_data.GetDimensions()[2]
odir = self._config.output_directory
fn_list = [os.path.join(odir,'im%05d.dcm' % (i,))
for i in range(1, z_len+1)]
fn_sa = vtk.vtkStringArray()
[fn_sa.InsertNextValue(fn) for fn in fn_list]
self._writer.SetFileNames(fn_sa)
self._writer.SetFileDimensionality(2)
else: # output_mode == 1, multi-slices per file
if not self._config.output_filename:
raise RuntimeError(
'Please specify an output filename.')
self._writer.SetFileName(self._config.output_filename)
self._writer.SetFileDimensionality(3)
# now setup the common stuff
mip = vtk.vtkMedicalImageProperties()
try:
mip.DeepCopy(self._input_metadata.medical_image_properties)
except AttributeError:
# this simply means that we have no input metadata
pass
self._writer.SetMedicalImageProperties(mip)
try:
self._writer.SetDirectionCosines(
self._input_metadata.direction_cosines)
except AttributeError:
# we have no input metadata, set the default
# identity matrix
m = vtk.vtkMatrix4x4()
self._writer.SetDirectionCosines(m)
if self._config.cast_to_short:
self._caster.SetInput(self._input_data)
# if we don't call this update, it crashes on Windows with
# GDCM 2.0.5, and everything else shortly before DeVIDE
# 8.5. The crash is inside the vtkGDCMImageWriter.
self._caster.Update()
self._writer.SetInput(self._caster.GetOutput())
else:
# just to be sure
self._caster.SetInput(None)
self._writer.SetInput(self._input_data)
self._writer.Write()
def logic_to_config(self):
pass
def config_to_logic(self):
pass
def view(self):
# call to our parent
ScriptedConfigModuleMixin.view(self)
# get binding to radiobox
radiobox = self._getWidget(RADIOBOX_IDX)
# bind change event to it
radiobox.Bind(wx.EVT_RADIOBOX, self._handler_output_mode_radiobox)
# make sure the initial state is ok
self._toggle_filedir(radiobox.GetSelection())
def _handler_output_mode_radiobox(self, event):
self._toggle_filedir(event.GetEventObject().GetSelection())
def _toggle_filedir(self, idx):
dir_widget = self._getWidget(DIR_IDX)
file_widget = self._getWidget(FILE_IDX)
if idx == 0:
# user wants slice-per-file, so we enable dir widget
dir_widget.Enable()
file_widget.Disable()
else:
dir_widget.Disable()
file_widget.Enable()
| bsd-3-clause |
pgaref/ZooEmbedded | src/contrib/zkpython/src/test/connection_test.py | 30 | 4965 | #!/usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest, threading, re
import zookeeper, zktestbase
ZOO_OPEN_ACL_UNSAFE = {"perms":0x1f, "scheme":"world", "id" :"anyone"}
class ConnectionTest(zktestbase.TestBase):
"""Test whether we can make a connection"""
def setUp(self):
pass
def testconnection(self):
cv = threading.Condition()
self.connected = False
def connection_watcher(handle, type, state, path):
cv.acquire()
self.connected = True
self.assertEqual(zookeeper.CONNECTED_STATE, state)
self.handle = handle
cv.notify()
cv.release()
cv.acquire()
ret = zookeeper.init(self.host, connection_watcher)
cv.wait(15.0)
cv.release()
self.assertEqual(self.connected, True, "Connection timed out to " + self.host)
self.assertEqual(zookeeper.CONNECTED_STATE, zookeeper.state(self.handle))
self.assertEqual(zookeeper.close(self.handle), zookeeper.OK)
# Trying to close the same handle twice is an error, and the C library will segfault on it
# so make sure this is caught at the Python module layer
self.assertRaises(zookeeper.ZooKeeperException,
zookeeper.close,
self.handle)
self.assertRaises(zookeeper.ZooKeeperException,
zookeeper.get,
self.handle,
"/")
def testhandlereuse(self):
"""
Test a) multiple concurrent connections b) reuse of closed handles
"""
cv = threading.Condition()
self.connected = False
def connection_watcher(handle, type, state, path):
cv.acquire()
self.connected = True
self.assertEqual(zookeeper.CONNECTED_STATE, state)
self.handle = handle
cv.notify()
cv.release()
cv.acquire()
handles = [ zookeeper.init(self.host) for i in xrange(10) ]
ret = zookeeper.init(self.host, connection_watcher)
cv.wait(15.0)
cv.release()
self.assertEqual(self.connected, True, "Connection timed out to " + self.host)
self.assertEqual(True, self.all( [ zookeeper.state(handle) == zookeeper.CONNECTED_STATE for handle in handles ] ),
"Not all connections succeeded")
oldhandle = handles[3]
zookeeper.close(oldhandle)
newhandle = zookeeper.init(self.host)
# This assertion tests *internal* behaviour; i.e. that the module
# correctly reuses closed handles. This is therefore implementation
# dependent.
self.assertEqual(newhandle, oldhandle, "Didn't get reused handle")
def testmanyhandles(self):
"""
Test the ability of the module to support many handles.
"""
# We'd like to do more, but currently the C client doesn't
# work with > 83 handles (fails to create a pipe) on MacOS 10.5.8
handles = [ zookeeper.init(self.host) for i in xrange(63) ]
cv = threading.Condition()
self.connected = False
def connection_watcher(handle, type, state, path):
cv.acquire()
self.connected = True
self.assertEqual(zookeeper.CONNECTED_STATE, state)
self.handle = handle
cv.notify()
cv.release()
cv.acquire()
ret = zookeeper.init(self.host, connection_watcher)
cv.wait(15.0)
cv.release()
self.assertEqual(self.connected, True, "Connection timed out to " + self.host)
for i,h in enumerate(handles):
path = "/zkpython-test-handles-%s" % str(i)
self.assertEqual(path, zookeeper.create(h, path, "", [ZOO_OPEN_ACL_UNSAFE], zookeeper.EPHEMERAL))
self.assertEqual(True, self.all( zookeeper.close(h) == zookeeper.OK for h in handles ))
def testversionstringexists(self):
self.assertTrue(hasattr(zookeeper, '__version__'))
self.assertTrue(re.match("\d.\d.\d", zookeeper.__version__))
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
40223145c2g18/c2g18 | wsgi/static/Brython2.1.0-20140419-113919/Lib/string.py | 734 | 9410 | """A collection of string constants.
Public module variables:
whitespace -- a string containing all ASCII whitespace
ascii_lowercase -- a string containing all ASCII lowercase letters
ascii_uppercase -- a string containing all ASCII uppercase letters
ascii_letters -- a string containing all ASCII letters
digits -- a string containing all ASCII decimal digits
hexdigits -- a string containing all ASCII hexadecimal digits
octdigits -- a string containing all ASCII octal digits
punctuation -- a string containing all ASCII punctuation characters
printable -- a string containing all ASCII characters considered printable
"""
import _string
# Some strings for ctype-style character classification
whitespace = ' \t\n\r\v\f'
ascii_lowercase = 'abcdefghijklmnopqrstuvwxyz'
ascii_uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
ascii_letters = ascii_lowercase + ascii_uppercase
digits = '0123456789'
hexdigits = digits + 'abcdef' + 'ABCDEF'
octdigits = '01234567'
punctuation = """!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
printable = digits + ascii_letters + punctuation + whitespace
# Functions which aren't available as string methods.
# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def".
def capwords(s, sep=None):
"""capwords(s [,sep]) -> string
Split the argument into words using split, capitalize each
word using capitalize, and join the capitalized words using
join. If the optional second argument sep is absent or None,
runs of whitespace characters are replaced by a single space
and leading and trailing whitespace are removed, otherwise
sep is used to split and join the words.
"""
return (sep or ' ').join(x.capitalize() for x in s.split(sep))
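# Editor's illustrative example (not part of the original module): capwords()
# capitalizes each word and normalizes the separators.
#
#   capwords('  hello   world ')    # -> 'Hello World'
#   capwords('a-b-c d', sep='-')    # -> 'A-B-C d' (only '-' splits and joins)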
####################################################################
import re as _re
from collections import ChainMap
class _TemplateMetaclass(type):
pattern = r"""
%(delim)s(?:
(?P<escaped>%(delim)s) | # Escape sequence of two delimiters
(?P<named>%(id)s) | # delimiter and a Python identifier
{(?P<braced>%(id)s)} | # delimiter and a braced identifier
(?P<invalid>) # Other ill-formed delimiter exprs
)
"""
def __init__(cls, name, bases, dct):
super(_TemplateMetaclass, cls).__init__(name, bases, dct)
if 'pattern' in dct:
pattern = cls.pattern
else:
pattern = _TemplateMetaclass.pattern % {
'delim' : _re.escape(cls.delimiter),
'id' : cls.idpattern,
}
cls.pattern = _re.compile(pattern, cls.flags | _re.VERBOSE)
class Template(metaclass=_TemplateMetaclass):
"""A string class for supporting $-substitutions."""
delimiter = '$'
idpattern = r'[_a-z][_a-z0-9]*'
flags = _re.IGNORECASE
def __init__(self, template):
self.template = template
# Search for $$, $identifier, ${identifier}, and any bare $'s
def _invalid(self, mo):
i = mo.start('invalid')
lines = self.template[:i].splitlines(keepends=True)
if not lines:
colno = 1
lineno = 1
else:
colno = i - len(''.join(lines[:-1]))
lineno = len(lines)
raise ValueError('Invalid placeholder in string: line %d, col %d' %
(lineno, colno))
def substitute(self, *args, **kws):
if len(args) > 1:
raise TypeError('Too many positional arguments')
if not args:
mapping = kws
elif kws:
mapping = ChainMap(kws, args[0])
else:
mapping = args[0]
# Helper function for .sub()
def convert(mo):
# Check the most common path first.
named = mo.group('named') or mo.group('braced')
if named is not None:
val = mapping[named]
# We use this idiom instead of str() because the latter will
# fail if val is a Unicode containing non-ASCII characters.
return '%s' % (val,)
if mo.group('escaped') is not None:
return self.delimiter
if mo.group('invalid') is not None:
self._invalid(mo)
raise ValueError('Unrecognized named group in pattern',
self.pattern)
return self.pattern.sub(convert, self.template)
def safe_substitute(self, *args, **kws):
if len(args) > 1:
raise TypeError('Too many positional arguments')
if not args:
mapping = kws
elif kws:
mapping = ChainMap(kws, args[0])
else:
mapping = args[0]
# Helper function for .sub()
def convert(mo):
named = mo.group('named') or mo.group('braced')
if named is not None:
try:
# We use this idiom instead of str() because the latter
# will fail if val is a Unicode containing non-ASCII
return '%s' % (mapping[named],)
except KeyError:
return mo.group()
if mo.group('escaped') is not None:
return self.delimiter
if mo.group('invalid') is not None:
return mo.group()
raise ValueError('Unrecognized named group in pattern',
self.pattern)
return self.pattern.sub(convert, self.template)
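# Editor's illustrative usage (not part of the original module):
#
#   t = Template('$who likes ${what}')
#   t.substitute(who='tim', what='kung pao')   # -> 'tim likes kung pao'
#   t.safe_substitute(who='tim')               # -> 'tim likes ${what}'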
########################################################################
# the Formatter class
# see PEP 3101 for details and purpose of this class
# The hard parts are reused from the C implementation. They're exposed as "_"
# prefixed methods of str.
# The overall parser is implemented in _string.formatter_parser.
# The field name parser is implemented in _string.formatter_field_name_split
class Formatter:
def format(self, format_string, *args, **kwargs):
return self.vformat(format_string, args, kwargs)
def vformat(self, format_string, args, kwargs):
used_args = set()
result = self._vformat(format_string, args, kwargs, used_args, 2)
self.check_unused_args(used_args, args, kwargs)
return result
def _vformat(self, format_string, args, kwargs, used_args, recursion_depth):
if recursion_depth < 0:
raise ValueError('Max string recursion exceeded')
result = []
for literal_text, field_name, format_spec, conversion in \
self.parse(format_string):
# output the literal text
if literal_text:
result.append(literal_text)
# if there's a field, output it
if field_name is not None:
# this is some markup, find the object and do
# the formatting
# given the field_name, find the object it references
# and the argument it came from
obj, arg_used = self.get_field(field_name, args, kwargs)
used_args.add(arg_used)
# do any conversion on the resulting object
obj = self.convert_field(obj, conversion)
# expand the format spec, if needed
format_spec = self._vformat(format_spec, args, kwargs,
used_args, recursion_depth-1)
# format the object and append to the result
result.append(self.format_field(obj, format_spec))
return ''.join(result)
def get_value(self, key, args, kwargs):
if isinstance(key, int):
return args[key]
else:
return kwargs[key]
def check_unused_args(self, used_args, args, kwargs):
pass
def format_field(self, value, format_spec):
return format(value, format_spec)
def convert_field(self, value, conversion):
# do any conversion on the resulting object
if conversion is None:
return value
elif conversion == 's':
return str(value)
elif conversion == 'r':
return repr(value)
elif conversion == 'a':
return ascii(value)
raise ValueError("Unknown conversion specifier {0!s}".format(conversion))
# returns an iterable that contains tuples of the form:
# (literal_text, field_name, format_spec, conversion)
# literal_text can be zero length
# field_name can be None, in which case there's no
# object to format and output
# if field_name is not None, it is looked up, formatted
# with format_spec and conversion and then used
def parse(self, format_string):
return _string.formatter_parser(format_string)
# given a field_name, find the object it references.
# field_name: the field being looked up, e.g. "0.name"
# or "lookup[3]"
# used_args: a set of which args have been used
# args, kwargs: as passed in to vformat
def get_field(self, field_name, args, kwargs):
first, rest = _string.formatter_field_name_split(field_name)
obj = self.get_value(first, args, kwargs)
# loop through the rest of the field_name, doing
# getattr or getitem as needed
for is_attr, i in rest:
if is_attr:
obj = getattr(obj, i)
else:
obj = obj[i]
return obj, first
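# Editor's illustrative usage (not part of the original module): Formatter
# drives str.format()-style substitution through the overridable hooks above.
#
#   fmt = Formatter()
#   fmt.format('{0} + {1} = {result}', 2, 3, result=5)  # -> '2 + 3 = 5'
#   list(fmt.parse('a{b}c'))  # -> [('a', 'b', '', None), ('c', None, None, None)]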
| gpl-2.0 |
lsqtongxin/django | django/contrib/gis/sitemaps/views.py | 341 | 2421 | from __future__ import unicode_literals
from django.apps import apps
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.gis.db.models.functions import AsKML, Transform
from django.contrib.gis.shortcuts import render_to_kml, render_to_kmz
from django.core.exceptions import FieldDoesNotExist
from django.db import DEFAULT_DB_ALIAS, connections
from django.http import Http404
def kml(request, label, model, field_name=None, compress=False, using=DEFAULT_DB_ALIAS):
"""
This view generates KML for the given app label, model, and field name.
The model's default manager must be GeoManager, and the field name
must be that of a geographic field.
"""
placemarks = []
try:
klass = apps.get_model(label, model)
except LookupError:
raise Http404('You must supply a valid app label and module name. Got "%s.%s"' % (label, model))
if field_name:
try:
field = klass._meta.get_field(field_name)
if not isinstance(field, GeometryField):
raise FieldDoesNotExist
except FieldDoesNotExist:
raise Http404('Invalid geometry field.')
connection = connections[using]
if connection.features.has_AsKML_function:
# Database will take care of transformation.
placemarks = klass._default_manager.using(using).annotate(kml=AsKML(field_name))
else:
# If the database offers no KML method, we use the `kml`
# attribute of the lazy geometry instead.
placemarks = []
if connection.features.has_Transform_function:
qs = klass._default_manager.using(using).annotate(
**{'%s_4326' % field_name: Transform(field_name, 4326)})
field_name += '_4326'
else:
qs = klass._default_manager.using(using).all()
for mod in qs:
mod.kml = getattr(mod, field_name).kml
placemarks.append(mod)
# Getting the render function and rendering to the correct.
if compress:
render = render_to_kmz
else:
render = render_to_kml
return render('gis/kml/placemarks.kml', {'places': placemarks})
def kmz(request, label, model, field_name=None, using=DEFAULT_DB_ALIAS):
"""
This view returns KMZ for the given app label, model, and field name.
"""
return kml(request, label, model, field_name, compress=True, using=using)
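# Editor's illustrative usage (not part of the original module): the views can
# be wired into a URLconf or called directly; the app label, model and field
# name below are assumptions for the sake of the example.
#
#   response = kml(request, label='myapp', model='City', field_name='point')
#   response = kmz(request, label='myapp', model='City', field_name='point')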
| bsd-3-clause |
FreekingDean/home-assistant | homeassistant/components/sensor/cups.py | 25 | 4535 | """
Details about printers which are connected to CUPS.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.cups/
"""
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_HOST, CONF_PORT
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
REQUIREMENTS = ['pycups==1.9.73']
_LOGGER = logging.getLogger(__name__)
ATTR_DEVICE_URI = 'device_uri'
ATTR_PRINTER_INFO = 'printer_info'
ATTR_PRINTER_IS_SHARED = 'printer_is_shared'
ATTR_PRINTER_LOCATION = 'printer_location'
ATTR_PRINTER_MODEL = 'printer_model'
ATTR_PRINTER_STATE_MESSAGE = 'printer_state_message'
ATTR_PRINTER_STATE_REASON = 'printer_state_reason'
ATTR_PRINTER_TYPE = 'printer_type'
ATTR_PRINTER_URI_SUPPORTED = 'printer_uri_supported'
CONF_PRINTERS = 'printers'
DEFAULT_HOST = '127.0.0.1'
DEFAULT_PORT = 631
ICON = 'mdi:printer'
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)
PRINTER_STATES = {
3: 'idle',
4: 'printing',
5: 'stopped',
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_PRINTERS): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
})
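# Editor's illustrative sketch (not part of the original module): a
# hypothetical configuration.yaml entry matching PLATFORM_SCHEMA above;
# host and port fall back to 127.0.0.1:631 when omitted, and the printer
# name is an assumption.
#
#   sensor:
#     - platform: cups
#       host: 192.168.1.20
#       printers:
#         - EPSON_XP-440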
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the CUPS sensor."""
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
printers = config.get(CONF_PRINTERS)
try:
data = CupsData(host, port)
data.update()
except RuntimeError:
_LOGGER.error("Unable to connect to CUPS server: %s:%s", host, port)
return False
dev = []
for printer in printers:
if printer in data.printers:
dev.append(CupsSensor(data, printer))
else:
_LOGGER.error("Printer is not present: %s", printer)
continue
add_devices(dev)
class CupsSensor(Entity):
"""Representation of a CUPS sensor."""
def __init__(self, data, printer):
"""Initialize the CUPS sensor."""
self.data = data
self._name = printer
self._printer = None
self.update()
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
if self._printer is not None:
try:
return next(v for k, v in PRINTER_STATES.items()
if self._printer['printer-state'] == k)
except StopIteration:
return self._printer['printer-state']
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return ICON
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
if self._printer is not None:
return {
ATTR_DEVICE_URI: self._printer['device-uri'],
ATTR_PRINTER_INFO: self._printer['printer-info'],
ATTR_PRINTER_IS_SHARED: self._printer['printer-is-shared'],
ATTR_PRINTER_LOCATION: self._printer['printer-location'],
ATTR_PRINTER_MODEL: self._printer['printer-make-and-model'],
ATTR_PRINTER_STATE_MESSAGE:
self._printer['printer-state-message'],
ATTR_PRINTER_STATE_REASON:
self._printer['printer-state-reasons'],
ATTR_PRINTER_TYPE: self._printer['printer-type'],
ATTR_PRINTER_URI_SUPPORTED:
self._printer['printer-uri-supported'],
}
def update(self):
"""Get the latest data and updates the states."""
self.data.update()
self._printer = self.data.printers.get(self._name)
# pylint: disable=import-error
class CupsData(object):
"""Get the latest data from CUPS and update the state."""
def __init__(self, host, port):
"""Initialize the data object."""
self._host = host
self._port = port
self.printers = None
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from CUPS."""
from cups import Connection
conn = Connection(host=self._host, port=self._port)
self.printers = conn.getPrinters()
| mit |
rahuldhote/scikit-learn | examples/gaussian_process/gp_diabetes_dataset.py | 223 | 1976 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
========================================================================
Gaussian Processes regression: goodness-of-fit on the 'diabetes' dataset
========================================================================
In this example, we fit a Gaussian Process model onto the diabetes
dataset.
We determine the correlation parameters with maximum likelihood
estimation (MLE). We use an anisotropic squared exponential
correlation model with a constant regression model. We also use a
nugget of 1e-2 to account for the (strong) noise in the targets.
We compute a cross-validation estimate of the coefficient of
determination (R2) without reperforming MLE, using the set of correlation
parameters found on the whole dataset.
"""
print(__doc__)
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Licence: BSD 3 clause
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import cross_val_score, KFold
# Load the dataset from scikit's data sets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# Instantiate a GP model
gp = GaussianProcess(regr='constant', corr='absolute_exponential',
theta0=[1e-4] * 10, thetaL=[1e-12] * 10,
thetaU=[1e-2] * 10, nugget=1e-2, optimizer='Welch')
# Fit the GP model to the data performing maximum likelihood estimation
gp.fit(X, y)
# Deactivate maximum likelihood estimation for the cross-validation loop
gp.theta0 = gp.theta_ # Given correlation parameter = MLE
gp.thetaL, gp.thetaU = None, None # None bounds deactivate MLE
# Perform a cross-validation estimate of the coefficient of determination using
# the cross_validation module (n_jobs=1 limits this run to a single CPU)
K = 20 # folds
R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=1).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
% (K, R2))
| bsd-3-clause |
KWierso/treeherder | tests/model/test_suite_public_name.py | 1 | 4454 | import pytest
from django.db.utils import IntegrityError
SAME_SUITE_PUBLIC_NAME = 'same suite name'
SAME_TEST_PUBLIC_NAME = 'same test name'
SAME_SUITE = 'same suite'
SAME_TEST = 'same test'
@pytest.mark.parametrize("suite_public_name, suite_public_name_2,"
"test_public_name, test_public_name_2,"
"suite, suite_2, test, test_2", [
(SAME_SUITE_PUBLIC_NAME, SAME_SUITE_PUBLIC_NAME,
SAME_TEST_PUBLIC_NAME, SAME_TEST_PUBLIC_NAME,
SAME_SUITE, SAME_SUITE, 'test', 'test_2'),
(SAME_SUITE_PUBLIC_NAME, SAME_SUITE_PUBLIC_NAME,
SAME_TEST_PUBLIC_NAME, SAME_TEST_PUBLIC_NAME,
'suite', 'suite_2', SAME_TEST, SAME_TEST),
(SAME_SUITE_PUBLIC_NAME, SAME_SUITE_PUBLIC_NAME,
SAME_TEST_PUBLIC_NAME, SAME_TEST_PUBLIC_NAME,
'suite', 'suite_2', 'test', 'test_2'),
])
def test_trigger_public_suite_name_constraint(test_perf_signature, test_perf_signature_2,
suite_public_name, suite_public_name_2,
test_public_name, test_public_name_2,
suite, suite_2, test, test_2):
test_perf_signature.suite_public_name = suite_public_name
test_perf_signature.test_public_name = test_public_name
test_perf_signature.suite = suite
test_perf_signature.test = test
test_perf_signature.save()
test_perf_signature_2.suite_public_name = suite_public_name_2
test_perf_signature_2.test_public_name = test_public_name_2
test_perf_signature_2.suite = suite_2
test_perf_signature_2.test = test_2
with pytest.raises(IntegrityError):
test_perf_signature_2.save()
@pytest.mark.parametrize("suite_public_name, suite_public_name_2,"
"test_public_name, test_public_name_2,"
"suite, suite_2, test, test_2", [
(None, None, None, None, 'suite', 'suite_2', 'test', 'test_2'),
('suite_public_name', 'suite_public_name_2', None, None,
'suite', 'suite_2', 'test', 'test_2'),
(None, None, 'test', 'test_2', 'suite', 'suite_2', 'test', 'test_2'),
('suite_public_name', None, 'test', None, 'suite', 'suite_2', 'test', 'test_2'),
('suite_public_name', 'suite_public_name_2',
SAME_TEST_PUBLIC_NAME, SAME_TEST_PUBLIC_NAME,
'suite', 'suite_2', 'test', 'test_2'),
(SAME_SUITE_PUBLIC_NAME, SAME_SUITE_PUBLIC_NAME,
'test_public_name', 'test_public_name_2',
'suite', 'suite_2', 'test', 'test_2'),
('suite_public_name', 'suite_public_name_2',
SAME_TEST_PUBLIC_NAME, SAME_TEST_PUBLIC_NAME,
SAME_SUITE, SAME_SUITE, SAME_TEST, SAME_TEST),
('suite_public_name', 'suite_public_name_2',
'test_public_name', 'test_public_name_2',
'suite', 'suite_2', 'test', 'test_2'),
])
def test_do_not_trigger_public_suite_name_constraint(test_perf_signature, test_perf_signature_2,
suite_public_name, suite_public_name_2,
test_public_name, test_public_name_2,
suite, suite_2, test, test_2):
test_perf_signature.suite_public_name = suite_public_name
test_perf_signature.test_public_name = test_public_name
test_perf_signature.suite = suite
test_perf_signature.test = test
test_perf_signature.save()
test_perf_signature_2.suite_public_name = suite_public_name_2
test_perf_signature_2.test_public_name = test_public_name_2
test_perf_signature_2.suite = suite_2
test_perf_signature_2.test = test_2
test_perf_signature_2.save()
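# Illustrative sketch (an assumption, not part of this test module): the
# IntegrityError exercised above would come from a database-level uniqueness
# rule on the performance signature model roughly equivalent to
#
#   class Meta:
#       constraints = [
#           models.UniqueConstraint(
#               fields=['suite_public_name', 'test_public_name'],
#               name='unique_public_suite_test_names'),
#       ]
#
# i.e. two different signatures may not share the same public suite/test name
# pair.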
| mpl-2.0 |
caot/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/tests/geoapp/test_regress.py | 258 | 1500 | import os, unittest
from django.contrib.gis.tests.utils import no_mysql, no_oracle, no_postgis, no_spatialite
from django.contrib.gis.shortcuts import render_to_kmz
from models import City
class GeoRegressionTests(unittest.TestCase):
def test01_update(self):
"Testing GeoQuerySet.update(), see #10411."
pnt = City.objects.get(name='Pueblo').point
bak = pnt.clone()
pnt.y += 0.005
pnt.x += 0.005
City.objects.filter(name='Pueblo').update(point=pnt)
self.assertEqual(pnt, City.objects.get(name='Pueblo').point)
City.objects.filter(name='Pueblo').update(point=bak)
self.assertEqual(bak, City.objects.get(name='Pueblo').point)
def test02_kmz(self):
"Testing `render_to_kmz` with non-ASCII data, see #11624."
name = '\xc3\x85land Islands'.decode('iso-8859-1')
places = [{'name' : name,
'description' : name,
'kml' : '<Point><coordinates>5.0,23.0</coordinates></Point>'
}]
kmz = render_to_kmz('gis/kml/placemarks.kml', {'places' : places})
@no_spatialite
@no_mysql
def test03_extent(self):
"Testing `extent` on a table with a single point, see #11827."
pnt = City.objects.get(name='Pueblo').point
ref_ext = (pnt.x, pnt.y, pnt.x, pnt.y)
extent = City.objects.filter(name='Pueblo').extent()
for ref_val, val in zip(ref_ext, extent):
self.assertAlmostEqual(ref_val, val, 4)
| apache-2.0 |
jspraul/bite-project | deps/closure/closure-library/closure/bin/scopify.py | 125 | 6316 | #!/usr/bin/python2.4
#
# Copyright 2010 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Automatically converts codebases over to goog.scope.
Usage:
cd path/to/my/dir;
../../../../javascript/closure/bin/scopify.py
Scans every file in this directory, recursively. Looks for existing
goog.scope calls, and goog.require'd symbols. If it makes sense to
generate a goog.scope call for the file, then we will do so, and
try to auto-generate some aliases based on the goog.require'd symbols.
Known Issues:
When a file is goog.scope'd, the file contents will be indented +2.
This may put some lines over 80 chars. These will need to be fixed manually.
We will only try to create aliases for capitalized names. We do not check
to see if those names will conflict with any existing locals.
This creates merge conflicts for every line of every outstanding change.
If you intend to run this on your codebase, make sure your team members
know. Better yet, send them this script so that they can scopify their
outstanding changes and "accept theirs".
When an alias is "captured", it can no longer be stubbed out for testing.
Run your tests.
"""
__author__ = 'nicksantos@google.com (Nick Santos)'
import os.path
import re
import sys
REQUIRES_RE = re.compile(r"goog.require\('([^']*)'\)")
# Edit this manually if you want something to "always" be aliased.
# TODO(nicksantos): Add a flag for this.
DEFAULT_ALIASES = {}
def Transform(lines):
"""Converts the contents of a file into javascript that uses goog.scope.
Arguments:
lines: A list of strings, corresponding to each line of the file.
Returns:
A new list of strings, or None if the file was not modified.
"""
requires = []
# Do an initial scan to be sure that this file can be processed.
for line in lines:
# Skip this file if it has already been scopified.
if line.find('goog.scope') != -1:
return None
# If there are any global vars or functions, then we also have
# to skip the whole file. We might be able to deal with this
# more elegantly.
if line.find('var ') == 0 or line.find('function ') == 0:
return None
for match in REQUIRES_RE.finditer(line):
requires.append(match.group(1))
if len(requires) == 0:
return None
# Backwards-sort the requires, so that when one is a substring of another,
# we match the longer one first.
for val in DEFAULT_ALIASES.values():
if requires.count(val) == 0:
requires.append(val)
requires.sort()
requires.reverse()
# Generate a map of requires to their aliases
aliases_to_globals = DEFAULT_ALIASES.copy()
for req in requires:
index = req.rfind('.')
if index == -1:
alias = req
else:
alias = req[(index + 1):]
# Don't scopify lowercase namespaces, because they may conflict with
# local variables.
if alias[0].isupper():
aliases_to_globals[alias] = req
aliases_to_matchers = {}
globals_to_aliases = {}
for alias, symbol in aliases_to_globals.items():
globals_to_aliases[symbol] = alias
aliases_to_matchers[alias] = re.compile('\\b%s\\b' % symbol)
# Insert a goog.scope that aliases all required symbols.
result = []
START = 0
SEEN_REQUIRES = 1
IN_SCOPE = 2
mode = START
aliases_used = set()
insertion_index = None
for line in lines:
if mode == START:
result.append(line)
if re.search(REQUIRES_RE, line):
mode = SEEN_REQUIRES
elif mode == SEEN_REQUIRES:
if (line and
not re.search(REQUIRES_RE, line) and
not line.isspace()):
result.append('goog.scope(function() {\n')
insertion_index = len(result)
result.append('\n')
mode = IN_SCOPE
else:
result.append(line)
if mode == IN_SCOPE:
for symbol in requires:
if not symbol in globals_to_aliases:
continue
alias = globals_to_aliases[symbol]
matcher = aliases_to_matchers[alias]
for match in matcher.finditer(line):
# Check to make sure we're not in a string.
# We do this by being as conservative as possible:
# if there are any quote or double quote characters
# before the symbol on this line, then bail out.
before_symbol = line[:match.start(0)]
if before_symbol.count('"') > 0 or before_symbol.count("'") > 0:
continue
line = line.replace(match.group(0), alias)
aliases_used.add(alias)
if line.isspace():
# Truncate all-whitespace lines
result.append('\n')
else:
result.append(' ' + line)
if len(aliases_used):
aliases_used = [alias for alias in aliases_used]
aliases_used.sort()
aliases_used.reverse()
for alias in aliases_used:
symbol = aliases_to_globals[alias]
result.insert(insertion_index,
' var %s = %s;\n' % (alias, symbol))
result.append('});\n')
return result
else:
return None
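# Illustrative sketch (hypothetical input, not part of this script) of what
# Transform produces. Given a file whose lines are:
#
#   goog.require('goog.dom.TagName');
#
#   foo.bar = function() {
#     return goog.dom.TagName.DIV;
#   };
#
# the returned lines roughly correspond to:
#
#   goog.require('goog.dom.TagName');
#   goog.scope(function() {
#     var TagName = goog.dom.TagName;
#
#     foo.bar = function() {
#       return TagName.DIV;
#     };
#   });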
def TransformFileAt(path):
"""Converts a file into javascript that uses goog.scope.
Arguments:
path: A path to a file.
"""
f = open(path)
lines = Transform(f.readlines())
if lines:
f = open(path, 'w')
for l in lines:
f.write(l)
f.close()
if __name__ == '__main__':
args = sys.argv[1:]
if not len(args):
    args = ['.']
for file_name in args:
if os.path.isdir(file_name):
for root, dirs, files in os.walk(file_name):
for name in files:
if name.endswith('.js') and \
not os.path.islink(os.path.join(root, name)):
TransformFileAt(os.path.join(root, name))
else:
if file_name.endswith('.js') and \
not os.path.islink(file_name):
TransformFileAt(file_name)
| apache-2.0 |
bmi-forum/bmi-pyre | Snac/pyre/Inlet.py | 5 | 3206 | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
class Inlet(object):
def __init__(self):
self._handle = None
return
def impose(self):
import Snac.pyre.Exchanger as Exchanger
Exchanger.Inlet_impose(self._handle)
return
def recv(self):
import Snac.pyre.Exchanger as Exchanger
Exchanger.Inlet_recv(self._handle)
return
def storeTimestep(self, fge_t, cge_t):
import Snac.pyre.Exchanger as Exchanger
Exchanger.Inlet_storeTimestep(self._handle, fge_t, cge_t)
return
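# Illustrative sketch (an assumption, not part of this module) of how an Inlet
# subclass is typically driven once per coupled time step:
#
#   inlet = VInlet(mesh, sink, all_variables)
#   inlet.recv()                        # receive boundary data from the sink
#   inlet.impose()                      # apply the received values to the mesh
#   inlet.storeTimestep(fge_t, cge_t)   # record fine/coarse grid time steps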
"""
class SVTInlet(Inlet):
def __init__(self, mesh, sink, all_variables):
import CitcomS.Exchanger as Exchanger
self._handle = Exchanger.SVTInlet_create(mesh,
sink,
all_variables)
return
"""
class VInlet(Inlet):
def __init__(self, mesh, sink, all_variables):
import Snac.pyre.Exchanger as Exchanger
self._handle = Exchanger.VInlet_create(mesh,
sink,
all_variables)
return
def storeVold(self):
import Snac.pyre.Exchanger as Exchanger
Exchanger.VInlet_storeVold(self._handle)
return
def readVold(self):
import Snac.pyre.Exchanger as Exchanger
Exchanger.VInlet_readVold(self._handle)
return
"""
class VTInlet(Inlet):
def __init__(self, mesh, sink, all_variables):
import Snac.pyre.Exchanger as Exchanger
self._handle = Exchanger.VTInlet_create(mesh,
sink,
all_variables)
return
class BoundaryVTInlet(Inlet):
'''Available modes -- see above
'''
def __init__(self, communicator, boundary, sink, all_variables, mode="VT"):
import CitcomS.Exchanger as Exchanger
self._handle = Exchanger.BoundaryVTInlet_create(communicator.handle(),
boundary,
sink,
all_variables,
mode)
return
class TractionInlet(Inlet):
'''Inlet that impose velocity and/or traction on the boundary
Available modes --
"F": traction only
"V": velocity only
"FV": normal velocity and tangent traction
'''
def __init__(self, boundary, sink, all_variables, mode='F'):
import CitcomS.Exchanger as Exchanger
self._handle = Exchanger.TractionInlet_create(boundary,
sink,
all_variables,
mode)
return
"""
# version
__id__ = "$Id: Inlet.py,v 1.6 2004/05/11 07:59:31 tan2 Exp $"
# End of file
| gpl-2.0 |
richardfergie/googleads-python-lib | googleads/adwords.py | 4 | 35332 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client library for the AdWords API."""
import io
import os
import sys
import urllib
import urllib2
from xml.etree import ElementTree
import suds.client
import suds.mx.literal
import suds.xsd.doctor
import googleads.common
import googleads.errors
# A giant dictionary of AdWords versions, the services they support, and which
# namespace those services are in.
_SERVICE_MAP = {
'v201502': {
'AccountLabelService': 'mcm',
'AdCustomizerFeedService': 'cm',
'AdGroupAdService': 'cm',
'AdGroupBidModifierService': 'cm',
'AdGroupCriterionService': 'cm',
'AdGroupExtensionSettingService': 'cm',
'AdGroupFeedService': 'cm',
'AdGroupService': 'cm',
'AdParamService': 'cm',
'AdwordsUserListService': 'rm',
'BiddingStrategyService': 'cm',
'BudgetOrderService': 'billing',
'BudgetService': 'cm',
'CampaignCriterionService': 'cm',
'CampaignExtensionSettingService': 'cm',
'CampaignFeedService': 'cm',
'CampaignService': 'cm',
'CampaignSharedSetService': 'cm',
'ConstantDataService': 'cm',
'ConversionTrackerService': 'cm',
'CustomerExtensionSettingService': 'cm',
'CustomerFeedService': 'cm',
'CustomerService': 'mcm',
'CustomerSyncService': 'ch',
'DataService': 'cm',
'ExperimentService': 'cm',
'FeedItemService': 'cm',
'FeedMappingService': 'cm',
'FeedService': 'cm',
'GeoLocationService': 'cm',
'LabelService': 'cm',
'LocationCriterionService': 'cm',
'ManagedCustomerService': 'mcm',
'MediaService': 'cm',
'MutateJobService': 'cm',
'OfflineConversionFeedService': 'cm',
'ReportDefinitionService': 'cm',
'SharedCriterionService': 'cm',
'SharedSetService': 'cm',
'TargetingIdeaService': 'o',
'TrafficEstimatorService': 'o',
},
'v201506': {
'AccountLabelService': 'mcm',
'AdCustomizerFeedService': 'cm',
'AdGroupAdService': 'cm',
'AdGroupBidModifierService': 'cm',
'AdGroupCriterionService': 'cm',
'AdGroupExtensionSettingService': 'cm',
'AdGroupFeedService': 'cm',
'AdGroupService': 'cm',
'AdParamService': 'cm',
'AdwordsUserListService': 'rm',
'BiddingStrategyService': 'cm',
'BudgetOrderService': 'billing',
'BudgetService': 'cm',
'CampaignCriterionService': 'cm',
'CampaignExtensionSettingService': 'cm',
'CampaignFeedService': 'cm',
'CampaignService': 'cm',
'CampaignSharedSetService': 'cm',
'ConstantDataService': 'cm',
'ConversionTrackerService': 'cm',
'CustomerExtensionSettingService': 'cm',
'CustomerFeedService': 'cm',
'CustomerService': 'mcm',
'CustomerSyncService': 'ch',
'DataService': 'cm',
'ExperimentService': 'cm',
'FeedItemService': 'cm',
'FeedMappingService': 'cm',
'FeedService': 'cm',
'GeoLocationService': 'cm',
'LabelService': 'cm',
'LocationCriterionService': 'cm',
'ManagedCustomerService': 'mcm',
'MediaService': 'cm',
'MutateJobService': 'cm',
'OfflineConversionFeedService': 'cm',
'ReportDefinitionService': 'cm',
'SharedCriterionService': 'cm',
'SharedSetService': 'cm',
'TargetingIdeaService': 'o',
'TrafficEstimatorService': 'o',
}
}
# Supported kwargs for params sent in the report header.
_REPORT_HEADER_KWARGS = {'include_zero_impressions': 'includeZeroImpressions',
'skip_report_header': 'skipReportHeader',
'skip_column_header': 'skipColumnHeader',
'skip_report_summary': 'skipReportSummary'}
# The endpoint used by default when making AdWords API requests.
_DEFAULT_ENDPOINT = 'https://adwords.google.com'
class AdWordsClient(object):
"""A central location to set headers and create web service clients.
Attributes:
developer_token: A string containing your AdWords API developer token.
oauth2_client: A googleads.oauth2.GoogleOAuth2Client used to authorize your
requests.
user_agent: An arbitrary string which will be used to identify your
application
client_customer_id: A string identifying which AdWords customer you want to
act as.
validate_only: A boolean indicating if you want your request to be validated
but not actually executed.
partial_failure: A boolean indicating if you want your mutate calls
containing several operations, some of which fail and some of which
succeed, to result in a complete failure with no changes made or a
partial failure with some changes made. Only certain services respect
this header.
https_proxy: A string identifying the URL of a proxy that all HTTPS requests
should be routed through. Modifying this value will not affect any SOAP
service clients you've already created.
"""
# The key in the storage yaml which contains AdWords data.
_YAML_KEY = 'adwords'
# A list of values which must be provided to use AdWords.
_REQUIRED_INIT_VALUES = ('user_agent', 'developer_token')
# A list of values which may optionally be provided when using AdWords.
_OPTIONAL_INIT_VALUES = ('validate_only', 'partial_failure',
'client_customer_id')
# The format of SOAP service WSDLs. A server, namespace, version, and service
# name need to be formatted in.
_SOAP_SERVICE_FORMAT = '%s/api/adwords/%s/%s/%s?wsdl'
@classmethod
def LoadFromStorage(cls, path=None):
"""Creates an AdWordsClient with information stored in a yaml file.
Args:
[optional]
path: The path string to the file containing cached AdWords data.
Returns:
An AdWordsClient initialized with the values cached in the file.
Raises:
A GoogleAdsValueError if the given yaml file does not contain the
information necessary to instantiate a client object - either a
required key was missing or an OAuth 2.0 key was missing.
"""
if path is None:
path = os.path.join(os.path.expanduser('~'), 'googleads.yaml')
return cls(**googleads.common.LoadFromStorage(
path, cls._YAML_KEY, cls._REQUIRED_INIT_VALUES,
cls._OPTIONAL_INIT_VALUES))
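  # Illustrative sketch of the yaml layout LoadFromStorage expects (the OAuth2
  # key names shown assume the refresh-token flow; values are placeholders):
  #
  #   adwords:
  #     developer_token: INSERT_DEVELOPER_TOKEN_HERE
  #     user_agent: INSERT_USER_AGENT_HERE
  #     client_customer_id: INSERT_CLIENT_CUSTOMER_ID_HERE
  #     client_id: INSERT_OAUTH2_CLIENT_ID_HERE
  #     client_secret: INSERT_OAUTH2_CLIENT_SECRET_HERE
  #     refresh_token: INSERT_REFRESH_TOKEN_HERE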
def __init__(
self, developer_token, oauth2_client, user_agent,
client_customer_id=None, validate_only=False, partial_failure=False,
https_proxy=None, cache=None):
"""Initializes an AdWordsClient.
For more information on these arguments, see our SOAP headers guide:
https://developers.google.com/adwords/api/docs/guides/soap
Args:
developer_token: A string containing your AdWords API developer token.
oauth2_client: A googleads.oauth2.GoogleOAuth2Client used to authorize
your requests.
user_agent: An arbitrary string which will be used to identify your
application
[optional]
client_customer_id: A string identifying which AdWords customer you want
to act as. You do not have to provide this if you are using a client
account. You probably want to provide this if you're using an MCC
account.
validate_only: A boolean indicating if you want your request to be
validated but not actually executed.
partial_failure: A boolean indicating if you want your mutate calls
containing several operations, some of which fail and some of which
succeed, to result in a complete failure with no changes made or a
partial failure with some changes made. Only certain services respect
this header.
https_proxy: A string identifying the proxy that all HTTPS requests
should be routed through.
cache: A subclass of suds.cache.Cache; defaults to None.
"""
self.developer_token = developer_token
self.oauth2_client = oauth2_client
self.oauth2_client.Refresh()
self.user_agent = user_agent
self.client_customer_id = client_customer_id
self.validate_only = validate_only
self.partial_failure = partial_failure
self.https_proxy = https_proxy
self.cache = cache
def GetService(self, service_name, version=sorted(_SERVICE_MAP.keys())[-1],
server=_DEFAULT_ENDPOINT):
"""Creates a service client for the given service.
Args:
service_name: A string identifying which AdWords service to create a
service client for.
[optional]
version: A string identifying the AdWords version to connect to. This
defaults to what is currently the latest version. This will be updated
in future releases to point to what is then the latest version.
server: A string identifying the webserver hosting the AdWords API.
Returns:
A suds.client.ServiceSelector which has the headers and proxy configured
for use.
Raises:
A GoogleAdsValueError if the service or version provided do not exist.
"""
if server[-1] == '/': server = server[:-1]
try:
proxy_option = None
if self.https_proxy:
proxy_option = {
'https': self.https_proxy
}
client = suds.client.Client(
self._SOAP_SERVICE_FORMAT %
(server, _SERVICE_MAP[version][service_name], version, service_name),
proxy=proxy_option, cache=self.cache, timeout=3600)
except KeyError:
if version in _SERVICE_MAP:
raise googleads.errors.GoogleAdsValueError(
'Unrecognized service for the AdWords API. Service given: %s '
'Supported services: %s'
% (service_name, _SERVICE_MAP[version].keys()))
else:
raise googleads.errors.GoogleAdsValueError(
'Unrecognized version of the AdWords API. Version given: %s '
'Supported versions: %s' % (version, _SERVICE_MAP.keys()))
return googleads.common.SudsServiceProxy(
client, _AdWordsHeaderHandler(self, version))
def GetReportDownloader(self, version=sorted(_SERVICE_MAP.keys())[-1],
server=_DEFAULT_ENDPOINT):
"""Creates a downloader for AdWords reports.
This is a convenience method. It is functionally identical to calling
ReportDownloader(adwords_client, version, server)
Args:
[optional]
version: A string identifying the AdWords version to connect to. This
defaults to what is currently the latest version. This will be updated
in future releases to point to what is then the latest version.
server: A string identifying the webserver hosting the AdWords API.
Returns:
A ReportDownloader tied to this AdWordsClient, ready to download reports.
"""
return ReportDownloader(self, version, server)
def SetClientCustomerId(self, client_customer_id):
"""Change the client customer id used by the AdWordsClient instance.
Args:
client_customer_id: str New Client Customer Id to use.
"""
self.client_customer_id = client_customer_id
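# Illustrative usage sketch (assumes a populated googleads.yaml; the service
# name and selector below are examples, not part of this module):
#
#   adwords_client = AdWordsClient.LoadFromStorage()
#   campaign_service = adwords_client.GetService('CampaignService')
#   selector = {'fields': ['Id', 'Name'],
#               'paging': {'startIndex': '0', 'numberResults': '100'}}
#   page = campaign_service.get(selector)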
class _AdWordsHeaderHandler(googleads.common.HeaderHandler):
"""Handler which generates the headers for AdWords requests."""
# The library signature for AdWords, to be appended to all user agents.
_LIB_SIG = googleads.common.GenerateLibSig('AwApi-Python')
# The name of the WSDL-defined SOAP Header class used in all SOAP requests.
# The namespace needs the version of AdWords being used to be templated in.
_SOAP_HEADER_CLASS = ('{https://adwords.google.com/api/adwords/cm/%s}'
'SoapHeader')
# The content type of report download requests
_CONTENT_TYPE = 'application/x-www-form-urlencoded'
def __init__(self, adwords_client, version):
"""Initializes an AdWordsHeaderHandler.
Args:
adwords_client: An AdWordsClient whose data will be used to fill in the
headers. We retain a reference to this object so that the header
handler picks up changes to the client.
version: A string identifying which version of AdWords this header handler
will be used for.
"""
self._adwords_client = adwords_client
self._version = version
def SetHeaders(self, suds_client):
"""Sets the SOAP and HTTP headers on the given suds client."""
header = suds_client.factory.create(self._SOAP_HEADER_CLASS % self._version)
header.clientCustomerId = self._adwords_client.client_customer_id
header.developerToken = self._adwords_client.developer_token
header.userAgent = ''.join([self._adwords_client.user_agent, self._LIB_SIG])
header.validateOnly = self._adwords_client.validate_only
header.partialFailure = self._adwords_client.partial_failure
suds_client.set_options(
soapheaders=header,
headers=self._adwords_client.oauth2_client.CreateHttpHeader())
def GetReportDownloadHeaders(self, kwargs):
"""Returns a dictionary of headers for a report download request.
Args:
kwargs: A dictionary containing optional keyword arguments.
Keyword Arguments:
include_zero_impressions: A boolean indicating whether the report should
show rows with zero impressions.
skip_report_header: A boolean indicating whether to include a header row
containing the report name and date range. If false or not specified,
report output will include the header row.
skip_column_header: A boolean indicating whether to include column names
in reports. If false or not specified, report output will include the
column names.
skip_report_summary: A boolean indicating whether to include a summary row
containing the report totals. If false or not specified, report output
will include the summary row.
Returns:
A dictionary containing the headers configured for downloading a report.
"""
headers = self._adwords_client.oauth2_client.CreateHttpHeader()
headers.update({
'Content-type': self._CONTENT_TYPE,
'developerToken': str(self._adwords_client.developer_token),
'clientCustomerId': str(self._adwords_client.client_customer_id),
'User-Agent': ''.join([self._adwords_client.user_agent, self._LIB_SIG,
',gzip'])
})
for kw in kwargs:
try:
headers.update({_REPORT_HEADER_KWARGS[kw]: str(kwargs[kw])})
except KeyError:
raise googleads.errors.GoogleAdsValueError(
'The provided keyword "%s" is invalid. Accepted keywords are: %s'
% (kw, _REPORT_HEADER_KWARGS.keys()))
return headers
class ReportDownloader(object):
"""A utility that can be used to download reports from AdWords."""
# The namespace format for report download requests. A version needs to be
# formatted into it.
_NAMESPACE_FORMAT = 'https://adwords.google.com/api/adwords/cm/%s'
# The endpoint format for report download requests. A server and version need
# to be formatted into it.
_END_POINT_FORMAT = '%s/api/adwords/reportdownload/%s'
# The schema location format for report download requests. A server and
# version need to be formatted into it.
_SCHEMA_FORMAT = '/'.join([_END_POINT_FORMAT, 'reportDefinition.xsd'])
# The name of the complex type representing a report definition.
_REPORT_DEFINITION_NAME = 'reportDefinition'
def __init__(self, adwords_client, version=sorted(_SERVICE_MAP.keys())[-1],
server=_DEFAULT_ENDPOINT):
"""Initializes a ReportDownloader.
Args:
adwords_client: The AdwordsClient whose attributes will be used to
authorize your report download requests.
[optional]
version: A string identifying the AdWords version to connect to. This
defaults to what is currently the latest version. This will be updated
in future releases to point to what is then the latest version.
server: A string identifying the webserver hosting the AdWords API.
"""
if server[-1] == '/': server = server[:-1]
self._adwords_client = adwords_client
self._namespace = self._NAMESPACE_FORMAT % version
self._end_point = self._END_POINT_FORMAT % (server, version)
self._header_handler = _AdWordsHeaderHandler(adwords_client, version)
proxy_option = None
if self._adwords_client.https_proxy:
proxy_option = {'https': self._adwords_client.https_proxy}
# Create an Opener to handle requests when downloading reports.
self.url_opener = urllib2.build_opener(
urllib2.ProxyHandler({'https': self._adwords_client.https_proxy}))
else:
self.url_opener = urllib2.build_opener()
schema_url = self._SCHEMA_FORMAT % (server, version)
schema = suds.client.Client(
schema_url,
doctor=suds.xsd.doctor.ImportDoctor(suds.xsd.doctor.Import(
self._namespace, schema_url)),
proxy=proxy_option, cache=self._adwords_client.cache).wsdl.schema
self._report_definition_type = schema.elements[
(self._REPORT_DEFINITION_NAME, self._namespace)]
self._marshaller = suds.mx.literal.Literal(schema)
def _DownloadReportCheckFormat(self, file_format, output):
if(file_format.startswith('GZIPPED_')
and not (('b' in getattr(output, 'mode', 'w')) or
type(output) is io.BytesIO)):
raise googleads.errors.GoogleAdsValueError('Need to specify a binary'
' output for GZIPPED formats.')
def DownloadReport(self, report_definition, output=sys.stdout, **kwargs):
"""Downloads an AdWords report using a report definition.
The report contents will be written to the given output.
Args:
report_definition: A dictionary or instance of the ReportDefinition class
generated from the schema. This defines the contents of the report
that will be downloaded.
[optional]
output: A writable object where the contents of the report will be written
to. If the report is gzip compressed, you need to specify an output
that can write binary data.
**kwargs: Optional keyword arguments.
Keyword Arguments:
include_zero_impressions: A boolean indicating whether the report should
show rows with zero impressions.
skip_report_header: A boolean indicating whether to include a header row
containing the report name and date range. If false or not specified,
report output will include the header row.
skip_column_header: A boolean indicating whether to include column names
in reports. If false or not specified, report output will include the
column names.
skip_report_summary: A boolean indicating whether to include a summary row
containing the report totals. If false or not specified, report output
will include the summary row.
Raises:
AdWordsReportBadRequestError: if the report download fails due to
improper input.
GoogleAdsValueError: if the user-specified report format is incompatible
with the output.
AdWordsReportError: if the request fails for any other reason; e.g. a
network error.
"""
self._DownloadReportCheckFormat(report_definition['downloadFormat'], output)
self._DownloadReport(self._SerializeReportDefinition(report_definition),
output, kwargs)
def DownloadReportAsStream(self, report_definition, **kwargs):
"""Downloads an AdWords report using a report definition.
This will return a stream, allowing you to retrieve the report contents.
Args:
report_definition: A dictionary or instance of the ReportDefinition class
generated from the schema. This defines the contents of the report
that will be downloaded.
**kwargs: Optional keyword arguments.
Keyword Arguments:
include_zero_impressions: A boolean indicating whether the report should
show rows with zero impressions.
skip_report_header: A boolean indicating whether to include a header row
containing the report name and date range. If false or not specified,
report output will include the header row.
skip_column_header: A boolean indicating whether to include column names
in reports. If false or not specified, report output will include the
column names.
skip_report_summary: A boolean indicating whether to include a summary row
containing the report totals. If false or not specified, report output
will include the summary row.
Returns:
A stream to be used in retrieving the report contents.
Raises:
AdWordsReportBadRequestError: if the report download fails due to
improper input.
GoogleAdsValueError: if the user-specified report format is incompatible
with the output.
AdWordsReportError: if the request fails for any other reason; e.g. a
network error.
"""
return self._DownloadReportAsStream(
self._SerializeReportDefinition(report_definition), kwargs)
def DownloadReportAsStreamWithAwql(self, query, file_format, **kwargs):
"""Downloads an AdWords report using an AWQL query.
The report contents will be returned as a stream.
Args:
query: A string containing the query which specifies the data you want
your report to include.
file_format: A string representing the output format for your report.
Acceptable values can be found in our API documentation:
https://developers.google.com/adwords/api/docs/guides/reporting
**kwargs: Optional keyword arguments.
Keyword Arguments:
include_zero_impressions: A boolean indicating whether the report should
show rows with zero impressions.
skip_report_header: A boolean indicating whether to include a header row
containing the report name and date range. If false or not specified,
report output will include the header row.
skip_column_header: A boolean indicating whether to include column names
in reports. If false or not specified, report output will include the
column names.
skip_report_summary: A boolean indicating whether to include a summary row
containing the report totals. If false or not specified, report output
will include the summary row.
Returns:
A stream to be used in retrieving the report contents.
Raises:
AdWordsReportBadRequestError: if the report download fails due to
improper input.
GoogleAdsValueError: if the user-specified report format is incompatible
with the output.
AdWordsReportError: if the request fails for any other reason; e.g. a
network error.
"""
return self._DownloadReportAsStream(self._SerializeAwql(query, file_format),
kwargs)
def DownloadReportAsString(self, report_definition, **kwargs):
"""Downloads an AdWords report using a report definition.
The report contents will be returned as a string.
Args:
report_definition: A dictionary or instance of the ReportDefinition class
generated from the schema. This defines the contents of the report
that will be downloaded.
**kwargs: Optional keyword arguments.
Keyword Arguments:
include_zero_impressions: A boolean indicating whether the report should
show rows with zero impressions.
skip_report_header: A boolean indicating whether to include a header row
containing the report name and date range. If false or not specified,
report output will include the header row.
skip_column_header: A boolean indicating whether to include column names
in reports. If false or not specified, report output will include the
column names.
skip_report_summary: A boolean indicating whether to include a summary row
containing the report totals. If false or not specified, report output
will include the summary row.
Returns:
A string containing the report contents.
Raises:
AdWordsReportBadRequestError: if the report download fails due to
improper input.
GoogleAdsValueError: if the user-specified report format is incompatible
with the output.
AdWordsReportError: if the request fails for any other reason; e.g. a
network error.
"""
response = None
try:
response = self._DownloadReportAsStream(
self._SerializeReportDefinition(report_definition), kwargs)
return response.read().decode('utf-8')
finally:
if response:
response.close()
def DownloadReportAsStringWithAwql(self, query, file_format, **kwargs):
"""Downloads an AdWords report using an AWQL query.
The report contents will be returned as a string.
Args:
query: A string containing the query which specifies the data you want
your report to include.
file_format: A string representing the output format for your report.
Acceptable values can be found in our API documentation:
https://developers.google.com/adwords/api/docs/guides/reporting
**kwargs: Optional keyword arguments.
Keyword Arguments:
include_zero_impressions: A boolean indicating whether the report should
show rows with zero impressions.
skip_report_header: A boolean indicating whether to include a header row
containing the report name and date range. If false or not specified,
report output will include the header row.
skip_column_header: A boolean indicating whether to include column names
in reports. If false or not specified, report output will include the
column names.
skip_report_summary: A boolean indicating whether to include a summary row
containing the report totals. If false or not specified, report output
will include the summary row.
Returns:
A string containing the report contents.
Raises:
AdWordsReportBadRequestError: if the report download fails due to
improper input.
GoogleAdsValueError: if the user-specified report format is incompatible
with the output.
AdWordsReportError: if the request fails for any other reason; e.g. a
network error.
"""
response = None
try:
response = self._DownloadReportAsStream(
self._SerializeAwql(query, file_format), kwargs)
return response.read().decode('utf-8')
finally:
if response:
response.close()
def DownloadReportWithAwql(self, query, file_format, output=sys.stdout,
**kwargs):
"""Downloads an AdWords report using an AWQL query.
The report contents will be written to the given output.
Args:
query: A string containing the query which specifies the data you want
your report to include.
file_format: A string representing the output format for your report.
Acceptable values can be found in our API documentation:
https://developers.google.com/adwords/api/docs/guides/reporting
[optional]
output: A writable object where the contents of the report will be written
to. If the report is gzip compressed, you need to specify an output
that can write binary data.
**kwargs: Optional keyword arguments.
Keyword Arguments:
include_zero_impressions: A boolean indicating whether the report should
show rows with zero impressions.
skip_report_header: A boolean indicating whether to include a header row
containing the report name and date range. If false or not specified,
report output will include the header row.
skip_column_header: A boolean indicating whether to include column names
in reports. If false or not specified, report output will include the
column names.
skip_report_summary: A boolean indicating whether to include a summary row
containing the report totals. If false or not specified, report output
will include the summary row.
Raises:
AdWordsReportBadRequestError: if the report download fails due to
improper input.
GoogleAdsValueError: if the user-specified report format is incompatible
with the output.
AdWordsReportError: if the request fails for any other reason; e.g. a
network error.
"""
self._DownloadReportCheckFormat(file_format, output)
self._DownloadReport(self._SerializeAwql(query, file_format), output,
kwargs)
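  # Illustrative sketch of an AWQL report download (the query, file name and
  # report type are hypothetical, not part of this module):
  #
  #   report_downloader = adwords_client.GetReportDownloader()
  #   with open('criteria_report.csv', 'w') as output_file:
  #     report_downloader.DownloadReportWithAwql(
  #         'SELECT CampaignId, Clicks FROM CRITERIA_PERFORMANCE_REPORT '
  #         'DURING LAST_7_DAYS',
  #         'CSV', output_file, skip_report_header=True,
  #         skip_report_summary=True)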
def _DownloadReport(self, post_body, output, kwargs):
"""Downloads an AdWords report, writing the contents to the given file.
Args:
post_body: The contents of the POST request's body as a URL encoded
string.
output: A writable object where the contents of the report will be written
to.
kwargs: A dictionary containing optional keyword arguments.
Keyword Arguments:
include_zero_impressions: A boolean indicating whether the report should
show rows with zero impressions.
skip_report_header: A boolean indicating whether to include a header row
containing the report name and date range. If false or not specified,
report output will include the header row.
skip_column_header: A boolean indicating whether to include column names
in reports. If false or not specified, report output will include the
column names.
skip_report_summary: A boolean indicating whether to include a summary row
containing the report totals. If false or not specified, report output
will include the summary row.
Raises:
AdWordsReportBadRequestError: if the report download fails due to
improper input. In the event of certain other failures, a
urllib2.URLError (Python 2) or urllib.error.URLError (Python 3) will be
raised.
AdWordsReportError: if the request fails for any other reason; e.g. a
network error.
"""
response = None
try:
response = self._DownloadReportAsStream(post_body, kwargs)
output.write(response.read().decode() if sys.version_info[0] == 3
and (getattr(output, 'mode', 'w') == 'w'
and type(output) is not io.BytesIO)
else response.read())
finally:
if response:
response.close()
def _DownloadReportAsStream(self, post_body, kwargs):
"""Downloads an AdWords report, returning a stream.
Args:
post_body: The contents of the POST request's body as a URL encoded
string.
kwargs: A dictionary containing optional keyword arguments.
Keyword Arguments:
include_zero_impressions: A boolean indicating whether the report should
show rows with zero impressions.
skip_report_header: A boolean indicating whether to include a header row
containing the report name and date range. If false or not specified,
report output will include the header row.
skip_column_header: A boolean indicating whether to include column names
in reports. If false or not specified, report output will include the
column names.
skip_report_summary: A boolean indicating whether to include a summary row
containing the report totals. If false or not specified, report output
will include the summary row.
Returns:
A stream to be used in retrieving the report contents.
Raises:
AdWordsReportBadRequestError: if the report download fails due to
improper input. In the event of certain other failures, a
urllib2.URLError (Python 2) or urllib.error.URLError (Python 3) will be
raised.
AdWordsReportError: if the request fails for any other reason; e.g. a
network error.
"""
if sys.version_info[0] == 3:
post_body = bytes(post_body, 'utf8')
request = urllib2.Request(
self._end_point, post_body,
self._header_handler.GetReportDownloadHeaders(kwargs))
try:
return self.url_opener.open(request)
except urllib2.HTTPError, e:
raise self._ExtractError(e)
def _SerializeAwql(self, query, file_format):
"""Serializes an AWQL query and file format for transport.
Args:
query: A string representing the AWQL query used in the report.
file_format: A string representing the file format of the generated
report.
Returns:
The given query and format URL encoded into the format needed for an
AdWords report request as a string. This is intended to be a POST body.
"""
return urllib.urlencode({'__fmt': file_format, '__rdquery': query})
def _SerializeReportDefinition(self, report_definition):
"""Serializes a report definition for transport.
Args:
report_definition: A dictionary or ReportDefinition object to be
serialized.
Returns:
The given report definition serialized into XML and then URL encoded into
the format needed for an AdWords report request as a string. This is
intended to be a POST body.
"""
content = suds.mx.Content(
tag=self._REPORT_DEFINITION_NAME, value=report_definition,
name=self._REPORT_DEFINITION_NAME, type=self._report_definition_type)
return urllib.urlencode({'__rdxml': self._marshaller.process(content)})
def _ExtractError(self, error):
"""Attempts to extract information from a report download error XML message.
Args:
error: A urllib2.HTTPError describing the report download failure.
Returns:
An error that should be raised. If the content was an XML error message,
an AdWordsReportBadRequestError will be returned. Otherwise, an
AdWordsReportError will be returned.
"""
content = error.read()
if sys.version_info[0] == 3:
content = content.decode()
if 'reportDownloadError' in content:
try:
tree = ElementTree.fromstring(content)
return googleads.errors.AdWordsReportBadRequestError(
tree.find('./ApiError/type').text,
tree.find('./ApiError/trigger').text,
tree.find('./ApiError/fieldPath').text,
error.code, error, content)
except ElementTree.ParseError:
pass
return googleads.errors.AdWordsReportError(
error.code, error, content)
| apache-2.0 |
ldjebran/robottelo | tests/foreman/ui/test_sync.py | 2 | 5841 | """Test class for Custom Sync UI
:Requirement: Sync
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: Repositories
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
import pytest
from fauxfactory import gen_string
from nailgun import entities
from robottelo import manifests
from robottelo.api.utils import enable_rhrepo_and_fetchid
from robottelo.constants import (
DISTRO_RHEL6, DISTRO_RHEL7,
DOCKER_REGISTRY_HUB,
DOCKER_UPSTREAM_NAME,
FAKE_1_YUM_REPO,
FEDORA27_OSTREE_REPO,
REPOS,
REPOSET,
REPO_TYPE,
PRDS,
)
from robottelo.decorators import (
fixture,
run_in_one_thread,
skip_if_not_set,
tier2,
upgrade,
)
from robottelo.decorators.host import skip_if_os
from robottelo.products import (
RepositoryCollection,
RHELCloudFormsTools,
SatelliteCapsuleRepository,
)
@fixture(scope='module')
def module_org():
return entities.Organization().create()
@fixture(scope='module')
def module_custom_product(module_org):
return entities.Product(organization=module_org).create()
@fixture(scope='module')
def module_org_with_manifest():
org = entities.Organization().create()
manifests.upload_manifest_locked(org.id)
return org
@tier2
def test_positive_sync_custom_repo(session, module_custom_product):
"""Create Content Custom Sync with minimal input parameters
:id: 00fb0b04-0293-42c2-92fa-930c75acee89
:expectedresults: Sync procedure is successful
:CaseImportance: Critical
"""
repo = entities.Repository(
url=FAKE_1_YUM_REPO, product=module_custom_product).create()
with session:
results = session.sync_status.synchronize([
(module_custom_product.name, repo.name)])
assert len(results) == 1
assert results[0] == 'Syncing Complete.'
@run_in_one_thread
@skip_if_not_set('fake_manifest')
@tier2
@upgrade
def test_positive_sync_rh_repos(session, module_org_with_manifest):
"""Create Content RedHat Sync with two repos.
:id: e30f6509-0b65-4bcc-a522-b4f3089d3911
:expectedresults: Sync procedure for RedHat Repos is successful
:CaseLevel: Integration
"""
repos = (
SatelliteCapsuleRepository(cdn=True),
RHELCloudFormsTools(cdn=True)
)
distros = [DISTRO_RHEL7, DISTRO_RHEL6]
repo_collections = [
RepositoryCollection(distro=distro, repositories=[repo])
for distro, repo in zip(distros, repos)
]
for repo_collection in repo_collections:
repo_collection.setup(module_org_with_manifest.id, synchronize=False)
repo_paths = [
(
repo.repo_data['product'],
repo.repo_data.get('releasever'),
repo.repo_data.get('arch'),
repo.repo_data['name'],
)
for repo in repos
]
with session:
session.organization.select(org_name=module_org_with_manifest.name)
results = session.sync_status.synchronize(repo_paths)
assert len(results) == len(repo_paths)
assert all([result == 'Syncing Complete.' for result in results])
@pytest.mark.skip_if_open("BZ:1625783")
@skip_if_os('RHEL6')
@tier2
@upgrade
def test_positive_sync_custom_ostree_repo(session, module_custom_product):
"""Create custom ostree repository and sync it.
:id: e4119b9b-0356-4661-a3ec-e5807224f7d2
:expectedresults: ostree repo should be synced successfully
:CaseLevel: Integration
:BZ: 1625783
"""
repo = entities.Repository(
content_type='ostree',
url=FEDORA27_OSTREE_REPO,
product=module_custom_product,
unprotected=False,
).create()
with session:
results = session.sync_status.synchronize([
(module_custom_product.name, repo.name)])
assert len(results) == 1
assert results[0] == 'Syncing Complete.'
@run_in_one_thread
@pytest.mark.skip_if_open("BZ:1625783")
@skip_if_os('RHEL6')
@skip_if_not_set('fake_manifest')
@tier2
@upgrade
def test_positive_sync_rh_ostree_repo(session, module_org_with_manifest):
"""Sync CDN based ostree repository.
:id: 4d28fff0-5fda-4eee-aa0c-c5af02c31de5
:Steps:
1. Import a valid manifest
2. Enable the OStree repo and sync it
:expectedresults: ostree repo should be synced successfully from CDN
:CaseLevel: Integration
:BZ: 1625783
"""
enable_rhrepo_and_fetchid(
basearch=None,
org_id=module_org_with_manifest.id,
product=PRDS['rhah'],
repo=REPOS['rhaht']['name'],
reposet=REPOSET['rhaht'],
releasever=None,
)
with session:
session.organization.select(org_name=module_org_with_manifest.name)
results = session.sync_status.synchronize([
(PRDS['rhah'], REPOS['rhaht']['name'])])
assert len(results) == 1
assert results[0] == 'Syncing Complete.'
@tier2
@upgrade
def test_positive_sync_docker_via_sync_status(session, module_org):
"""Create custom docker repo and sync it via the sync status page.
:id: 00b700f4-7e52-48ed-98b2-e49b3be102f2
:expectedresults: Sync procedure for specific docker repository is
successful
:CaseLevel: Integration
"""
product = entities.Product(organization=module_org).create()
repo_name = gen_string('alphanumeric')
with session:
session.repository.create(
product.name,
{'name': repo_name,
'repo_type': REPO_TYPE['docker'],
'repo_content.upstream_url': DOCKER_REGISTRY_HUB,
'repo_content.upstream_repo_name': DOCKER_UPSTREAM_NAME}
)
assert session.repository.search(product.name, repo_name)[0]['Name'] == repo_name
result = session.sync_status.synchronize([(product.name, repo_name)])
assert result[0] == 'Syncing Complete.'
| gpl-3.0 |
AmrThabet/CouchPotatoServer | libs/bs4/testing.py | 440 | 24510 | """Helper classes for tests."""
import copy
import functools
import unittest
from unittest import TestCase
from bs4 import BeautifulSoup
from bs4.element import (
CharsetMetaAttributeValue,
Comment,
ContentMetaAttributeValue,
Doctype,
SoupStrainer,
)
from bs4.builder import HTMLParserTreeBuilder
default_builder = HTMLParserTreeBuilder
class SoupTest(unittest.TestCase):
@property
def default_builder(self):
return default_builder()
def soup(self, markup, **kwargs):
"""Build a Beautiful Soup object from markup."""
builder = kwargs.pop('builder', self.default_builder)
return BeautifulSoup(markup, builder=builder, **kwargs)
def document_for(self, markup):
"""Turn an HTML fragment into a document.
The details depend on the builder.
"""
return self.default_builder.test_fragment_to_document(markup)
def assertSoupEquals(self, to_parse, compare_parsed_to=None):
builder = self.default_builder
obj = BeautifulSoup(to_parse, builder=builder)
if compare_parsed_to is None:
compare_parsed_to = to_parse
self.assertEqual(obj.decode(), self.document_for(compare_parsed_to))
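# Illustrative sketch (not part of this module) of how the SoupTest helpers are
# used by a concrete test case:
#
#   class MyTreeBuilderTest(SoupTest):
#       def test_unclosed_paragraph_is_closed(self):
#           self.assertSoupEquals("<p>foo", "<p>foo</p>")
#           soup = self.soup("<p>foo</p>")
#           self.assertEqual("foo", soup.p.string)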
class HTMLTreeBuilderSmokeTest(object):
"""A basic test of a treebuilder's competence.
Any HTML treebuilder, present or future, should be able to pass
these tests. With invalid markup, there's room for interpretation,
and different parsers can handle it differently. But with the
markup in these tests, there's not much room for interpretation.
"""
def assertDoctypeHandled(self, doctype_fragment):
"""Assert that a given doctype string is handled correctly."""
doctype_str, soup = self._document_with_doctype(doctype_fragment)
# Make sure a Doctype object was created.
doctype = soup.contents[0]
self.assertEqual(doctype.__class__, Doctype)
self.assertEqual(doctype, doctype_fragment)
self.assertEqual(str(soup)[:len(doctype_str)], doctype_str)
# Make sure that the doctype was correctly associated with the
# parse tree and that the rest of the document parsed.
self.assertEqual(soup.p.contents[0], 'foo')
def _document_with_doctype(self, doctype_fragment):
"""Generate and parse a document with the given doctype."""
doctype = '<!DOCTYPE %s>' % doctype_fragment
markup = doctype + '\n<p>foo</p>'
soup = self.soup(markup)
return doctype, soup
def test_normal_doctypes(self):
"""Make sure normal, everyday HTML doctypes are handled correctly."""
self.assertDoctypeHandled("html")
self.assertDoctypeHandled(
'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"')
def test_empty_doctype(self):
soup = self.soup("<!DOCTYPE>")
doctype = soup.contents[0]
self.assertEqual("", doctype.strip())
def test_public_doctype_with_url(self):
doctype = 'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"'
self.assertDoctypeHandled(doctype)
def test_system_doctype(self):
self.assertDoctypeHandled('foo SYSTEM "http://www.example.com/"')
def test_namespaced_system_doctype(self):
# We can handle a namespaced doctype with a system ID.
self.assertDoctypeHandled('xsl:stylesheet SYSTEM "htmlent.dtd"')
def test_namespaced_public_doctype(self):
# Test a namespaced doctype with a public id.
self.assertDoctypeHandled('xsl:stylesheet PUBLIC "htmlent.dtd"')
def test_real_xhtml_document(self):
"""A real XHTML document should come out more or less the same as it went in."""
markup = b"""<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><title>Hello.</title></head>
<body>Goodbye.</body>
</html>"""
soup = self.soup(markup)
self.assertEqual(
soup.encode("utf-8").replace(b"\n", b""),
markup.replace(b"\n", b""))
def test_deepcopy(self):
"""Make sure you can copy the tree builder.
This is important because the builder is part of a
BeautifulSoup object, and we want to be able to copy that.
"""
copy.deepcopy(self.default_builder)
def test_p_tag_is_never_empty_element(self):
"""A <p> tag is never designated as an empty-element tag.
Even if the markup shows it as an empty-element tag, it
shouldn't be presented that way.
"""
soup = self.soup("<p/>")
self.assertFalse(soup.p.is_empty_element)
self.assertEqual(str(soup.p), "<p></p>")
def test_unclosed_tags_get_closed(self):
"""A tag that's not closed by the end of the document should be closed.
This applies to all tags except empty-element tags.
"""
self.assertSoupEquals("<p>", "<p></p>")
self.assertSoupEquals("<b>", "<b></b>")
self.assertSoupEquals("<br>", "<br/>")
def test_br_is_always_empty_element_tag(self):
"""A <br> tag is designated as an empty-element tag.
Some parsers treat <br></br> as one <br/> tag, some parsers as
two tags, but it should always be an empty-element tag.
"""
soup = self.soup("<br></br>")
self.assertTrue(soup.br.is_empty_element)
self.assertEqual(str(soup.br), "<br/>")
def test_nested_formatting_elements(self):
self.assertSoupEquals("<em><em></em></em>")
def test_comment(self):
# Comments are represented as Comment objects.
markup = "<p>foo<!--foobar-->baz</p>"
self.assertSoupEquals(markup)
soup = self.soup(markup)
comment = soup.find(text="foobar")
self.assertEqual(comment.__class__, Comment)
# The comment is properly integrated into the tree.
foo = soup.find(text="foo")
self.assertEqual(comment, foo.next_element)
baz = soup.find(text="baz")
self.assertEqual(comment, baz.previous_element)
def test_preserved_whitespace_in_pre_and_textarea(self):
"""Whitespace must be preserved in <pre> and <textarea> tags."""
self.assertSoupEquals("<pre> </pre>")
self.assertSoupEquals("<textarea> woo </textarea>")
def test_nested_inline_elements(self):
"""Inline elements can be nested indefinitely."""
b_tag = "<b>Inside a B tag</b>"
self.assertSoupEquals(b_tag)
nested_b_tag = "<p>A <i>nested <b>tag</b></i></p>"
self.assertSoupEquals(nested_b_tag)
double_nested_b_tag = "<p>A <a>doubly <i>nested <b>tag</b></i></a></p>"
        self.assertSoupEquals(double_nested_b_tag)
def test_nested_block_level_elements(self):
"""Block elements can be nested."""
soup = self.soup('<blockquote><p><b>Foo</b></p></blockquote>')
blockquote = soup.blockquote
self.assertEqual(blockquote.p.b.string, 'Foo')
self.assertEqual(blockquote.b.string, 'Foo')
def test_correctly_nested_tables(self):
"""One table can go inside another one."""
markup = ('<table id="1">'
'<tr>'
"<td>Here's another table:"
'<table id="2">'
'<tr><td>foo</td></tr>'
'</table></td>')
self.assertSoupEquals(
markup,
'<table id="1"><tr><td>Here\'s another table:'
'<table id="2"><tr><td>foo</td></tr></table>'
'</td></tr></table>')
self.assertSoupEquals(
"<table><thead><tr><td>Foo</td></tr></thead>"
"<tbody><tr><td>Bar</td></tr></tbody>"
"<tfoot><tr><td>Baz</td></tr></tfoot></table>")
def test_deeply_nested_multivalued_attribute(self):
# html5lib can set the attributes of the same tag many times
# as it rearranges the tree. This has caused problems with
# multivalued attributes.
markup = '<table><div><div class="css"></div></div></table>'
soup = self.soup(markup)
self.assertEqual(["css"], soup.div.div['class'])
def test_angle_brackets_in_attribute_values_are_escaped(self):
        self.assertSoupEquals('<a b="<a>"></a>', '<a b="&lt;a&gt;"></a>')
def test_entities_in_attributes_converted_to_unicode(self):
expect = u'<p id="pi\N{LATIN SMALL LETTER N WITH TILDE}ata"></p>'
        self.assertSoupEquals('<p id="pi&ntilde;ata"></p>', expect)
        self.assertSoupEquals('<p id="pi&#241;ata"></p>', expect)
        self.assertSoupEquals('<p id="pi&#xf1;ata"></p>', expect)
        self.assertSoupEquals('<p id="pi&#Xf1;ata"></p>', expect)
def test_entities_in_text_converted_to_unicode(self):
expect = u'<p>pi\N{LATIN SMALL LETTER N WITH TILDE}ata</p>'
        self.assertSoupEquals("<p>pi&ntilde;ata</p>", expect)
        self.assertSoupEquals("<p>pi&#241;ata</p>", expect)
        self.assertSoupEquals("<p>pi&#xf1;ata</p>", expect)
        self.assertSoupEquals("<p>pi&#Xf1;ata</p>", expect)
def test_quot_entity_converted_to_quotation_mark(self):
        self.assertSoupEquals("<p>I said &quot;good day!&quot;</p>",
                              '<p>I said "good day!"</p>')
def test_out_of_range_entity(self):
expect = u"\N{REPLACEMENT CHARACTER}"
        self.assertSoupEquals("&#10000000000000;", expect)
        self.assertSoupEquals("&#x10000000000000;", expect)
        self.assertSoupEquals("&#1000000000;", expect)
def test_multipart_strings(self):
"Mostly to prevent a recurrence of a bug in the html5lib treebuilder."
soup = self.soup("<html><h2>\nfoo</h2><p></p></html>")
self.assertEqual("p", soup.h2.string.next_element.name)
self.assertEqual("p", soup.p.name)
def test_basic_namespaces(self):
"""Parsers don't need to *understand* namespaces, but at the
very least they should not choke on namespaces or lose
data."""
markup = b'<html xmlns="http://www.w3.org/1999/xhtml" xmlns:mathml="http://www.w3.org/1998/Math/MathML" xmlns:svg="http://www.w3.org/2000/svg"><head></head><body><mathml:msqrt>4</mathml:msqrt><b svg:fill="red"></b></body></html>'
soup = self.soup(markup)
self.assertEqual(markup, soup.encode())
html = soup.html
self.assertEqual('http://www.w3.org/1999/xhtml', soup.html['xmlns'])
self.assertEqual(
'http://www.w3.org/1998/Math/MathML', soup.html['xmlns:mathml'])
self.assertEqual(
'http://www.w3.org/2000/svg', soup.html['xmlns:svg'])
def test_multivalued_attribute_value_becomes_list(self):
markup = b'<a class="foo bar">'
soup = self.soup(markup)
self.assertEqual(['foo', 'bar'], soup.a['class'])
#
# Generally speaking, tests below this point are more tests of
# Beautiful Soup than tests of the tree builders. But parsers are
# weird, so we run these tests separately for every tree builder
# to detect any differences between them.
#
def test_can_parse_unicode_document(self):
# A seemingly innocuous document... but it's in Unicode! And
# it contains characters that can't be represented in the
# encoding found in the declaration! The horror!
markup = u'<html><head><meta encoding="euc-jp"></head><body>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</body>'
soup = self.soup(markup)
self.assertEqual(u'Sacr\xe9 bleu!', soup.body.string)
def test_soupstrainer(self):
"""Parsers should be able to work with SoupStrainers."""
strainer = SoupStrainer("b")
soup = self.soup("A <b>bold</b> <meta/> <i>statement</i>",
parse_only=strainer)
self.assertEqual(soup.decode(), "<b>bold</b>")
def test_single_quote_attribute_values_become_double_quotes(self):
self.assertSoupEquals("<foo attr='bar'></foo>",
'<foo attr="bar"></foo>')
def test_attribute_values_with_nested_quotes_are_left_alone(self):
text = """<foo attr='bar "brawls" happen'>a</foo>"""
self.assertSoupEquals(text)
def test_attribute_values_with_double_nested_quotes_get_quoted(self):
text = """<foo attr='bar "brawls" happen'>a</foo>"""
soup = self.soup(text)
soup.foo['attr'] = 'Brawls happen at "Bob\'s Bar"'
        self.assertSoupEquals(
            soup.foo.decode(),
            """<foo attr="Brawls happen at &quot;Bob\'s Bar&quot;">a</foo>""")
def test_ampersand_in_attribute_value_gets_escaped(self):
        self.assertSoupEquals('<this is="really messed up & stuff"></this>',
                              '<this is="really messed up &amp; stuff"></this>')
        self.assertSoupEquals(
            '<a href="http://example.org?a=1&b=2;3">foo</a>',
            '<a href="http://example.org?a=1&amp;b=2;3">foo</a>')
def test_escaped_ampersand_in_attribute_value_is_left_alone(self):
        self.assertSoupEquals('<a href="http://example.org?a=1&amp;b=2;3"></a>')
def test_entities_in_strings_converted_during_parsing(self):
# Both XML and HTML entities are converted to Unicode characters
# during parsing.
        text = "<p>&lt;&lt;sacr&eacute; bleu!&gt;&gt;</p>"
        expected = u"<p>&lt;&lt;sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</p>"
self.assertSoupEquals(text, expected)
def test_smart_quotes_converted_on_the_way_in(self):
# Microsoft smart quotes are converted to Unicode characters during
# parsing.
quote = b"<p>\x91Foo\x92</p>"
soup = self.soup(quote)
self.assertEqual(
soup.p.string,
u"\N{LEFT SINGLE QUOTATION MARK}Foo\N{RIGHT SINGLE QUOTATION MARK}")
def test_non_breaking_spaces_converted_on_the_way_in(self):
soup = self.soup("<a> </a>")
self.assertEqual(soup.a.string, u"\N{NO-BREAK SPACE}" * 2)
def test_entities_converted_on_the_way_out(self):
text = "<p><<sacré bleu!>></p>"
expected = u"<p><<sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></p>".encode("utf-8")
soup = self.soup(text)
self.assertEqual(soup.p.encode("utf-8"), expected)
def test_real_iso_latin_document(self):
# Smoke test of interrelated functionality, using an
# easy-to-understand document.
# Here it is in Unicode. Note that it claims to be in ISO-Latin-1.
unicode_html = u'<html><head><meta content="text/html; charset=ISO-Latin-1" http-equiv="Content-type"/></head><body><p>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</p></body></html>'
# That's because we're going to encode it into ISO-Latin-1, and use
# that to test.
iso_latin_html = unicode_html.encode("iso-8859-1")
# Parse the ISO-Latin-1 HTML.
soup = self.soup(iso_latin_html)
# Encode it to UTF-8.
result = soup.encode("utf-8")
# What do we expect the result to look like? Well, it would
# look like unicode_html, except that the META tag would say
# UTF-8 instead of ISO-Latin-1.
expected = unicode_html.replace("ISO-Latin-1", "utf-8")
# And, of course, it would be in UTF-8, not Unicode.
expected = expected.encode("utf-8")
# Ta-da!
self.assertEqual(result, expected)
def test_real_shift_jis_document(self):
# Smoke test to make sure the parser can handle a document in
# Shift-JIS encoding, without choking.
shift_jis_html = (
b'<html><head></head><body><pre>'
b'\x82\xb1\x82\xea\x82\xcdShift-JIS\x82\xc5\x83R\x81[\x83f'
b'\x83B\x83\x93\x83O\x82\xb3\x82\xea\x82\xbd\x93\xfa\x96{\x8c'
b'\xea\x82\xcc\x83t\x83@\x83C\x83\x8b\x82\xc5\x82\xb7\x81B'
b'</pre></body></html>')
unicode_html = shift_jis_html.decode("shift-jis")
soup = self.soup(unicode_html)
# Make sure the parse tree is correctly encoded to various
# encodings.
self.assertEqual(soup.encode("utf-8"), unicode_html.encode("utf-8"))
self.assertEqual(soup.encode("euc_jp"), unicode_html.encode("euc_jp"))
def test_real_hebrew_document(self):
# A real-world test to make sure we can convert ISO-8859-8 (a
# Hebrew encoding) to UTF-8.
hebrew_document = b'<html><head><title>Hebrew (ISO 8859-8) in Visual Directionality</title></head><body><h1>Hebrew (ISO 8859-8) in Visual Directionality</h1>\xed\xe5\xec\xf9</body></html>'
soup = self.soup(
hebrew_document, from_encoding="iso8859-8")
self.assertEqual(soup.original_encoding, 'iso8859-8')
self.assertEqual(
soup.encode('utf-8'),
hebrew_document.decode("iso8859-8").encode("utf-8"))
def test_meta_tag_reflects_current_encoding(self):
# Here's the <meta> tag saying that a document is
# encoded in Shift-JIS.
meta_tag = ('<meta content="text/html; charset=x-sjis" '
'http-equiv="Content-type"/>')
# Here's a document incorporating that meta tag.
shift_jis_html = (
'<html><head>\n%s\n'
'<meta http-equiv="Content-language" content="ja"/>'
'</head><body>Shift-JIS markup goes here.') % meta_tag
soup = self.soup(shift_jis_html)
# Parse the document, and the charset is seemingly unaffected.
parsed_meta = soup.find('meta', {'http-equiv': 'Content-type'})
content = parsed_meta['content']
self.assertEqual('text/html; charset=x-sjis', content)
# But that value is actually a ContentMetaAttributeValue object.
self.assertTrue(isinstance(content, ContentMetaAttributeValue))
# And it will take on a value that reflects its current
# encoding.
self.assertEqual('text/html; charset=utf8', content.encode("utf8"))
# For the rest of the story, see TestSubstitutions in
# test_tree.py.
def test_html5_style_meta_tag_reflects_current_encoding(self):
# Here's the <meta> tag saying that a document is
# encoded in Shift-JIS.
meta_tag = ('<meta id="encoding" charset="x-sjis" />')
# Here's a document incorporating that meta tag.
shift_jis_html = (
'<html><head>\n%s\n'
'<meta http-equiv="Content-language" content="ja"/>'
'</head><body>Shift-JIS markup goes here.') % meta_tag
soup = self.soup(shift_jis_html)
# Parse the document, and the charset is seemingly unaffected.
parsed_meta = soup.find('meta', id="encoding")
charset = parsed_meta['charset']
self.assertEqual('x-sjis', charset)
# But that value is actually a CharsetMetaAttributeValue object.
self.assertTrue(isinstance(charset, CharsetMetaAttributeValue))
# And it will take on a value that reflects its current
# encoding.
self.assertEqual('utf8', charset.encode("utf8"))
def test_tag_with_no_attributes_can_have_attributes_added(self):
data = self.soup("<a>text</a>")
data.a['foo'] = 'bar'
self.assertEqual('<a foo="bar">text</a>', data.a.decode())
class XMLTreeBuilderSmokeTest(object):
def test_docstring_generated(self):
soup = self.soup("<root/>")
self.assertEqual(
soup.encode(), b'<?xml version="1.0" encoding="utf-8"?>\n<root/>')
def test_real_xhtml_document(self):
"""A real XHTML document should come out *exactly* the same as it went in."""
markup = b"""<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><title>Hello.</title></head>
<body>Goodbye.</body>
</html>"""
soup = self.soup(markup)
self.assertEqual(
soup.encode("utf-8"), markup)
def test_formatter_processes_script_tag_for_xml_documents(self):
doc = """
<script type="text/javascript">
</script>
"""
soup = BeautifulSoup(doc, "xml")
# lxml would have stripped this while parsing, but we can add
# it later.
soup.script.string = 'console.log("< < hey > > ");'
encoded = soup.encode()
self.assertTrue(b"< < hey > >" in encoded)
def test_can_parse_unicode_document(self):
markup = u'<?xml version="1.0" encoding="euc-jp"><root>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</root>'
soup = self.soup(markup)
self.assertEqual(u'Sacr\xe9 bleu!', soup.root.string)
def test_popping_namespaced_tag(self):
markup = '<rss xmlns:dc="foo"><dc:creator>b</dc:creator><dc:date>2012-07-02T20:33:42Z</dc:date><dc:rights>c</dc:rights><image>d</image></rss>'
soup = self.soup(markup)
self.assertEqual(
unicode(soup.rss), markup)
def test_docstring_includes_correct_encoding(self):
soup = self.soup("<root/>")
self.assertEqual(
soup.encode("latin1"),
b'<?xml version="1.0" encoding="latin1"?>\n<root/>')
def test_large_xml_document(self):
"""A large XML document should come out the same as it went in."""
markup = (b'<?xml version="1.0" encoding="utf-8"?>\n<root>'
+ b'0' * (2**12)
+ b'</root>')
soup = self.soup(markup)
self.assertEqual(soup.encode("utf-8"), markup)
def test_tags_are_empty_element_if_and_only_if_they_are_empty(self):
self.assertSoupEquals("<p>", "<p/>")
self.assertSoupEquals("<p>foo</p>")
def test_namespaces_are_preserved(self):
markup = '<root xmlns:a="http://example.com/" xmlns:b="http://example.net/"><a:foo>This tag is in the a namespace</a:foo><b:foo>This tag is in the b namespace</b:foo></root>'
soup = self.soup(markup)
root = soup.root
self.assertEqual("http://example.com/", root['xmlns:a'])
self.assertEqual("http://example.net/", root['xmlns:b'])
def test_closing_namespaced_tag(self):
markup = '<p xmlns:dc="http://purl.org/dc/elements/1.1/"><dc:date>20010504</dc:date></p>'
soup = self.soup(markup)
self.assertEqual(unicode(soup.p), markup)
def test_namespaced_attributes(self):
markup = '<foo xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><bar xsi:schemaLocation="http://www.example.com"/></foo>'
soup = self.soup(markup)
self.assertEqual(unicode(soup.foo), markup)
def test_namespaced_attributes_xml_namespace(self):
markup = '<foo xml:lang="fr">bar</foo>'
soup = self.soup(markup)
self.assertEqual(unicode(soup.foo), markup)
class HTML5TreeBuilderSmokeTest(HTMLTreeBuilderSmokeTest):
"""Smoke test for a tree builder that supports HTML5."""
def test_real_xhtml_document(self):
# Since XHTML is not HTML5, HTML5 parsers are not tested to handle
# XHTML documents in any particular way.
pass
def test_html_tags_have_namespace(self):
markup = "<a>"
soup = self.soup(markup)
self.assertEqual("http://www.w3.org/1999/xhtml", soup.a.namespace)
def test_svg_tags_have_namespace(self):
markup = '<svg><circle/></svg>'
soup = self.soup(markup)
namespace = "http://www.w3.org/2000/svg"
self.assertEqual(namespace, soup.svg.namespace)
self.assertEqual(namespace, soup.circle.namespace)
def test_mathml_tags_have_namespace(self):
markup = '<math><msqrt>5</msqrt></math>'
soup = self.soup(markup)
namespace = 'http://www.w3.org/1998/Math/MathML'
self.assertEqual(namespace, soup.math.namespace)
self.assertEqual(namespace, soup.msqrt.namespace)
def test_xml_declaration_becomes_comment(self):
markup = '<?xml version="1.0" encoding="utf-8"?><html></html>'
soup = self.soup(markup)
self.assertTrue(isinstance(soup.contents[0], Comment))
self.assertEqual(soup.contents[0], '?xml version="1.0" encoding="utf-8"?')
self.assertEqual("html", soup.contents[0].next_element.name)
def skipIf(condition, reason):
def nothing(test, *args, **kwargs):
return None
def decorator(test_item):
if condition:
return nothing
else:
return test_item
return decorator
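# A hypothetical use of the skipIf helper above (illustration only; the test
# class and the LXML_PRESENT flag are assumptions, not part of this file):
#
#   class LXMLTreeBuilderSmokeTest(HTMLTreeBuilderSmokeTest):
#
#       @skipIf(not LXML_PRESENT, "lxml is not installed")
#       def test_lxml_only_behaviour(self):
#           ...
#
# When the condition is true the decorated test is replaced by the no-op
# `nothing` function, so the test is silently skipped rather than failing.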
| gpl-3.0 |
bit-trade-one/SoundModuleAP | lib-src/lv2/lv2/waflib/Tools/dmd.py | 8 | 1562 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import sys
from waflib.Tools import ar,d
from waflib.Configure import conf
@conf
def find_dmd(conf):
conf.find_program(['dmd','dmd2','ldc'],var='D')
out=conf.cmd_and_log([conf.env.D,'--help'])
if out.find("D Compiler v")==-1:
out=conf.cmd_and_log([conf.env.D,'-version'])
if out.find("based on DMD v1.")==-1:
conf.fatal("detected compiler is not dmd/ldc")
@conf
def common_flags_ldc(conf):
v=conf.env
v['DFLAGS']=['-d-version=Posix']
v['LINKFLAGS']=[]
v['DFLAGS_dshlib']=['-relocation-model=pic']
@conf
def common_flags_dmd(conf):
v=conf.env
v['D_SRC_F']=['-c']
v['D_TGT_F']='-of%s'
v['D_LINKER']=v['D']
v['DLNK_SRC_F']=''
v['DLNK_TGT_F']='-of%s'
v['DINC_ST']='-I%s'
v['DSHLIB_MARKER']=v['DSTLIB_MARKER']=''
v['DSTLIB_ST']=v['DSHLIB_ST']='-L-l%s'
v['DSTLIBPATH_ST']=v['DLIBPATH_ST']='-L-L%s'
v['LINKFLAGS_dprogram']=['-quiet']
v['DFLAGS_dshlib']=['-fPIC']
v['LINKFLAGS_dshlib']=['-L-shared']
v['DHEADER_ext']='.di'
v.DFLAGS_d_with_header=['-H','-Hf']
v['D_HDR_F']='%s'
def configure(conf):
conf.find_dmd()
if sys.platform=='win32':
out=conf.cmd_and_log([conf.env.D,'--help'])
if out.find("D Compiler v2.")>-1:
conf.fatal('dmd2 on Windows is not supported, use gdc or ldc2 instead')
conf.load('ar')
conf.load('d')
conf.common_flags_dmd()
conf.d_platform_flags()
if str(conf.env.D).find('ldc')>-1:
conf.common_flags_ldc()
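# Minimal wscript sketch showing how this tool might be used from a project
# (illustrative only; the feature string and file names are assumptions, not
# part of this module):
#
#   def configure(conf):
#       conf.load('dmd')
#
#   def build(bld):
#       bld(features='d dprogram', source='main.d', target='hello')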
| gpl-2.0 |
projectcalico/calico-neutron | neutron/tests/unit/vmware/nsxlib/test_router.py | 21 | 46835 | # Copyright (c) 2014 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import mock
from oslo.config import cfg
from neutron.common import exceptions
from neutron.openstack.common import uuidutils
from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.api_client import version as version_module
from neutron.plugins.vmware.common import exceptions as nsx_exc
from neutron.plugins.vmware.common import utils
from neutron.plugins.vmware import nsxlib
from neutron.plugins.vmware.nsxlib import router as routerlib
from neutron.plugins.vmware.nsxlib import switch as switchlib
from neutron.tests.unit import test_api_v2
from neutron.tests.unit.vmware.nsxlib import base
_uuid = test_api_v2._uuid
class TestNatRules(base.NsxlibTestCase):
def _test_create_lrouter_dnat_rule(self, version):
with mock.patch.object(self.fake_cluster.api_client,
'get_version',
new=lambda: version_module.Version(version)):
tenant_id = 'pippo'
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
tenant_id,
'fake_router',
'192.168.0.1')
nat_rule = routerlib.create_lrouter_dnat_rule(
self.fake_cluster, lrouter['uuid'], '10.0.0.99',
match_criteria={'destination_ip_addresses':
'192.168.0.5'})
uri = nsxlib._build_uri_path(routerlib.LROUTERNAT_RESOURCE,
nat_rule['uuid'],
lrouter['uuid'])
resp_obj = nsxlib.do_request("GET", uri, cluster=self.fake_cluster)
self.assertEqual('DestinationNatRule', resp_obj['type'])
self.assertEqual('192.168.0.5',
resp_obj['match']['destination_ip_addresses'])
def test_create_lrouter_dnat_rule_v2(self):
self._test_create_lrouter_dnat_rule('2.9')
def test_create_lrouter_dnat_rule_v31(self):
self._test_create_lrouter_dnat_rule('3.1')
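# For reference, the assertions in _test_create_lrouter_dnat_rule imply that a
# DNAT rule read back from the (fake) NSX API looks roughly like this; only the
# asserted fields are shown, everything else is omitted:
#
#   {"type": "DestinationNatRule",
#    "match": {"destination_ip_addresses": "192.168.0.5"},
#    ...}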
class TestExplicitLRouters(base.NsxlibTestCase):
def setUp(self):
self.fake_version = '3.2'
super(TestExplicitLRouters, self).setUp()
def _get_lrouter(self, tenant_id, router_name, router_id, relations=None):
schema = '/ws.v1/schema/RoutingTableRoutingConfig'
router = {'display_name': router_name,
'uuid': router_id,
'tags': utils.get_tags(os_tid=tenant_id),
'distributed': False,
'routing_config': {'type': 'RoutingTableRoutingConfig',
'_schema': schema},
'_schema': schema,
'nat_synchronization_enabled': True,
'replication_mode': 'service',
'type': 'LogicalRouterConfig',
'_href': '/ws.v1/lrouter/%s' % router_id, }
if relations:
router['_relations'] = relations
return router
def _get_single_route(self, router_id, route_id='fake_route_id_0',
prefix='0.0.0.0/0', next_hop_ip='1.1.1.1'):
return {'protocol': 'static',
'_href': '/ws.v1/lrouter/%s/rib/%s' % (router_id, route_id),
'prefix': prefix,
'_schema': '/ws.v1/schema/RoutingTableEntry',
'next_hop_ip': next_hop_ip,
'action': 'accept',
'uuid': route_id}
def test_prepare_body_with_implicit_routing_config(self):
router_name = 'fake_router_name'
tenant_id = 'fake_tenant_id'
neutron_router_id = 'pipita_higuain'
router_type = 'SingleDefaultRouteImplicitRoutingConfig'
route_config = {
'default_route_next_hop': {'gateway_ip_address': 'fake_address',
'type': 'RouterNextHop'}, }
body = routerlib._prepare_lrouter_body(router_name, neutron_router_id,
tenant_id, router_type,
**route_config)
expected = {'display_name': 'fake_router_name',
'routing_config': {
'default_route_next_hop':
{'gateway_ip_address': 'fake_address',
'type': 'RouterNextHop'},
'type': 'SingleDefaultRouteImplicitRoutingConfig'},
'tags': utils.get_tags(os_tid='fake_tenant_id',
q_router_id='pipita_higuain'),
'type': 'LogicalRouterConfig',
'replication_mode': cfg.CONF.NSX.replication_mode}
self.assertEqual(expected, body)
def test_prepare_body_without_routing_config(self):
router_name = 'fake_router_name'
tenant_id = 'fake_tenant_id'
neutron_router_id = 'marekiaro_hamsik'
router_type = 'RoutingTableRoutingConfig'
body = routerlib._prepare_lrouter_body(router_name, neutron_router_id,
tenant_id, router_type)
expected = {'display_name': 'fake_router_name',
'routing_config': {'type': 'RoutingTableRoutingConfig'},
'tags': utils.get_tags(os_tid='fake_tenant_id',
q_router_id='marekiaro_hamsik'),
'type': 'LogicalRouterConfig',
'replication_mode': cfg.CONF.NSX.replication_mode}
self.assertEqual(expected, body)
def test_get_lrouter(self):
tenant_id = 'fake_tenant_id'
router_name = 'fake_router_name'
router_id = 'fake_router_id'
relations = {
'LogicalRouterStatus':
{'_href': '/ws.v1/lrouter/%s/status' % router_id,
'lport_admin_up_count': 1,
'_schema': '/ws.v1/schema/LogicalRouterStatus',
'lport_count': 1,
'fabric_status': True,
'type': 'LogicalRouterStatus',
'lport_link_up_count': 0, }, }
with mock.patch.object(nsxlib, 'do_request',
return_value=self._get_lrouter(tenant_id,
router_name,
router_id,
relations)):
lrouter = routerlib.get_lrouter(self.fake_cluster, router_id)
self.assertTrue(
lrouter['_relations']['LogicalRouterStatus']['fabric_status'])
def test_create_lrouter(self):
tenant_id = 'fake_tenant_id'
router_name = 'fake_router_name'
router_id = 'fake_router_id'
nexthop_ip = '10.0.0.1'
with mock.patch.object(
nsxlib, 'do_request',
return_value=self._get_lrouter(tenant_id,
router_name,
router_id)):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
tenant_id,
router_name, nexthop_ip)
self.assertEqual(lrouter['routing_config']['type'],
'RoutingTableRoutingConfig')
self.assertNotIn('default_route_next_hop',
lrouter['routing_config'])
def test_update_lrouter_with_no_routes(self):
router_id = 'fake_router_id'
new_routes = [{"nexthop": "10.0.0.2",
"destination": "169.254.169.0/30"}, ]
nsx_routes = [self._get_single_route(router_id)]
with mock.patch.object(routerlib, 'get_explicit_routes_lrouter',
return_value=nsx_routes):
with mock.patch.object(routerlib, 'create_explicit_route_lrouter',
return_value='fake_uuid'):
old_routes = routerlib.update_explicit_routes_lrouter(
self.fake_cluster, router_id, new_routes)
self.assertEqual(old_routes, nsx_routes)
def test_update_lrouter_with_no_routes_raise_nsx_exception(self):
router_id = 'fake_router_id'
new_routes = [{"nexthop": "10.0.0.2",
"destination": "169.254.169.0/30"}, ]
nsx_routes = [self._get_single_route(router_id)]
with mock.patch.object(routerlib, 'get_explicit_routes_lrouter',
return_value=nsx_routes):
with mock.patch.object(routerlib, 'create_explicit_route_lrouter',
side_effect=api_exc.NsxApiException):
self.assertRaises(api_exc.NsxApiException,
routerlib.update_explicit_routes_lrouter,
self.fake_cluster, router_id, new_routes)
def test_update_lrouter_with_routes(self):
router_id = 'fake_router_id'
new_routes = [{"next_hop_ip": "10.0.0.2",
"prefix": "169.254.169.0/30"}, ]
nsx_routes = [self._get_single_route(router_id),
self._get_single_route(router_id, 'fake_route_id_1',
'0.0.0.1/24', '10.0.0.3'),
self._get_single_route(router_id, 'fake_route_id_2',
'0.0.0.2/24', '10.0.0.4'), ]
with mock.patch.object(routerlib, 'get_explicit_routes_lrouter',
return_value=nsx_routes):
with mock.patch.object(routerlib, 'delete_explicit_route_lrouter',
return_value=None):
with mock.patch.object(routerlib,
'create_explicit_route_lrouter',
return_value='fake_uuid'):
old_routes = routerlib.update_explicit_routes_lrouter(
self.fake_cluster, router_id, new_routes)
self.assertEqual(old_routes, nsx_routes)
def test_update_lrouter_with_routes_raises_nsx_exception(self):
router_id = 'fake_router_id'
new_routes = [{"nexthop": "10.0.0.2",
"destination": "169.254.169.0/30"}, ]
nsx_routes = [self._get_single_route(router_id),
self._get_single_route(router_id, 'fake_route_id_1',
'0.0.0.1/24', '10.0.0.3'),
self._get_single_route(router_id, 'fake_route_id_2',
'0.0.0.2/24', '10.0.0.4'), ]
with mock.patch.object(routerlib, 'get_explicit_routes_lrouter',
return_value=nsx_routes):
with mock.patch.object(routerlib, 'delete_explicit_route_lrouter',
side_effect=api_exc.NsxApiException):
with mock.patch.object(
routerlib, 'create_explicit_route_lrouter',
return_value='fake_uuid'):
self.assertRaises(
api_exc.NsxApiException,
routerlib.update_explicit_routes_lrouter,
self.fake_cluster, router_id, new_routes)
class RouterNegativeTestCase(base.NsxlibNegativeBaseTestCase):
def test_create_lrouter_on_failure(self):
self.assertRaises(api_exc.NsxApiException,
routerlib.create_lrouter,
self.fake_cluster,
uuidutils.generate_uuid(),
'pluto',
'fake_router',
'my_hop')
def test_delete_lrouter_on_failure(self):
self.assertRaises(api_exc.NsxApiException,
routerlib.delete_lrouter,
self.fake_cluster,
'fake_router')
def test_get_lrouter_on_failure(self):
self.assertRaises(api_exc.NsxApiException,
routerlib.get_lrouter,
self.fake_cluster,
'fake_router')
def test_update_lrouter_on_failure(self):
self.assertRaises(api_exc.NsxApiException,
routerlib.update_lrouter,
self.fake_cluster,
'fake_router',
'pluto',
'new_hop')
class TestLogicalRouters(base.NsxlibTestCase):
def _verify_lrouter(self, res_lrouter,
expected_uuid,
expected_display_name,
expected_nexthop,
expected_tenant_id,
expected_neutron_id=None,
expected_distributed=None):
self.assertEqual(res_lrouter['uuid'], expected_uuid)
nexthop = (res_lrouter['routing_config']
['default_route_next_hop']['gateway_ip_address'])
self.assertEqual(nexthop, expected_nexthop)
router_tags = self._build_tag_dict(res_lrouter['tags'])
self.assertIn('os_tid', router_tags)
self.assertEqual(res_lrouter['display_name'], expected_display_name)
self.assertEqual(expected_tenant_id, router_tags['os_tid'])
if expected_distributed is not None:
self.assertEqual(expected_distributed,
res_lrouter['distributed'])
if expected_neutron_id:
self.assertIn('q_router_id', router_tags)
self.assertEqual(expected_neutron_id, router_tags['q_router_id'])
def test_get_lrouters(self):
lrouter_uuids = [routerlib.create_lrouter(
self.fake_cluster, 'whatever', 'pippo', 'fake-lrouter-%s' % k,
'10.0.0.1')['uuid'] for k in range(3)]
routers = routerlib.get_lrouters(self.fake_cluster, 'pippo')
for router in routers:
self.assertIn(router['uuid'], lrouter_uuids)
def _create_lrouter(self, version, neutron_id=None, distributed=None):
with mock.patch.object(
self.fake_cluster.api_client, 'get_version',
return_value=version_module.Version(version)):
if not neutron_id:
neutron_id = uuidutils.generate_uuid()
lrouter = routerlib.create_lrouter(
self.fake_cluster, neutron_id, 'pippo',
'fake-lrouter', '10.0.0.1', distributed=distributed)
return routerlib.get_lrouter(self.fake_cluster,
lrouter['uuid'])
def test_create_and_get_lrouter_v30(self):
neutron_id = uuidutils.generate_uuid()
res_lrouter = self._create_lrouter('3.0', neutron_id=neutron_id)
self._verify_lrouter(res_lrouter, res_lrouter['uuid'],
'fake-lrouter', '10.0.0.1', 'pippo',
expected_neutron_id=neutron_id)
def test_create_and_get_lrouter_v31_centralized(self):
neutron_id = uuidutils.generate_uuid()
res_lrouter = self._create_lrouter('3.1', neutron_id=neutron_id,
distributed=False)
self._verify_lrouter(res_lrouter, res_lrouter['uuid'],
'fake-lrouter', '10.0.0.1', 'pippo',
expected_neutron_id=neutron_id,
expected_distributed=False)
def test_create_and_get_lrouter_v31_distributed(self):
neutron_id = uuidutils.generate_uuid()
res_lrouter = self._create_lrouter('3.1', neutron_id=neutron_id,
distributed=True)
self._verify_lrouter(res_lrouter, res_lrouter['uuid'],
'fake-lrouter', '10.0.0.1', 'pippo',
expected_neutron_id=neutron_id,
expected_distributed=True)
def test_create_and_get_lrouter_name_exceeds_40chars(self):
neutron_id = uuidutils.generate_uuid()
display_name = '*' * 50
lrouter = routerlib.create_lrouter(self.fake_cluster,
neutron_id,
'pippo',
display_name,
'10.0.0.1')
res_lrouter = routerlib.get_lrouter(self.fake_cluster,
lrouter['uuid'])
self._verify_lrouter(res_lrouter, lrouter['uuid'],
'*' * 40, '10.0.0.1', 'pippo',
expected_neutron_id=neutron_id)
def _test_version_dependent_update_lrouter(self, version):
def foo(*args, **kwargs):
return version
foo_func_dict = {
'update_lrouter': {
2: {-1: foo},
3: {-1: foo, 2: foo}
}
}
with mock.patch.object(self.fake_cluster.api_client,
'get_version',
return_value=version_module.Version(version)):
with mock.patch.dict(routerlib.ROUTER_FUNC_DICT,
foo_func_dict, clear=True):
return routerlib.update_lrouter(
self.fake_cluster, 'foo_router_id', 'foo_router_name',
'foo_nexthop', routes={'foo_destination': 'foo_address'})
def test_version_dependent_update_lrouter_old_versions(self):
self.assertRaises(nsx_exc.InvalidVersion,
self._test_version_dependent_update_lrouter,
"2.9")
self.assertRaises(nsx_exc.InvalidVersion,
self._test_version_dependent_update_lrouter,
"3.0")
self.assertRaises(nsx_exc.InvalidVersion,
self._test_version_dependent_update_lrouter,
"3.1")
def test_version_dependent_update_lrouter_new_versions(self):
self.assertEqual("3.2",
self._test_version_dependent_update_lrouter("3.2"))
self.assertEqual("4.0",
self._test_version_dependent_update_lrouter("4.0"))
self.assertEqual("4.1",
self._test_version_dependent_update_lrouter("4.1"))
def test_update_lrouter_no_nexthop(self):
neutron_id = uuidutils.generate_uuid()
lrouter = routerlib.create_lrouter(self.fake_cluster,
neutron_id,
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter = routerlib.update_lrouter(self.fake_cluster,
lrouter['uuid'],
'new_name',
None)
res_lrouter = routerlib.get_lrouter(self.fake_cluster,
lrouter['uuid'])
self._verify_lrouter(res_lrouter, lrouter['uuid'],
'new_name', '10.0.0.1', 'pippo',
expected_neutron_id=neutron_id)
def test_update_lrouter(self):
neutron_id = uuidutils.generate_uuid()
lrouter = routerlib.create_lrouter(self.fake_cluster,
neutron_id,
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter = routerlib.update_lrouter(self.fake_cluster,
lrouter['uuid'],
'new_name',
'192.168.0.1')
res_lrouter = routerlib.get_lrouter(self.fake_cluster,
lrouter['uuid'])
self._verify_lrouter(res_lrouter, lrouter['uuid'],
'new_name', '192.168.0.1', 'pippo',
expected_neutron_id=neutron_id)
def test_update_nonexistent_lrouter_raises(self):
self.assertRaises(exceptions.NotFound,
routerlib.update_lrouter,
self.fake_cluster,
'whatever',
'foo', '9.9.9.9')
def test_delete_lrouter(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
routerlib.delete_lrouter(self.fake_cluster, lrouter['uuid'])
self.assertRaises(exceptions.NotFound,
routerlib.get_lrouter,
self.fake_cluster,
lrouter['uuid'])
def test_query_lrouter_ports(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
router_port_uuids = [routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo',
'qp_id_%s' % k, 'port-%s' % k, True,
['192.168.0.%s' % k], '00:11:22:33:44:55')['uuid']
for k in range(3)]
ports = routerlib.query_lrouter_lports(
self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(ports), 3)
for res_port in ports:
self.assertIn(res_port['uuid'], router_port_uuids)
def test_query_lrouter_lports_nonexistent_lrouter_raises(self):
self.assertRaises(
exceptions.NotFound, routerlib.create_router_lport,
self.fake_cluster, 'booo', 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
def test_create_and_get_lrouter_port(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
ports = routerlib.query_lrouter_lports(
self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(ports), 1)
res_port = ports[0]
port_tags = self._build_tag_dict(res_port['tags'])
self.assertEqual(['192.168.0.1'], res_port['ip_addresses'])
self.assertIn('os_tid', port_tags)
self.assertIn('q_port_id', port_tags)
self.assertEqual('pippo', port_tags['os_tid'])
self.assertEqual('neutron_port_id', port_tags['q_port_id'])
def test_create_lrouter_port_nonexistent_router_raises(self):
self.assertRaises(
exceptions.NotFound, routerlib.create_router_lport,
self.fake_cluster, 'booo', 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
def test_update_lrouter_port(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
routerlib.update_router_lport(
self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'],
'pippo', 'another_port_id', 'name', False,
['192.168.0.1', '10.10.10.254'])
ports = routerlib.query_lrouter_lports(
self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(ports), 1)
res_port = ports[0]
port_tags = self._build_tag_dict(res_port['tags'])
self.assertEqual(['192.168.0.1', '10.10.10.254'],
res_port['ip_addresses'])
self.assertEqual('False', res_port['admin_status_enabled'])
self.assertIn('os_tid', port_tags)
self.assertIn('q_port_id', port_tags)
self.assertEqual('pippo', port_tags['os_tid'])
self.assertEqual('another_port_id', port_tags['q_port_id'])
def test_update_lrouter_port_nonexistent_router_raises(self):
self.assertRaises(
exceptions.NotFound, routerlib.update_router_lport,
self.fake_cluster, 'boo-router', 'boo-port', 'pippo',
'neutron_port_id', 'name', True, ['192.168.0.1'])
def test_update_lrouter_port_nonexistent_port_raises(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
self.assertRaises(
exceptions.NotFound, routerlib.update_router_lport,
self.fake_cluster, lrouter['uuid'], 'boo-port', 'pippo',
'neutron_port_id', 'name', True, ['192.168.0.1'])
def test_delete_lrouter_port(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'x', 'y', True, [],
'00:11:22:33:44:55')
ports = routerlib.query_lrouter_lports(
self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(ports), 1)
routerlib.delete_router_lport(self.fake_cluster, lrouter['uuid'],
lrouter_port['uuid'])
ports = routerlib.query_lrouter_lports(
self.fake_cluster, lrouter['uuid'])
self.assertFalse(len(ports))
def test_delete_lrouter_port_nonexistent_router_raises(self):
self.assertRaises(exceptions.NotFound,
routerlib.delete_router_lport,
self.fake_cluster, 'xyz', 'abc')
def test_delete_lrouter_port_nonexistent_port_raises(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
self.assertRaises(exceptions.NotFound,
routerlib.delete_router_lport,
self.fake_cluster, lrouter['uuid'], 'abc')
def test_delete_peer_lrouter_port(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'x', 'y', True, [],
'00:11:22:33:44:55')
def fakegetport(*args, **kwargs):
return {'_relations': {'LogicalPortAttachment':
{'peer_port_uuid': lrouter_port['uuid']}}}
# mock get_port
with mock.patch.object(switchlib, 'get_port', new=fakegetport):
routerlib.delete_peer_router_lport(self.fake_cluster,
lrouter_port['uuid'],
'whatwever', 'whatever')
def test_update_lrouter_port_ips_add_only(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
routerlib.update_lrouter_port_ips(
self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'],
['10.10.10.254'], [])
ports = routerlib.query_lrouter_lports(
self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(ports), 1)
res_port = ports[0]
self.assertEqual(['10.10.10.254', '192.168.0.1'],
res_port['ip_addresses'])
def test_update_lrouter_port_ips_remove_only(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1', '10.10.10.254'],
'00:11:22:33:44:55')
routerlib.update_lrouter_port_ips(
self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'],
[], ['10.10.10.254'])
ports = routerlib.query_lrouter_lports(
self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(ports), 1)
res_port = ports[0]
self.assertEqual(['192.168.0.1'], res_port['ip_addresses'])
def test_update_lrouter_port_ips_add_and_remove(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
routerlib.update_lrouter_port_ips(
self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'],
['10.10.10.254'], ['192.168.0.1'])
ports = routerlib.query_lrouter_lports(
self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(ports), 1)
res_port = ports[0]
self.assertEqual(['10.10.10.254'], res_port['ip_addresses'])
def test_update_lrouter_port_ips_nonexistent_router_raises(self):
self.assertRaises(
nsx_exc.NsxPluginException, routerlib.update_lrouter_port_ips,
self.fake_cluster, 'boo-router', 'boo-port', [], [])
def test_update_lrouter_port_ips_nsx_exception_raises(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
def raise_nsx_exc(*args, **kwargs):
raise api_exc.NsxApiException()
with mock.patch.object(nsxlib, 'do_request', new=raise_nsx_exc):
self.assertRaises(
nsx_exc.NsxPluginException, routerlib.update_lrouter_port_ips,
self.fake_cluster, lrouter['uuid'],
lrouter_port['uuid'], [], [])
def test_plug_lrouter_port_patch_attachment(self):
tenant_id = 'pippo'
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = switchlib.create_lswitch(self.fake_cluster,
_uuid(),
tenant_id, 'fake-switch',
transport_zones_config)
lport = switchlib.create_lport(self.fake_cluster, lswitch['uuid'],
tenant_id, 'xyz',
'name', 'device_id', True)
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
tenant_id,
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55:66')
result = routerlib.plug_router_port_attachment(
self.fake_cluster, lrouter['uuid'],
lrouter_port['uuid'],
lport['uuid'], 'PatchAttachment')
self.assertEqual(lport['uuid'],
result['LogicalPortAttachment']['peer_port_uuid'])
def test_plug_lrouter_port_l3_gw_attachment(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55:66')
result = routerlib.plug_router_port_attachment(
self.fake_cluster, lrouter['uuid'],
lrouter_port['uuid'],
'gw_att', 'L3GatewayAttachment')
self.assertEqual(
'gw_att',
result['LogicalPortAttachment']['l3_gateway_service_uuid'])
def test_plug_lrouter_port_l3_gw_attachment_with_vlan(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
result = routerlib.plug_router_port_attachment(
self.fake_cluster, lrouter['uuid'],
lrouter_port['uuid'],
'gw_att', 'L3GatewayAttachment', 123)
self.assertEqual(
'gw_att',
result['LogicalPortAttachment']['l3_gateway_service_uuid'])
self.assertEqual(
'123',
result['LogicalPortAttachment']['vlan_id'])
def test_plug_lrouter_port_invalid_attachment_type_raises(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
self.assertRaises(nsx_exc.InvalidAttachmentType,
routerlib.plug_router_port_attachment,
self.fake_cluster, lrouter['uuid'],
lrouter_port['uuid'], 'gw_att', 'BadType')
def _test_create_router_snat_rule(self, version):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
with mock.patch.object(self.fake_cluster.api_client,
'get_version',
new=lambda: version_module.Version(version)):
routerlib.create_lrouter_snat_rule(
self.fake_cluster, lrouter['uuid'],
'10.0.0.2', '10.0.0.2', order=200,
match_criteria={'source_ip_addresses': '192.168.0.24'})
rules = routerlib.query_nat_rules(
self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(rules), 1)
def test_create_router_snat_rule_v3(self):
self._test_create_router_snat_rule('3.0')
def test_create_router_snat_rule_v2(self):
self._test_create_router_snat_rule('2.0')
def _test_create_router_dnat_rule(self, version, dest_port=None):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
with mock.patch.object(self.fake_cluster.api_client,
'get_version',
return_value=version_module.Version(version)):
routerlib.create_lrouter_dnat_rule(
self.fake_cluster, lrouter['uuid'], '192.168.0.2', order=200,
dest_port=dest_port,
match_criteria={'destination_ip_addresses': '10.0.0.3'})
rules = routerlib.query_nat_rules(
self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(rules), 1)
def test_create_router_dnat_rule_v3(self):
self._test_create_router_dnat_rule('3.0')
def test_create_router_dnat_rule_v2(self):
self._test_create_router_dnat_rule('2.0')
def test_create_router_dnat_rule_v2_with_destination_port(self):
self._test_create_router_dnat_rule('2.0', 8080)
def test_create_router_dnat_rule_v3_with_destination_port(self):
self._test_create_router_dnat_rule('3.0', 8080)
def test_create_router_snat_rule_invalid_match_keys_raises(self):
# In this case the version does not make a difference
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
with mock.patch.object(self.fake_cluster.api_client,
'get_version',
new=lambda: '2.0'):
self.assertRaises(AttributeError,
routerlib.create_lrouter_snat_rule,
self.fake_cluster, lrouter['uuid'],
'10.0.0.2', '10.0.0.2', order=200,
match_criteria={'foo': 'bar'})
def _test_create_router_nosnat_rule(self, version, expected=1):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
with mock.patch.object(self.fake_cluster.api_client,
'get_version',
new=lambda: version_module.Version(version)):
routerlib.create_lrouter_nosnat_rule(
self.fake_cluster, lrouter['uuid'],
order=100,
match_criteria={'destination_ip_addresses': '192.168.0.0/24'})
rules = routerlib.query_nat_rules(
self.fake_cluster, lrouter['uuid'])
# NoSNAT rules do not exist in V2
self.assertEqual(len(rules), expected)
def test_create_router_nosnat_rule_v2(self):
self._test_create_router_nosnat_rule('2.0', expected=0)
def test_create_router_nosnat_rule_v3(self):
self._test_create_router_nosnat_rule('3.0')
def _prepare_nat_rules_for_delete_tests(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
# v2 or v3 makes no difference for this test
with mock.patch.object(self.fake_cluster.api_client,
'get_version',
new=lambda: version_module.Version('2.0')):
routerlib.create_lrouter_snat_rule(
self.fake_cluster, lrouter['uuid'],
'10.0.0.2', '10.0.0.2', order=220,
match_criteria={'source_ip_addresses': '192.168.0.0/24'})
routerlib.create_lrouter_snat_rule(
self.fake_cluster, lrouter['uuid'],
'10.0.0.3', '10.0.0.3', order=200,
match_criteria={'source_ip_addresses': '192.168.0.2/32'})
routerlib.create_lrouter_dnat_rule(
self.fake_cluster, lrouter['uuid'], '192.168.0.2', order=200,
match_criteria={'destination_ip_addresses': '10.0.0.3'})
return lrouter
def test_delete_router_nat_rules_by_match_on_destination_ip(self):
lrouter = self._prepare_nat_rules_for_delete_tests()
rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(rules), 3)
routerlib.delete_nat_rules_by_match(
self.fake_cluster, lrouter['uuid'], 'DestinationNatRule', 1, 1,
destination_ip_addresses='10.0.0.3')
rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(rules), 2)
def test_delete_router_nat_rules_by_match_on_source_ip(self):
lrouter = self._prepare_nat_rules_for_delete_tests()
rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(rules), 3)
routerlib.delete_nat_rules_by_match(
self.fake_cluster, lrouter['uuid'], 'SourceNatRule', 1, 1,
source_ip_addresses='192.168.0.2/32')
rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(rules), 2)
def test_delete_router_nat_rules_by_match_no_match_expected(self):
lrouter = self._prepare_nat_rules_for_delete_tests()
rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(rules), 3)
routerlib.delete_nat_rules_by_match(
self.fake_cluster, lrouter['uuid'], 'SomeWeirdType', 0)
rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(rules), 3)
routerlib.delete_nat_rules_by_match(
self.fake_cluster, lrouter['uuid'], 'DestinationNatRule', 0,
destination_ip_addresses='99.99.99.99')
rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(rules), 3)
def test_delete_router_nat_rules_by_match_no_match_raises(self):
lrouter = self._prepare_nat_rules_for_delete_tests()
rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(rules), 3)
self.assertRaises(
nsx_exc.NatRuleMismatch,
routerlib.delete_nat_rules_by_match,
self.fake_cluster, lrouter['uuid'],
'SomeWeirdType', 1, 1)
def test_delete_nat_rules_by_match_len_mismatch_does_not_raise(self):
lrouter = self._prepare_nat_rules_for_delete_tests()
rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(rules), 3)
deleted_rules = routerlib.delete_nat_rules_by_match(
self.fake_cluster, lrouter['uuid'],
'DestinationNatRule',
max_num_expected=1, min_num_expected=1,
raise_on_len_mismatch=False,
destination_ip_addresses='99.99.99.99')
self.assertEqual(0, deleted_rules)
# add an extra rule to emulate a duplicate one
with mock.patch.object(self.fake_cluster.api_client,
'get_version',
new=lambda: version_module.Version('2.0')):
routerlib.create_lrouter_snat_rule(
self.fake_cluster, lrouter['uuid'],
'10.0.0.2', '10.0.0.2', order=220,
match_criteria={'source_ip_addresses': '192.168.0.0/24'})
deleted_rules_2 = routerlib.delete_nat_rules_by_match(
self.fake_cluster, lrouter['uuid'], 'SourceNatRule',
min_num_expected=1, max_num_expected=1,
raise_on_len_mismatch=False,
source_ip_addresses='192.168.0.0/24')
self.assertEqual(2, deleted_rules_2)
| apache-2.0 |
kau-masa/Komi_SmartAlarmClock | src/actions/Alert.py | 9 | 1526 | from bottle import Bottle, route, run, template, static_file, get, jinja2_template as template, post, request, response, redirect
import json
import requests
import runtime
import actions
"""
Alert
"""
defaultInputParams = {'sound':'', 'message':'', 'loop':False}
defaultOutputParams = {}
sim_parameters = dict()
# Register actions
def registerAction(user, project, version, sim_id):
sim_parameters['user'] = user
sim_parameters['project'] = project
sim_parameters['version'] = version
sim_parameters['sim_id'] = sim_id
runtime.register_webActions(user, project, version, sim_id, 'Alert', '/home/actions/Alert/')
def start():
inputParams = request.json['input']
request_Id = request.json['requestId']
inputParams = actions.applyDefaultValues(inputParams, defaultInputParams)
jsonObj = '{"message": "' + inputParams['message'] +'", "song": "'+ inputParams['sound'] +'", "loop": "'+ str(inputParams['loop']) +'"}'
runtime.eventQueue.put(jsonObj)
success_url = '{}/api/v1/{}/{}/{}/{}/actions/{}/success'.format(runtime.CRAFT_RUNTIME_SERVER_URL, sim_parameters['user'],sim_parameters['project'],sim_parameters['version'],sim_parameters['sim_id'], request_Id)
r = requests.post(success_url)
return
def cancel():
request_Id = request.json['requestId']
cancel_url = '{}/api/v1/{}/{}/{}/{}/actions/{}/cancelation'.format(runtime.CRAFT_RUNTIME_SERVER_URL, sim_parameters['user'],sim_parameters['project'],sim_parameters['version'],sim_parameters['sim_id'], request_Id)
r = requests.post(cancel_url)
return
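# Note: start() above assembles the event payload by string concatenation,
# which breaks if the message contains quotes or backslashes. A sketch of an
# equivalent construction using the json module already imported above
# (illustrative only, not part of the original action):
#
#   def build_event_payload(input_params):
#       # json.dumps handles quoting and escaping of user-supplied fields.
#       return json.dumps({'message': input_params['message'],
#                          'song': input_params['sound'],
#                          'loop': str(input_params['loop'])})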
| bsd-3-clause |
bigdatauniversity/edx-platform | lms/djangoapps/courseware/management/commands/dump_course_structure.py | 30 | 4044 | """
A Django command that dumps the structure of a course as a JSON object.
The resulting JSON object has one entry for each module in the course:
{
"$module_url": {
"category": "$module_category",
"children": [$module_children_urls... ],
"metadata": {$module_metadata}
},
"$module_url": ....
...
}
"""
import json
from optparse import make_option
from textwrap import dedent
from django.core.management.base import BaseCommand, CommandError
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.inheritance import own_metadata, compute_inherited_metadata
from xblock.fields import Scope
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
FILTER_LIST = ['xml_attributes']
INHERITED_FILTER_LIST = ['children', 'xml_attributes']
class Command(BaseCommand):
"""
Write out to stdout a structural and metadata information for a
course as a JSON object
"""
args = "<course_id>"
help = dedent(__doc__).strip()
option_list = BaseCommand.option_list + (
make_option('--modulestore',
action='store',
default='default',
help='Name of the modulestore'),
make_option('--inherited',
action='store_true',
default=False,
help='Whether to include inherited metadata'),
make_option('--inherited_defaults',
action='store_true',
default=False,
help='Whether to include default values of inherited metadata'),
)
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError("course_id not specified")
# Get the modulestore
store = modulestore()
# Get the course data
try:
course_key = CourseKey.from_string(args[0])
except InvalidKeyError:
raise CommandError("Invalid course_id")
course = store.get_course(course_key)
if course is None:
raise CommandError("Invalid course_id")
# Precompute inherited metadata at the course level, if needed:
if options['inherited']:
compute_inherited_metadata(course)
# Convert course data to dictionary and dump it as JSON to stdout
info = dump_module(course, inherited=options['inherited'], defaults=options['inherited_defaults'])
return json.dumps(info, indent=2, sort_keys=True, default=unicode)
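# Example invocation (the course id is hypothetical; the --inherited flag is
# the option defined above):
#
#   python manage.py lms dump_course_structure --inherited \
#       "course-v1:OrgX+CS101+2015" > course_structure.json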
def dump_module(module, destination=None, inherited=False, defaults=False):
"""
Add the module and all its children to the destination dictionary
as a flat structure.
"""
destination = destination if destination else {}
items = own_metadata(module)
filtered_metadata = {k: v for k, v in items.iteritems() if k not in FILTER_LIST}
destination[unicode(module.location)] = {
'category': module.location.category,
'children': [unicode(child) for child in getattr(module, 'children', [])],
'metadata': filtered_metadata,
}
if inherited:
# When calculating inherited metadata, don't include existing
# locally-defined metadata
inherited_metadata_filter_list = list(filtered_metadata.keys())
inherited_metadata_filter_list.extend(INHERITED_FILTER_LIST)
def is_inherited(field):
if field.name in inherited_metadata_filter_list:
return False
elif field.scope != Scope.settings:
return False
elif defaults:
return True
else:
return field.values != field.default
inherited_metadata = {field.name: field.read_json(module) for field in module.fields.values() if is_inherited(field)}
destination[unicode(module.location)]['inherited_metadata'] = inherited_metadata
for child in module.get_children():
dump_module(child, destination, inherited, defaults)
return destination
| agpl-3.0 |
jihyun-kim/heekscnc | pycnc/wxNiceTextCtrl.py | 25 | 2065 | import wx
import HeeksCNC
class DoubleCtrl(wx.TextCtrl):
def __init__(self, parent, id = wx.ID_ANY, factor = 1.0):
wx.TextCtrl.__init__(self, parent, id)
self.factor = factor
def GetValue(self):
try:
return float(wx.TextCtrl.GetValue(self))/self.factor
except:
return 0.0
def DoubleToString(self, value):
return str(value * self.factor)
def SetValue(self, value):
wx.TextCtrl.SetValue(self, self.DoubleToString(value))
class LengthCtrl(DoubleCtrl):
def __init__(self, parent, id = wx.ID_ANY):
factor = 1.0/HeeksCNC.cad.get_view_units()
DoubleCtrl.__init__(self, parent, id, factor)
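# Note on the factor above: the CAD model appears to store lengths in
# millimetres, so when get_view_units() returns 25.4 (inch display) the factor
# becomes 1/25.4 and GetValue()/SetValue() convert between model units and the
# units shown to the user. This reading is inferred from DoubleCtrl's factor
# handling rather than stated anywhere in the file.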
class GeomCtrl(wx.TextCtrl):
# for now this is just a list of profile names with quotes around them and spaces between them, but later it might have a diagram showing the geometry
def __init__(self, parent, id):
wx.TextCtrl.__init__(self, parent, id)
def GetGeomList(self):
str = wx.TextCtrl.GetValue(self)
str = str.replace('\\', '/')
s = ""
geom_list = []
length = len(str)
name_started = False
for i in range(0, length):
if str[i] == '"':
if name_started:
geom_list.append(s)
s = ""
name_started = False
else:
name_started = True
elif str[i] == " " and (name_started == False):
if len(s)>0:
geom_list.append(s)
s = ""
else:
s += str[i]
if len(s)>0:
geom_list.append(s)
s = ""
return geom_list
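# Example of the parsing done by GetGeomList above (ctrl is a hypothetical
# GeomCtrl instance; the result follows directly from the loop logic):
#
#   ctrl.SetValue('"profile 1" profile2 "pocket a"')
#   ctrl.GetGeomList()  # -> ['profile 1', 'profile2', 'pocket a']
#
# Quoted names may contain spaces; unquoted names are split on spaces.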
def SetFromGeomList(self, geom_list):
first = True
str = ""
for geom in geom_list:
if first == False:
str = str + " "
else:
first = False
str += geom
wx.TextCtrl.SetValue(self, str)
| bsd-3-clause |
z0by/django | django/contrib/gis/utils/ogrinspect.py | 73 | 9136 | """
This module is for inspecting OGR data sources and generating
GeoDjango models and/or mapping dictionaries for use with the
`LayerMapping` utility.
"""
from django.contrib.gis.gdal import DataSource
from django.contrib.gis.gdal.field import (
OFTDate, OFTDateTime, OFTInteger, OFTInteger64, OFTReal, OFTString,
OFTTime,
)
from django.utils import six
from django.utils.six.moves import zip
def mapping(data_source, geom_name='geom', layer_key=0, multi_geom=False):
"""
Given a DataSource, generates a dictionary that may be used
for invoking the LayerMapping utility.
Keyword Arguments:
`geom_name` => The name of the geometry field to use for the model.
`layer_key` => The key for specifying which layer in the DataSource to use;
defaults to 0 (the first layer). May be an integer index or a string
identifier for the layer.
`multi_geom` => Boolean (default: False) - specify as multigeometry.
"""
if isinstance(data_source, six.string_types):
# Instantiating the DataSource from the string.
data_source = DataSource(data_source)
elif isinstance(data_source, DataSource):
pass
else:
raise TypeError('Data source parameter must be a string or a DataSource object.')
# Creating the dictionary.
_mapping = {}
# Generating the field name for each field in the layer.
for field in data_source[layer_key].fields:
mfield = field.lower()
if mfield[-1:] == '_':
mfield += 'field'
_mapping[mfield] = field
gtype = data_source[layer_key].geom_type
if multi_geom:
gtype.to_multi()
_mapping[geom_name] = str(gtype).upper()
return _mapping
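# A usage sketch for mapping() together with LayerMapping (the model class and
# shapefile path are hypothetical, for illustration only):
#
#   from django.contrib.gis.utils import LayerMapping, mapping
#   m = mapping('/data/world_borders.shp', geom_name='geom', multi_geom=True)
#   lm = LayerMapping(WorldBorder, '/data/world_borders.shp', m)
#   lm.save(verbose=True)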
def ogrinspect(*args, **kwargs):
"""
Given a data source (either a string or a DataSource object) and a string
model name this function will generate a GeoDjango model.
Usage:
>>> from django.contrib.gis.utils import ogrinspect
>>> ogrinspect('/path/to/shapefile.shp','NewModel')
...will print the model definition to stdout
or put this in a python script and use to redirect the output to a new
model like:
$ python generate_model.py > myapp/models.py
# generate_model.py
from django.contrib.gis.utils import ogrinspect
shp_file = 'data/mapping_hacks/world_borders.shp'
model_name = 'WorldBorders'
print(ogrinspect(shp_file, model_name, multi_geom=True, srid=4326,
geom_name='shapes', blank=True))
Required Arguments
`datasource` => string or DataSource object to file pointer
`model name` => string of name of new model class to create
Optional Keyword Arguments
`geom_name` => For specifying the model name for the Geometry Field.
Otherwise will default to `geom`
`layer_key` => The key for specifying which layer in the DataSource to use;
defaults to 0 (the first layer). May be an integer index or a string
identifier for the layer.
`srid` => The SRID to use for the Geometry Field. If it can be determined,
the SRID of the datasource is used.
`multi_geom` => Boolean (default: False) - specify as multigeometry.
`name_field` => String - specifies a field name to return for the
`__unicode__`/`__str__` function (which will be generated if specified).
`imports` => Boolean (default: True) - set to False to omit the
`from django.contrib.gis.db import models` code from the
autogenerated models thus avoiding duplicated imports when building
more than one model by batching ogrinspect()
`decimal` => Boolean or sequence (default: False). When set to True
all generated model fields corresponding to the `OFTReal` type will
be `DecimalField` instead of `FloatField`. A sequence of specific
field names to generate as `DecimalField` may also be used.
`blank` => Boolean or sequence (default: False). When set to True all
generated model fields will have `blank=True`. If the user wants
specific fields to have blank, then a list/tuple of OGR field
names may be used.
`null` => Boolean (default: False) - When set to True all generated
model fields will have `null=True`. If the user wants specific
fields to have null, then a list/tuple of OGR field
names may be used.
Note: This routine calls the _ogrinspect() helper to do the heavy lifting.
"""
return '\n'.join(s for s in _ogrinspect(*args, **kwargs))
def _ogrinspect(data_source, model_name, geom_name='geom', layer_key=0, srid=None,
multi_geom=False, name_field=None, imports=True,
decimal=False, blank=False, null=False):
"""
Helper routine for `ogrinspect` that generates GeoDjango models corresponding
to the given data source. See the `ogrinspect` docstring for more details.
"""
# Getting the DataSource
if isinstance(data_source, six.string_types):
data_source = DataSource(data_source)
elif isinstance(data_source, DataSource):
pass
else:
raise TypeError('Data source parameter must be a string or a DataSource object.')
# Getting the layer corresponding to the layer key and getting
# a string listing of all OGR fields in the Layer.
layer = data_source[layer_key]
ogr_fields = layer.fields
# Creating lists from the `null`, `blank`, and `decimal`
# keyword arguments.
def process_kwarg(kwarg):
if isinstance(kwarg, (list, tuple)):
return [s.lower() for s in kwarg]
elif kwarg:
return [s.lower() for s in ogr_fields]
else:
return []
null_fields = process_kwarg(null)
blank_fields = process_kwarg(blank)
decimal_fields = process_kwarg(decimal)
# Gets the `null` and `blank` keywords for the given field name.
def get_kwargs_str(field_name):
kwlist = []
if field_name.lower() in null_fields:
kwlist.append('null=True')
if field_name.lower() in blank_fields:
kwlist.append('blank=True')
if kwlist:
return ', ' + ', '.join(kwlist)
else:
return ''
# For those wishing to disable the imports.
if imports:
yield '# This is an auto-generated Django model module created by ogrinspect.'
yield 'from django.contrib.gis.db import models'
yield ''
yield 'class %s(models.Model):' % model_name
for field_name, width, precision, field_type in zip(
ogr_fields, layer.field_widths, layer.field_precisions, layer.field_types):
# The model field name.
mfield = field_name.lower()
if mfield[-1:] == '_':
mfield += 'field'
# Getting the keyword args string.
kwargs_str = get_kwargs_str(field_name)
if field_type is OFTReal:
# By default OFTReals are mapped to `FloatField`, however, they
# may also be mapped to `DecimalField` if specified in the
# `decimal` keyword.
if field_name.lower() in decimal_fields:
yield ' %s = models.DecimalField(max_digits=%d, decimal_places=%d%s)' % (
mfield, width, precision, kwargs_str
)
else:
yield ' %s = models.FloatField(%s)' % (mfield, kwargs_str[2:])
elif field_type is OFTInteger:
yield ' %s = models.IntegerField(%s)' % (mfield, kwargs_str[2:])
elif field_type is OFTInteger64:
yield ' %s = models.BigIntegerField(%s)' % (mfield, kwargs_str[2:])
elif field_type is OFTString:
yield ' %s = models.CharField(max_length=%s%s)' % (mfield, width, kwargs_str)
elif field_type is OFTDate:
yield ' %s = models.DateField(%s)' % (mfield, kwargs_str[2:])
elif field_type is OFTDateTime:
yield ' %s = models.DateTimeField(%s)' % (mfield, kwargs_str[2:])
elif field_type is OFTTime:
yield ' %s = models.TimeField(%s)' % (mfield, kwargs_str[2:])
else:
raise TypeError('Unknown field type %s in %s' % (field_type, mfield))
# TODO: Autodetection of multigeometry types (see #7218).
gtype = layer.geom_type
if multi_geom:
gtype.to_multi()
geom_field = gtype.django
# Setting up the SRID keyword string.
if srid is None:
if layer.srs is None:
srid_str = 'srid=-1'
else:
srid = layer.srs.srid
if srid is None:
srid_str = 'srid=-1'
elif srid == 4326:
# WGS84 is already the default.
srid_str = ''
else:
srid_str = 'srid=%s' % srid
else:
srid_str = 'srid=%s' % srid
yield ' %s = models.%s(%s)' % (geom_name, geom_field, srid_str)
yield ' objects = models.GeoManager()'
if name_field:
yield ''
yield ' def __%s__(self): return self.%s' % (
'str' if six.PY3 else 'unicode', name_field)
| bsd-3-clause |
sharma1nitish/phantomjs | src/qt/qtwebkit/Tools/TestResultServer/generate_builders_json.py | 119 | 4429 | #!/usr/bin/env python
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import logging
import optparse
import os
import urllib2
# FIXME: See if Tools/Scripts/webkitpy/layout_tests/port/builders.py should also read
# the output json file here as its data source.
def master_json_url(master_url):
return master_url + '/json/builders'
def builder_json_url(master_url, builder):
return master_json_url(master_url) + '/' + urllib2.quote(builder)
def cached_build_json_url(master_url, builder, build_number):
return builder_json_url(master_url, builder) + '/builds/' + str(build_number)
def fetch_json(url):
logging.debug('Fetching %s' % url)
return json.load(urllib2.urlopen(url))
def insert_builder_and_test_data(masters):
for master in masters:
master_url = master['url']
tests_object = {}
master['tests'] = tests_object
for builder in fetch_json(master_json_url(master_url)):
build_data = fetch_json(builder_json_url(master_url, builder))
cached_builds = build_data['cachedBuilds']
current_builds = build_data['currentBuilds']
latest_cached_build = cached_builds.pop()
while latest_cached_build in current_builds and len(cached_builds):
latest_cached_build = cached_builds.pop()
for step in fetch_json(cached_build_json_url(master_url, builder, latest_cached_build))['steps']:
step_name = step['name']
if step_name != 'layout-test':
continue
# Adjust for backwards compatibility
step_name = 'layout-tests'
if step_name not in tests_object:
tests_object[step_name] = {'builders': []}
tests_object[step_name]['builders'].append(builder)
for step_name in tests_object:
tests_object[step_name]['builders'].sort()
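# For illustration, after insert_builder_and_test_data() each master dict carries
# a 'tests' entry shaped roughly like this (the builder name is hypothetical):
#
#   {'name': 'webkit.org',
#    'url': 'http://build.webkit.org',
#    'tests': {'layout-tests': {'builders': ['Apple MountainLion Release (Tests)']}}}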
def main():
option_parser = optparse.OptionParser()
option_parser.add_option('-v', '--verbose', action='store_true', default=False, help='Print debug logging')
options, args = option_parser.parse_args()
logging.getLogger().setLevel(logging.DEBUG if options.verbose else logging.INFO)
masters = [
{'name': 'webkit.org', 'url': 'http://build.webkit.org'},
]
insert_builder_and_test_data(masters)
json_file_prefix = ('// This file is auto-generated by Tools/TestResultServer/generate_builders_json.py. It should not be manually modified.\n'
'// It uses jsonp instead of proper json because we want to be able to load it from a file URL for local testing.\n'
'LOAD_BUILDBOT_DATA(')
    json_file_suffix = ');\n'
json_file = open(os.path.join('static-dashboards', 'builders.jsonp'), 'w')
json_file.write(json_file_prefix + json.dumps(masters, separators=(', ', ': '), indent=4, sort_keys=True) + json_file_suffix)
if __name__ == "__main__":
main()
| bsd-3-clause |
pcm17/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/dataframe_test.py | 62 | 3753 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the DataFrame class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.tests.dataframe import mocks
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
def setup_test_df():
"""Create a dataframe populated with some test columns."""
df = learn.DataFrame()
df["a"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor a", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
df["b"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor b", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out2")
df["c"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor c", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
return df
class DataFrameTest(test.TestCase):
"""Test of `DataFrame`."""
def test_create(self):
df = setup_test_df()
self.assertEqual(df.columns(), frozenset(["a", "b", "c"]))
def test_select_columns(self):
df = setup_test_df()
df2 = df.select_columns(["a", "c"])
self.assertEqual(df2.columns(), frozenset(["a", "c"]))
def test_exclude_columns(self):
df = setup_test_df()
df2 = df.exclude_columns(["a", "c"])
self.assertEqual(df2.columns(), frozenset(["b"]))
def test_get_item(self):
df = setup_test_df()
c1 = df["b"]
self.assertEqual(
mocks.MockTensor("Mock Tensor 2", dtypes.int32), c1.build())
def test_del_item_column(self):
df = setup_test_df()
self.assertEqual(3, len(df))
del df["b"]
self.assertEqual(2, len(df))
self.assertEqual(df.columns(), frozenset(["a", "c"]))
def test_set_item_column(self):
df = setup_test_df()
self.assertEqual(3, len(df))
col1 = mocks.MockSeries("QuackColumn",
mocks.MockTensor("Tensor ", dtypes.int32))
df["quack"] = col1
self.assertEqual(4, len(df))
col2 = df["quack"]
self.assertEqual(col1, col2)
def test_set_item_column_multi(self):
df = setup_test_df()
self.assertEqual(3, len(df))
col1 = mocks.MockSeries("QuackColumn", [])
col2 = mocks.MockSeries("MooColumn", [])
df["quack", "moo"] = [col1, col2]
self.assertEqual(5, len(df))
col3 = df["quack"]
self.assertEqual(col1, col3)
col4 = df["moo"]
self.assertEqual(col2, col4)
def test_set_item_pandas(self):
# TODO(jamieas)
pass
def test_set_item_numpy(self):
# TODO(jamieas)
pass
def test_build(self):
df = setup_test_df()
result = df.build()
expected = {
"a": mocks.MockTensor("Mock Tensor 1", dtypes.int32),
"b": mocks.MockTensor("Mock Tensor 2", dtypes.int32),
"c": mocks.MockTensor("Mock Tensor 1", dtypes.int32)
}
self.assertEqual(expected, result)
if __name__ == "__main__":
test.main()
| apache-2.0 |
me-systeme/gsv8pypi | GSV6_FrameRouter.py | 1 | 5296 | # -*- coding: utf-8 -*-
__author__ = 'Dennis Rump'
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) 2015 Dennis Rump
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Hiermit wird unentgeltlich, jeder Person, die eine Kopie der Software
# und der zugehörigen Dokumentationen (die "Software") erhält, die
# Erlaubnis erteilt, uneingeschränkt zu benutzen, inklusive und ohne
# Ausnahme, dem Recht, sie zu verwenden, kopieren, ändern, fusionieren,
# verlegen, verbreiten, unter-lizenzieren und/oder zu verkaufen, und
# Personen, die diese Software erhalten, diese Rechte zu geben, unter
# den folgenden Bedingungen:
#
# Der obige Urheberrechtsvermerk und dieser Erlaubnisvermerk sind in
# alle Kopien oder Teilkopien der Software beizulegen.
#
# DIE SOFTWARE WIRD OHNE JEDE AUSDRÜCKLICHE ODER IMPLIZIERTE GARANTIE
# BEREITGESTELLT, EINSCHLIESSLICH DER GARANTIE ZUR BENUTZUNG FÜR DEN
# VORGESEHENEN ODER EINEM BESTIMMTEN ZWECK SOWIE JEGLICHER
# RECHTSVERLETZUNG, JEDOCH NICHT DARAUF BESCHRÄNKT. IN KEINEM FALL SIND
# DIE AUTOREN ODER COPYRIGHTINHABER FÜR JEGLICHEN SCHADEN ODER SONSTIGE
# ANSPRUCH HAFTBAR ZU MACHEN, OB INFOLGE DER ERFÜLLUNG VON EINEM
# VERTRAG, EINEM DELIKT ODER ANDERS IM ZUSAMMENHANG MIT DER BENUTZUNG
# ODER SONSTIGE VERWENDUNG DER SOFTWARE ENTSTANDEN.
#
###############################################################################
import logging
import threading
from Queue import Queue, Empty
from GSV6_MessFrameHandler import MessFrameHandler
class FrameRouter(threading.Thread):
lock = threading.Lock()
#def __init__(self, frameQueue, antwortQueue, messertRotatingQueue, gsv6Lib):
def __init__(self, frameQueue, antwortQueue, _lastMesswert, gsv6Lib):
threading.Thread.__init__(self)
self.frameQueue = frameQueue
self.antwortQueue = antwortQueue
# self.messertRotatingQueue = messertRotatingQueue
self.lastMesswert = _lastMesswert
self.gsv6 = gsv6Lib
self.running = False
# self.messFrameEventHandler = MessFrameHandler(self.messertRotatingQueue, self.gsv6)
self.messFrameEventHandler = MessFrameHandler(self.lastMesswert, self.gsv6)
# self.antwortFrameEventHandler = AntwortFrameHandler(self.gsv6, self.antwortQueue, self.messFrameEventHandler)
# fallback, this flag kills this thread if main thread killed
self.daemon = True
def run(self):
        # worker thread: routes AntwortFrames (answer frames) and MessFrames (measurement frames)
FrameRouter.lock.acquire()
self.running = True
FrameRouter.lock.release()
logging.getLogger('gsv8.FrameRouter').info('started')
        # enter router loop
while self.running:
try:
# newFrame = self.frameQueue.popleft()
newFrame = self.frameQueue.get()
except IndexError:
pass
            except Empty:
pass
else:
logging.getLogger('gsv8.FrameRouter').debug('new Frame: ' + newFrame.toString())
if newFrame.getFrameType() == 0:
# MesswertFrame
logging.getLogger('gsv8.FrameRouter').debug('Messwert erhalten')
self.messFrameEventHandler.computeFrame(newFrame)
elif newFrame.getFrameType() == 1:
logging.getLogger('gsv8').debug("Antwort eralten.")
# AntwortFrame
# self.antwortFrameEventHandler.computeFrame(newFrame)
self.antwortQueue.put(newFrame)
else:
# error
logging.getLogger('gsv8.FrameRouter').debug(
'nothing to do with an FrameType != Messwert/Antwort')
logging.getLogger('gsv8.FrameRouter').debug('exit')
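    # Illustrative wiring (a sketch; the queue, measurement and library objects are
    # assumed to be created by the surrounding gsv8 code, so the names below are
    # placeholders):
    #
    #   router = FrameRouter(frame_queue, antwort_queue, last_messwert, gsv6_lib)
    #   router.start()   # frames put on frame_queue are routed from here on
    #   router.stop()    # ends the routing loop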
def stop(self):
FrameRouter.lock.acquire()
self.running = False
FrameRouter.lock.release()
def startCSVRecording(self, csvFilepath, prefix):
self.messFrameEventHandler.startRecording(csvFilepath, prefix)
def stopCSVRecording(self):
self.messFrameEventHandler.stopRecording()
def isRecording(self):
        return self.messFrameEventHandler.doRecording
 | mit |
skycucumber/restful | python/venv/lib/python2.7/site-packages/pip/_vendor/distlib/version.py | 166 | 21803 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""
Implementation of a flexible versioning scheme providing support for PEP-386,
distribute-compatible and semantic versioning.
"""
import logging
import re
from .compat import string_types
__all__ = ['NormalizedVersion', 'NormalizedMatcher',
'LegacyVersion', 'LegacyMatcher',
'SemanticVersion', 'SemanticMatcher',
'UnsupportedVersionError', 'get_scheme']
logger = logging.getLogger(__name__)
class UnsupportedVersionError(ValueError):
"""This is an unsupported version."""
pass
class Version(object):
def __init__(self, s):
self._string = s = s.strip()
self._parts = parts = self.parse(s)
assert isinstance(parts, tuple)
assert len(parts) > 0
def parse(self, s):
raise NotImplementedError('please implement in a subclass')
def _check_compatible(self, other):
if type(self) != type(other):
raise TypeError('cannot compare %r and %r' % (self, other))
def __eq__(self, other):
self._check_compatible(other)
return self._parts == other._parts
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
self._check_compatible(other)
return self._parts < other._parts
def __gt__(self, other):
return not (self.__lt__(other) or self.__eq__(other))
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def __ge__(self, other):
return self.__gt__(other) or self.__eq__(other)
# See http://docs.python.org/reference/datamodel#object.__hash__
def __hash__(self):
return hash(self._parts)
def __repr__(self):
return "%s('%s')" % (self.__class__.__name__, self._string)
def __str__(self):
return self._string
@property
def is_prerelease(self):
raise NotImplementedError('Please implement in subclasses.')
class Matcher(object):
version_class = None
dist_re = re.compile(r"^(\w[\s\w'.-]*)(\((.*)\))?")
comp_re = re.compile(r'^(<=|>=|<|>|!=|==|~=)?\s*([^\s,]+)$')
num_re = re.compile(r'^\d+(\.\d+)*$')
# value is either a callable or the name of a method
_operators = {
'<': lambda v, c, p: v < c,
'>': lambda v, c, p: v > c,
'<=': lambda v, c, p: v == c or v < c,
'>=': lambda v, c, p: v == c or v > c,
'==': lambda v, c, p: v == c,
# by default, compatible => >=.
'~=': lambda v, c, p: v == c or v > c,
'!=': lambda v, c, p: v != c,
}
def __init__(self, s):
if self.version_class is None:
raise ValueError('Please specify a version class')
self._string = s = s.strip()
m = self.dist_re.match(s)
if not m:
raise ValueError('Not valid: %r' % s)
groups = m.groups('')
self.name = groups[0].strip()
self.key = self.name.lower() # for case-insensitive comparisons
clist = []
if groups[2]:
constraints = [c.strip() for c in groups[2].split(',')]
for c in constraints:
m = self.comp_re.match(c)
if not m:
raise ValueError('Invalid %r in %r' % (c, s))
groups = m.groups()
op = groups[0] or '~='
s = groups[1]
if s.endswith('.*'):
if op not in ('==', '!='):
raise ValueError('\'.*\' not allowed for '
'%r constraints' % op)
# Could be a partial version (e.g. for '2.*') which
# won't parse as a version, so keep it as a string
vn, prefix = s[:-2], True
if not self.num_re.match(vn):
# Just to check that vn is a valid version
self.version_class(vn)
else:
# Should parse as a version, so we can create an
# instance for the comparison
vn, prefix = self.version_class(s), False
clist.append((op, vn, prefix))
self._parts = tuple(clist)
def match(self, version):
"""
Check if the provided version matches the constraints.
:param version: The version to match against this instance.
        :type version: String or :class:`Version` instance.
"""
if isinstance(version, string_types):
version = self.version_class(version)
for operator, constraint, prefix in self._parts:
f = self._operators.get(operator)
if isinstance(f, string_types):
f = getattr(self, f)
if not f:
msg = ('%r not implemented '
'for %s' % (operator, self.__class__.__name__))
raise NotImplementedError(msg)
if not f(version, constraint, prefix):
return False
return True
@property
def exact_version(self):
result = None
if len(self._parts) == 1 and self._parts[0][0] == '==':
result = self._parts[0][1]
return result
def _check_compatible(self, other):
if type(self) != type(other) or self.name != other.name:
raise TypeError('cannot compare %s and %s' % (self, other))
def __eq__(self, other):
self._check_compatible(other)
return self.key == other.key and self._parts == other._parts
def __ne__(self, other):
return not self.__eq__(other)
# See http://docs.python.org/reference/datamodel#object.__hash__
def __hash__(self):
return hash(self.key) + hash(self._parts)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self._string)
def __str__(self):
return self._string
PEP426_VERSION_RE = re.compile(r'^(\d+\.\d+(\.\d+)*)((a|b|c|rc)(\d+))?'
r'(\.(post)(\d+))?(\.(dev)(\d+))?'
r'(-(\d+(\.\d+)?))?$')
def _pep426_key(s):
s = s.strip()
m = PEP426_VERSION_RE.match(s)
if not m:
raise UnsupportedVersionError('Not a valid version: %s' % s)
groups = m.groups()
nums = tuple(int(v) for v in groups[0].split('.'))
while len(nums) > 1 and nums[-1] == 0:
nums = nums[:-1]
pre = groups[3:5]
post = groups[6:8]
dev = groups[9:11]
local = groups[12]
if pre == (None, None):
pre = ()
else:
pre = pre[0], int(pre[1])
if post == (None, None):
post = ()
else:
post = post[0], int(post[1])
if dev == (None, None):
dev = ()
else:
dev = dev[0], int(dev[1])
if local is None:
local = ()
else:
local = tuple([int(s) for s in local.split('.')])
if not pre:
# either before pre-release, or final release and after
if not post and dev:
# before pre-release
pre = ('a', -1) # to sort before a0
else:
pre = ('z',) # to sort after all pre-releases
# now look at the state of post and dev.
if not post:
post = ('_',) # sort before 'a'
if not dev:
dev = ('final',)
#print('%s -> %s' % (s, m.groups()))
return nums, pre, post, dev, local
_normalized_key = _pep426_key
class NormalizedVersion(Version):
"""A rational version.
Good:
1.2 # equivalent to "1.2.0"
1.2.0
1.2a1
1.2.3a2
1.2.3b1
1.2.3c1
1.2.3.4
TODO: fill this out
Bad:
    1 # minimum two numbers
1.2a # release level must have a release serial
1.2.3b
"""
def parse(self, s):
result = _normalized_key(s)
# _normalized_key loses trailing zeroes in the release
# clause, since that's needed to ensure that X.Y == X.Y.0 == X.Y.0.0
# However, PEP 440 prefix matching needs it: for example,
# (~= 1.4.5.0) matches differently to (~= 1.4.5.0.0).
m = PEP426_VERSION_RE.match(s) # must succeed
groups = m.groups()
self._release_clause = tuple(int(v) for v in groups[0].split('.'))
return result
PREREL_TAGS = set(['a', 'b', 'c', 'rc', 'dev'])
@property
def is_prerelease(self):
return any(t[0] in self.PREREL_TAGS for t in self._parts if t)
def _match_prefix(x, y):
x = str(x)
y = str(y)
if x == y:
return True
if not x.startswith(y):
return False
n = len(y)
return x[n] == '.'
class NormalizedMatcher(Matcher):
version_class = NormalizedVersion
# value is either a callable or the name of a method
_operators = {
'~=': '_match_compatible',
'<': '_match_lt',
'>': '_match_gt',
'<=': '_match_le',
'>=': '_match_ge',
'==': '_match_eq',
'!=': '_match_ne',
}
def _match_lt(self, version, constraint, prefix):
if version >= constraint:
return False
release_clause = constraint._release_clause
pfx = '.'.join([str(i) for i in release_clause])
return not _match_prefix(version, pfx)
def _match_gt(self, version, constraint, prefix):
if version <= constraint:
return False
release_clause = constraint._release_clause
pfx = '.'.join([str(i) for i in release_clause])
return not _match_prefix(version, pfx)
def _match_le(self, version, constraint, prefix):
return version <= constraint
def _match_ge(self, version, constraint, prefix):
return version >= constraint
def _match_eq(self, version, constraint, prefix):
if not prefix:
result = (version == constraint)
else:
result = _match_prefix(version, constraint)
return result
def _match_ne(self, version, constraint, prefix):
if not prefix:
result = (version != constraint)
else:
result = not _match_prefix(version, constraint)
return result
def _match_compatible(self, version, constraint, prefix):
if version == constraint:
return True
if version < constraint:
return False
release_clause = constraint._release_clause
if len(release_clause) > 1:
release_clause = release_clause[:-1]
pfx = '.'.join([str(i) for i in release_clause])
return _match_prefix(version, pfx)
_REPLACEMENTS = (
(re.compile('[.+-]$'), ''), # remove trailing puncts
(re.compile(r'^[.](\d)'), r'0.\1'), # .N -> 0.N at start
(re.compile('^[.-]'), ''), # remove leading puncts
(re.compile(r'^\((.*)\)$'), r'\1'), # remove parentheses
(re.compile(r'^v(ersion)?\s*(\d+)'), r'\2'), # remove leading v(ersion)
(re.compile(r'^r(ev)?\s*(\d+)'), r'\2'), # remove leading v(ersion)
(re.compile('[.]{2,}'), '.'), # multiple runs of '.'
(re.compile(r'\b(alfa|apha)\b'), 'alpha'), # misspelt alpha
(re.compile(r'\b(pre-alpha|prealpha)\b'),
'pre.alpha'), # standardise
(re.compile(r'\(beta\)$'), 'beta'), # remove parentheses
)
_SUFFIX_REPLACEMENTS = (
(re.compile('^[:~._+-]+'), ''), # remove leading puncts
(re.compile('[,*")([\]]'), ''), # remove unwanted chars
(re.compile('[~:+_ -]'), '.'), # replace illegal chars
(re.compile('[.]{2,}'), '.'), # multiple runs of '.'
(re.compile(r'\.$'), ''), # trailing '.'
)
_NUMERIC_PREFIX = re.compile(r'(\d+(\.\d+)*)')
def _suggest_semantic_version(s):
"""
Try to suggest a semantic form for a version for which
_suggest_normalized_version couldn't come up with anything.
"""
result = s.strip().lower()
for pat, repl in _REPLACEMENTS:
result = pat.sub(repl, result)
if not result:
result = '0.0.0'
# Now look for numeric prefix, and separate it out from
# the rest.
#import pdb; pdb.set_trace()
m = _NUMERIC_PREFIX.match(result)
if not m:
prefix = '0.0.0'
suffix = result
else:
prefix = m.groups()[0].split('.')
prefix = [int(i) for i in prefix]
while len(prefix) < 3:
prefix.append(0)
if len(prefix) == 3:
suffix = result[m.end():]
else:
suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():]
prefix = prefix[:3]
prefix = '.'.join([str(i) for i in prefix])
suffix = suffix.strip()
if suffix:
#import pdb; pdb.set_trace()
# massage the suffix.
for pat, repl in _SUFFIX_REPLACEMENTS:
suffix = pat.sub(repl, suffix)
if not suffix:
result = prefix
else:
sep = '-' if 'dev' in suffix else '+'
result = prefix + sep + suffix
if not is_semver(result):
result = None
return result
def _suggest_normalized_version(s):
"""Suggest a normalized version close to the given version string.
If you have a version string that isn't rational (i.e. NormalizedVersion
doesn't like it) then you might be able to get an equivalent (or close)
rational version from this function.
This does a number of simple normalizations to the given string, based
on observation of versions currently in use on PyPI. Given a dump of
    those versions during PyCon 2009, 4287 of them:
- 2312 (53.93%) match NormalizedVersion without change
with the automatic suggestion
- 3474 (81.04%) match when using this suggestion method
@param s {str} An irrational version string.
@returns A rational version string, or None, if couldn't determine one.
"""
try:
_normalized_key(s)
return s # already rational
except UnsupportedVersionError:
pass
rs = s.lower()
# part of this could use maketrans
for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'),
('beta', 'b'), ('rc', 'c'), ('-final', ''),
('-pre', 'c'),
('-release', ''), ('.release', ''), ('-stable', ''),
('+', '.'), ('_', '.'), (' ', ''), ('.final', ''),
('final', '')):
rs = rs.replace(orig, repl)
# if something ends with dev or pre, we add a 0
rs = re.sub(r"pre$", r"pre0", rs)
rs = re.sub(r"dev$", r"dev0", rs)
# if we have something like "b-2" or "a.2" at the end of the
    # version, that is probably beta, alpha, etc
# let's remove the dash or dot
rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs)
# 1.0-dev-r371 -> 1.0.dev371
# 0.1-dev-r79 -> 0.1.dev79
rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs)
# Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1
rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs)
# Clean: v0.3, v1.0
if rs.startswith('v'):
rs = rs[1:]
# Clean leading '0's on numbers.
#TODO: unintended side-effect on, e.g., "2003.05.09"
# PyPI stats: 77 (~2%) better
rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs)
# Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers
# zero.
# PyPI stats: 245 (7.56%) better
rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs)
# the 'dev-rNNN' tag is a dev tag
rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs)
# clean the - when used as a pre delimiter
rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs)
# a terminal "dev" or "devel" can be changed into ".dev0"
rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs)
# a terminal "dev" can be changed into ".dev0"
rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs)
# a terminal "final" or "stable" can be removed
rs = re.sub(r"(final|stable)$", "", rs)
# The 'r' and the '-' tags are post release tags
# 0.4a1.r10 -> 0.4a1.post10
# 0.9.33-17222 -> 0.9.33.post17222
# 0.9.33-r17222 -> 0.9.33.post17222
rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs)
# Clean 'r' instead of 'dev' usage:
# 0.9.33+r17222 -> 0.9.33.dev17222
# 1.0dev123 -> 1.0.dev123
# 1.0.git123 -> 1.0.dev123
# 1.0.bzr123 -> 1.0.dev123
# 0.1a0dev.123 -> 0.1a0.dev123
# PyPI stats: ~150 (~4%) better
rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs)
# Clean '.pre' (normalized from '-pre' above) instead of 'c' usage:
# 0.2.pre1 -> 0.2c1
# 0.2-c1 -> 0.2c1
# 1.0preview123 -> 1.0c123
# PyPI stats: ~21 (0.62%) better
rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs)
# Tcl/Tk uses "px" for their post release markers
rs = re.sub(r"p(\d+)$", r".post\1", rs)
try:
_normalized_key(rs)
except UnsupportedVersionError:
rs = None
return rs
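# A few illustrative conversions, derived by following the substitutions above
# (not an exhaustive specification):
#
#   _suggest_normalized_version('v1.0') == '1.0'
#   _suggest_normalized_version('1.0-rc1') == '1.0c1'
#   _suggest_normalized_version('1.0dev') == '1.0.dev0'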
#
# Legacy version processing (distribute-compatible)
#
_VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I)
_VERSION_REPLACE = {
'pre': 'c',
'preview': 'c',
'-': 'final-',
'rc': 'c',
'dev': '@',
'': None,
'.': None,
}
def _legacy_key(s):
def get_parts(s):
result = []
for p in _VERSION_PART.split(s.lower()):
p = _VERSION_REPLACE.get(p, p)
if p:
if '0' <= p[:1] <= '9':
p = p.zfill(8)
else:
p = '*' + p
result.append(p)
result.append('*final')
return result
result = []
for p in get_parts(s):
if p.startswith('*'):
if p < '*final':
while result and result[-1] == '*final-':
result.pop()
while result and result[-1] == '00000000':
result.pop()
result.append(p)
return tuple(result)
class LegacyVersion(Version):
def parse(self, s):
return _legacy_key(s)
PREREL_TAGS = set(
['*a', '*alpha', '*b', '*beta', '*c', '*rc', '*r', '*@', '*pre']
)
@property
def is_prerelease(self):
return any(x in self.PREREL_TAGS for x in self._parts)
class LegacyMatcher(Matcher):
version_class = LegacyVersion
_operators = dict(Matcher._operators)
_operators['~='] = '_match_compatible'
    numeric_re = re.compile(r'^(\d+(\.\d+)*)')
def _match_compatible(self, version, constraint, prefix):
if version < constraint:
return False
m = self.numeric_re.match(str(constraint))
if not m:
logger.warning('Cannot compute compatible match for version %s '
' and constraint %s', version, constraint)
return True
s = m.groups()[0]
if '.' in s:
s = s.rsplit('.', 1)[0]
return _match_prefix(version, s)
#
# Semantic versioning
#
_SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)'
r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?'
r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I)
def is_semver(s):
return _SEMVER_RE.match(s)
def _semantic_key(s):
def make_tuple(s, absent):
if s is None:
result = (absent,)
else:
parts = s[1:].split('.')
# We can't compare ints and strings on Python 3, so fudge it
            # by zero-filling numeric values to simulate a numeric comparison
result = tuple([p.zfill(8) if p.isdigit() else p for p in parts])
return result
m = is_semver(s)
if not m:
raise UnsupportedVersionError(s)
groups = m.groups()
major, minor, patch = [int(i) for i in groups[:3]]
# choose the '|' and '*' so that versions sort correctly
pre, build = make_tuple(groups[3], '|'), make_tuple(groups[5], '*')
return (major, minor, patch), pre, build
class SemanticVersion(Version):
def parse(self, s):
return _semantic_key(s)
@property
def is_prerelease(self):
return self._parts[1][0] != '|'
class SemanticMatcher(Matcher):
version_class = SemanticVersion
class VersionScheme(object):
def __init__(self, key, matcher, suggester=None):
self.key = key
self.matcher = matcher
self.suggester = suggester
def is_valid_version(self, s):
try:
self.matcher.version_class(s)
result = True
except UnsupportedVersionError:
result = False
return result
def is_valid_matcher(self, s):
try:
self.matcher(s)
result = True
except UnsupportedVersionError:
result = False
return result
def is_valid_constraint_list(self, s):
"""
Used for processing some metadata fields
"""
return self.is_valid_matcher('dummy_name (%s)' % s)
def suggest(self, s):
if self.suggester is None:
result = None
else:
result = self.suggester(s)
return result
_SCHEMES = {
'normalized': VersionScheme(_normalized_key, NormalizedMatcher,
_suggest_normalized_version),
'legacy': VersionScheme(_legacy_key, LegacyMatcher, lambda self, s: s),
'semantic': VersionScheme(_semantic_key, SemanticMatcher,
_suggest_semantic_version),
}
_SCHEMES['default'] = _SCHEMES['normalized']
def get_scheme(name):
if name not in _SCHEMES:
raise ValueError('unknown scheme name: %r' % name)
return _SCHEMES[name]
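# Illustrative usage of the default scheme (a sketch, doctest-style; the project
# name 'example' is arbitrary):
#
#   >>> scheme = get_scheme('default')
#   >>> m = scheme.matcher('example (>= 1.0, < 2.0)')
#   >>> m.match('1.4'), m.match('2.0')
#   (True, False)
#   >>> scheme.suggest('1.0-rc1')
#   '1.0c1'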
| gpl-2.0 |
hashems/Mobile-Cloud-Development-Projects | appengine/flexible/mailjet/main.py | 7 | 2148 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START app]
import logging
import os
from flask import Flask, render_template, request
# [START config]
import mailjet_rest
MAILJET_API_KEY = os.environ['MAILJET_API_KEY']
MAILJET_API_SECRET = os.environ['MAILJET_API_SECRET']
MAILJET_SENDER = os.environ['MAILJET_SENDER']
# [END config]
app = Flask(__name__)
# [START send_message]
def send_message(to):
client = mailjet_rest.Client(
auth=(MAILJET_API_KEY, MAILJET_API_SECRET))
data = {
'FromEmail': MAILJET_SENDER,
'FromName': 'App Engine Flex Mailjet Sample',
'Subject': 'Example email.',
'Text-part': 'This is an example email.',
'Html-part': 'This is an <i>example</i> email.',
'Recipients': [{'Email': to}]
}
result = client.send.create(data=data)
return result.json()
# [END send_message]
@app.route('/')
def index():
return render_template('index.html')
@app.route('/send/email', methods=['POST'])
def send_email():
to = request.form.get('to')
result = send_message(to)
return 'Email sent, response: <pre>{}</pre>'.format(result)
@app.errorhandler(500)
def server_error(e):
logging.exception('An error occurred during a request.')
return """
An internal error occurred: <pre>{}</pre>
See logs for full stacktrace.
""".format(e), 500
if __name__ == '__main__':
# This is used when running locally. Gunicorn is used to run the
# application on Google App Engine. See entrypoint in app.yaml.
app.run(host='127.0.0.1', port=8080, debug=True)
# [END app]
| apache-2.0 |
jmargeta/scikit-learn | sklearn/utils/class_weight.py | 3 | 2062 | # Authors: Andreas Mueller
# License: Simplified BSD
import numpy as np
from .fixes import bincount
def compute_class_weight(class_weight, classes, y_ind):
"""Estimate class weights for unbalanced datasets.
Parameters
----------
class_weight : dict, 'auto' or None
        If 'auto', class weights will be given inversely proportional
to the frequency of the class in the data.
If a dictionary is given, keys are classes and values
are corresponding class weights.
If None is given, the class weights will be uniform.
classes : ndarray
        Array of the classes occurring in the data, as given by
``np.unique(y_org)`` with ``y_org`` the original class labels.
y_ind : array-like, shape=(n_samples,), dtype=int
Array of class indices per sample;
0 <= y_ind[i] < n_classes for i in range(n_samples).
Returns
-------
class_weight_vect : ndarray, shape=(n_classes,)
Array with class_weight_vect[i] the weight for i-th class
(as determined by sorting).
"""
if class_weight is None or len(class_weight) == 0:
# uniform class weights
weight = np.ones(classes.shape[0], dtype=np.float64, order='C')
elif class_weight == 'auto':
# inversely proportional to the number of samples in the class
counts = bincount(y_ind, minlength=len(classes))
counts = np.maximum(counts, 1)
weight = 1. / counts
weight *= classes.shape[0] / np.sum(weight)
else:
# user-defined dictionary
weight = np.ones(classes.shape[0], dtype=np.float64, order='C')
if not isinstance(class_weight, dict):
raise ValueError("class_weight must be dict, 'auto', or None,"
" got: %r" % class_weight)
for c in class_weight:
i = np.searchsorted(classes, c)
if classes[i] != c:
raise ValueError("Class label %d not present." % c)
else:
weight[i] = class_weight[c]
return weight
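# Illustrative call (a sketch): with three classes whose sample counts are 1, 2
# and 3, the 'auto' mode weights the rarest class highest:
#
#   >>> compute_class_weight('auto', np.array([0, 1, 2]),
#   ...                      np.array([0, 1, 1, 2, 2, 2]))
#   array([ 1.63636364,  0.81818182,  0.54545455])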
| bsd-3-clause |
jbair34/moose | python/PresentationBuilder/slides/DjangoWikiSlide.py | 19 | 3421 | import os, re, urllib
from ..images import DjangoWikiImage
from ..slides import RemarkSlide
##
# A slide for wiki content from a Django Wiki (https://github.com/django-wiki/django-wiki)
class DjangoWikiSlide(RemarkSlide):
@staticmethod
def validParams():
params = RemarkSlide.validParams()
return params
# When reading the markdown these replacements are made
replace = [('&', '&'), ('<', '<'), ('>', '>'), ('\r\n', '\n'), ('"', '"'), ('\r', '')]
##
# Constructor
# @param id The numeric slide id
# @param markdown The raw markdown for this slide
# @param kwargs Optional key, value pairs
def __init__(self, params):
RemarkSlide.__init__(self, params, image_type = 'DjangoWikiImage')
# Storage for comments
self.__comments = []
##
# Parse the markdown retrieved from a Django wiki
# @param markdown The raw markdown for the current slide
def parse(self, markdown):
markdown = RemarkSlide.parse(self, markdown)
# Replace special characters
for item in self.replace:
markdown = markdown.replace(item[0], item[1])
# Equations
pattern = re.compile('(\${2,})(.*?)\${2,}', re.S)
for m in pattern.finditer(markdown):
# Inline
if m.group(1) == '$$':
markdown = markdown.replace(m.group(0), '`$ ' + m.group(2) + ' $`')
elif m.group(1) == '$$$':
markdown = markdown.replace(m.group(0), '`$$ ' + m.group(2) + ' $$`')
else:
print 'ERROR parsing equation on slide', self.name()
print ' ', m.group(2)
# Handle in-list code
# The Django-wiki doesn't seem to support highlighted code blocks nested under a list item,
# indenting the code block removes the code block, so all blocks in the wiki must be as follows:
#
# - A list item
# ```c++
# unsigned int - = 0
# ```
#
# However, RemarkJS does support indented code blocks, but these blocks need to be indented by
# four spaces. The following preforms this indenting.
regex = re.compile(r'(^\s*[\*-].*?\n)(```.*?```\s*\n)', re.MULTILINE|re.DOTALL)
markdown = regex.sub(self.__subIndentListNestedCode, markdown)
# Extract comments
        markdown = re.sub(r'(?<![^\s.])(\s*\[\]\(\?\?\?\s*(.*?)\))', self.__subStoreComment, markdown, flags=re.DOTALL)
# Add the comments at the end
if self.__comments:
prefix = '\n'
if len(self.__comments) > 1:
prefix = '\n- '
markdown += '\n???\n'
for c in self.__comments:
markdown += prefix + c
# Return the markdown
return markdown
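    # For illustration: with the equation handling above, inline wiki math such as
    # "$$E = mc^2$$" becomes "`$ E = mc^2 $`" and display math written as
    # "$$$ ... $$$" becomes "`$$ ... $$`" in the generated Remark markdown.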
##
# Substitution function for extracting Remark comments (private)
# @param The re Match object, see re.sub
def __subStoreComment(self, match):
self.__comments.append(match.group(2).strip())
return ''
##
# Substitution function for nesting code in lists (private)
# @param The re Match object, see re.sub
def __subIndentListNestedCode(self, match):
        # Perform an additional match to check if the ``` is directly below the list item
sub_match = re.search(r'(^\s*[\*-].*?\n)(```)', match.group(0), re.MULTILINE)
# If so, then build the indented output
if sub_match:
output = '\n' + match.group(1)
for line in match.group(2).split('\n')[0:-1]: #[0:-1] removes the empty string at the end of the list
output += ' '*4 + line + '\n'
return output
else:
return match.group(0)
| lgpl-2.1 |
rushiagr/keystone | keystone/common/cache/backends/noop.py | 30 | 1426 | # Copyright 2013 Metacloud
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from dogpile.cache import api
NO_VALUE = api.NO_VALUE
class NoopCacheBackend(api.CacheBackend):
"""A no op backend as a default caching backend.
The no op backend is provided as the default caching backend for keystone
to ensure that ``dogpile.cache.memory`` is not used in any real-world
circumstances unintentionally. ``dogpile.cache.memory`` does not have a
mechanism to cleanup it's internal dict and therefore could cause run-away
memory utilization.
"""
def __init__(self, *args):
return
def get(self, key):
return NO_VALUE
def get_multi(self, keys):
return [NO_VALUE for x in keys]
def set(self, key, value):
return
def set_multi(self, mapping):
return
def delete(self, key):
return
def delete_multi(self, keys):
return
| apache-2.0 |
wdzhou/mantid | Framework/PythonInterface/test/python/mantid/geometry/UnitCellTest.py | 3 | 3081 | from __future__ import (absolute_import, division, print_function)
import unittest
import testhelpers
from mantid.geometry import UnitCell, AngleUnits
from mantid.kernel import V3D
import numpy as np
class UnitCellTest(unittest.TestCase):
def test_invalid_parameters_throw(self):
self.assertRaises(RuntimeError, UnitCell, 0, 0, 0, 0, 0, 0)
def test_simple_constructor(self):
u1 = UnitCell()
self.assertEquals(u1.a1(), 1)
self.assertEquals(u1.alpha(), 90)
u2 = UnitCell(3,4,5)
self.assertAlmostEquals(u2.b1(),1./3., 10)
self.assertAlmostEquals(u2.alphastar(), 90, 10)
u4 = u2
self.assertAlmostEquals(u4.volume(),1./u2.recVolume(),10)
u2.seta(3)
self.assertAlmostEquals(u2.a(),3,10)
def test_numpy_array_conversion(self):
row0 = (0.162546756312, 0.00815256992072, -0.00145274558861)
row1 = (row0[1], 0.028262965555, 0.00102046431298)
row2 = (row0[2], row1[2], 0.0156808990098 )
gstar = np.array( [row0,row1,row2] )
u = UnitCell()
testhelpers.assertRaisesNothing(self, u.recalculateFromGstar, gstar)
self._check_cell(u)
def test_to_string(self):
unit = UnitCell(3,3,3)
expected_str = "UnitCell with lattice parameters: a = 3 b = 3 c = 3 "\
"alpha = 90 beta = 90 gamma = 90"
expected_repr = "UnitCell(3, 3, 3, 90, 90, 90)"
self.assertEqual(expected_str, str(unit))
self.assertEqual(expected_repr, unit.__repr__())
newUnit = eval(unit.__repr__())
self.assertEqual(unit.a(), newUnit.a())
self.assertEqual(unit.b(), newUnit.b())
self.assertEqual(unit.c(), newUnit.c())
self.assertEqual(unit.alpha(), newUnit.alpha())
self.assertEqual(unit.beta(), newUnit.beta())
self.assertEqual(unit.gamma(), newUnit.gamma())
def _check_cell(self, cell):
self.assertAlmostEqual(cell.a(),2.5,10)
self.assertAlmostEqual(cell.b(),6,10)
self.assertAlmostEqual(cell.c(),8,10)
self.assertAlmostEqual(cell.alpha(),93,10)
self.assertAlmostEqual(cell.beta(),88,10)
self.assertAlmostEqual(cell.gamma(),97,10)
# get the some elements of the B matrix
self.assertEquals(type(cell.getB()), np.ndarray)
self.assertAlmostEqual(cell.getB()[0][0],0.403170877311,10)
self.assertAlmostEqual(cell.getB()[2][0],0.0,10)
self.assertAlmostEqual(cell.getB()[0][2],-0.00360329991666,10)
self.assertAlmostEqual(cell.getB()[2][2],0.125,10)
# d spacing for direct lattice at (1,1,1) (will automatically check dstar)
self.assertAlmostEqual(cell.d(1.,1.,1.),2.1227107587,10)
self.assertAlmostEqual(cell.d(V3D(1.,1.,1.)),2.1227107587,10)
# angle
self.assertAlmostEqual(cell.recAngle(1,1,1,1,0,0,AngleUnits.Radians),0.471054990614,10)
self.assertEquals(type(cell.getG()), np.ndarray)
self.assertEquals(type(cell.getGstar()), np.ndarray)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
FRidh/scipy | scipy/spatial/tests/test_kdtree.py | 15 | 24484 | # Copyright Anne M. Archibald 2008
# Released under the scipy license
from __future__ import division, print_function, absolute_import
from numpy.testing import (assert_equal, assert_array_equal,
assert_almost_equal, assert_array_almost_equal, assert_, run_module_suite)
import numpy as np
from scipy.spatial import KDTree, Rectangle, distance_matrix, cKDTree
from scipy.spatial.ckdtree import cKDTreeNode
from scipy.spatial import minkowski_distance as distance
class ConsistencyTests:
def test_nearest(self):
x = self.x
d, i = self.kdtree.query(x, 1)
assert_almost_equal(d**2,np.sum((x-self.data[i])**2))
eps = 1e-8
assert_(np.all(np.sum((self.data-x[np.newaxis,:])**2,axis=1) > d**2-eps))
def test_m_nearest(self):
x = self.x
m = self.m
dd, ii = self.kdtree.query(x, m)
d = np.amax(dd)
i = ii[np.argmax(dd)]
assert_almost_equal(d**2,np.sum((x-self.data[i])**2))
eps = 1e-8
assert_equal(np.sum(np.sum((self.data-x[np.newaxis,:])**2,axis=1) < d**2+eps),m)
def test_points_near(self):
x = self.x
d = self.d
dd, ii = self.kdtree.query(x, k=self.kdtree.n, distance_upper_bound=d)
eps = 1e-8
hits = 0
for near_d, near_i in zip(dd,ii):
if near_d == np.inf:
continue
hits += 1
assert_almost_equal(near_d**2,np.sum((x-self.data[near_i])**2))
assert_(near_d < d+eps, "near_d=%g should be less than %g" % (near_d,d))
assert_equal(np.sum(np.sum((self.data-x[np.newaxis,:])**2,axis=1) < d**2+eps),hits)
def test_points_near_l1(self):
x = self.x
d = self.d
dd, ii = self.kdtree.query(x, k=self.kdtree.n, p=1, distance_upper_bound=d)
eps = 1e-8
hits = 0
for near_d, near_i in zip(dd,ii):
if near_d == np.inf:
continue
hits += 1
assert_almost_equal(near_d,distance(x,self.data[near_i],1))
assert_(near_d < d+eps, "near_d=%g should be less than %g" % (near_d,d))
assert_equal(np.sum(distance(self.data,x,1) < d+eps),hits)
def test_points_near_linf(self):
x = self.x
d = self.d
dd, ii = self.kdtree.query(x, k=self.kdtree.n, p=np.inf, distance_upper_bound=d)
eps = 1e-8
hits = 0
for near_d, near_i in zip(dd,ii):
if near_d == np.inf:
continue
hits += 1
assert_almost_equal(near_d,distance(x,self.data[near_i],np.inf))
assert_(near_d < d+eps, "near_d=%g should be less than %g" % (near_d,d))
assert_equal(np.sum(distance(self.data,x,np.inf) < d+eps),hits)
def test_approx(self):
x = self.x
k = self.k
eps = 0.1
d_real, i_real = self.kdtree.query(x, k)
d, i = self.kdtree.query(x, k, eps=eps)
assert_(np.all(d <= d_real*(1+eps)))
class test_random(ConsistencyTests):
def setUp(self):
self.n = 100
self.m = 4
np.random.seed(1234)
self.data = np.random.randn(self.n, self.m)
self.kdtree = KDTree(self.data,leafsize=2)
self.x = np.random.randn(self.m)
self.d = 0.2
self.k = 10
class test_random_far(test_random):
def setUp(self):
test_random.setUp(self)
self.x = np.random.randn(self.m)+10
class test_small(ConsistencyTests):
def setUp(self):
self.data = np.array([[0,0,0],
[0,0,1],
[0,1,0],
[0,1,1],
[1,0,0],
[1,0,1],
[1,1,0],
[1,1,1]])
self.kdtree = KDTree(self.data)
self.n = self.kdtree.n
self.m = self.kdtree.m
np.random.seed(1234)
self.x = np.random.randn(3)
self.d = 0.5
self.k = 4
def test_nearest(self):
assert_array_equal(
self.kdtree.query((0,0,0.1), 1),
(0.1,0))
def test_nearest_two(self):
assert_array_equal(
self.kdtree.query((0,0,0.1), 2),
([0.1,0.9],[0,1]))
class test_small_nonleaf(test_small):
def setUp(self):
test_small.setUp(self)
self.kdtree = KDTree(self.data,leafsize=1)
class test_small_compiled(test_small):
def setUp(self):
test_small.setUp(self)
self.kdtree = cKDTree(self.data)
class test_small_nonleaf_compiled(test_small):
def setUp(self):
test_small.setUp(self)
self.kdtree = cKDTree(self.data,leafsize=1)
class test_random_compiled(test_random):
def setUp(self):
test_random.setUp(self)
self.kdtree = cKDTree(self.data)
class test_random_far_compiled(test_random_far):
def setUp(self):
test_random_far.setUp(self)
self.kdtree = cKDTree(self.data)
class test_vectorization:
def setUp(self):
self.data = np.array([[0,0,0],
[0,0,1],
[0,1,0],
[0,1,1],
[1,0,0],
[1,0,1],
[1,1,0],
[1,1,1]])
self.kdtree = KDTree(self.data)
def test_single_query(self):
d, i = self.kdtree.query(np.array([0,0,0]))
assert_(isinstance(d,float))
assert_(np.issubdtype(i, int))
def test_vectorized_query(self):
d, i = self.kdtree.query(np.zeros((2,4,3)))
assert_equal(np.shape(d),(2,4))
assert_equal(np.shape(i),(2,4))
def test_single_query_multiple_neighbors(self):
s = 23
kk = self.kdtree.n+s
d, i = self.kdtree.query(np.array([0,0,0]),k=kk)
assert_equal(np.shape(d),(kk,))
assert_equal(np.shape(i),(kk,))
assert_(np.all(~np.isfinite(d[-s:])))
assert_(np.all(i[-s:] == self.kdtree.n))
def test_vectorized_query_multiple_neighbors(self):
s = 23
kk = self.kdtree.n+s
d, i = self.kdtree.query(np.zeros((2,4,3)),k=kk)
assert_equal(np.shape(d),(2,4,kk))
assert_equal(np.shape(i),(2,4,kk))
assert_(np.all(~np.isfinite(d[:,:,-s:])))
assert_(np.all(i[:,:,-s:] == self.kdtree.n))
def test_single_query_all_neighbors(self):
d, i = self.kdtree.query([0,0,0],k=None,distance_upper_bound=1.1)
assert_(isinstance(d,list))
assert_(isinstance(i,list))
def test_vectorized_query_all_neighbors(self):
d, i = self.kdtree.query(np.zeros((2,4,3)),k=None,distance_upper_bound=1.1)
assert_equal(np.shape(d),(2,4))
assert_equal(np.shape(i),(2,4))
assert_(isinstance(d[0,0],list))
assert_(isinstance(i[0,0],list))
class test_vectorization_compiled:
def setUp(self):
self.data = np.array([[0,0,0],
[0,0,1],
[0,1,0],
[0,1,1],
[1,0,0],
[1,0,1],
[1,1,0],
[1,1,1]])
self.kdtree = cKDTree(self.data)
def test_single_query(self):
d, i = self.kdtree.query([0,0,0])
assert_(isinstance(d,float))
assert_(isinstance(i,int))
def test_vectorized_query(self):
d, i = self.kdtree.query(np.zeros((2,4,3)))
assert_equal(np.shape(d),(2,4))
assert_equal(np.shape(i),(2,4))
def test_vectorized_query_noncontiguous_values(self):
np.random.seed(1234)
qs = np.random.randn(3,1000).T
ds, i_s = self.kdtree.query(qs)
for q, d, i in zip(qs,ds,i_s):
assert_equal(self.kdtree.query(q),(d,i))
def test_single_query_multiple_neighbors(self):
s = 23
kk = self.kdtree.n+s
d, i = self.kdtree.query([0,0,0],k=kk)
assert_equal(np.shape(d),(kk,))
assert_equal(np.shape(i),(kk,))
assert_(np.all(~np.isfinite(d[-s:])))
assert_(np.all(i[-s:] == self.kdtree.n))
def test_vectorized_query_multiple_neighbors(self):
s = 23
kk = self.kdtree.n+s
d, i = self.kdtree.query(np.zeros((2,4,3)),k=kk)
assert_equal(np.shape(d),(2,4,kk))
assert_equal(np.shape(i),(2,4,kk))
assert_(np.all(~np.isfinite(d[:,:,-s:])))
assert_(np.all(i[:,:,-s:] == self.kdtree.n))
class ball_consistency:
def test_in_ball(self):
l = self.T.query_ball_point(self.x, self.d, p=self.p, eps=self.eps)
for i in l:
assert_(distance(self.data[i],self.x,self.p) <= self.d*(1.+self.eps))
def test_found_all(self):
c = np.ones(self.T.n,dtype=np.bool)
l = self.T.query_ball_point(self.x, self.d, p=self.p, eps=self.eps)
c[l] = False
assert_(np.all(distance(self.data[c],self.x,self.p) >= self.d/(1.+self.eps)))
class test_random_ball(ball_consistency):
def setUp(self):
n = 100
m = 4
np.random.seed(1234)
self.data = np.random.randn(n,m)
self.T = KDTree(self.data,leafsize=2)
self.x = np.random.randn(m)
self.p = 2.
self.eps = 0
self.d = 0.2
class test_random_ball_compiled(ball_consistency):
def setUp(self):
n = 100
m = 4
np.random.seed(1234)
self.data = np.random.randn(n,m)
self.T = cKDTree(self.data,leafsize=2)
self.x = np.random.randn(m)
self.p = 2.
self.eps = 0
self.d = 0.2
class test_random_ball_approx(test_random_ball):
def setUp(self):
test_random_ball.setUp(self)
self.eps = 0.1
class test_random_ball_approx_compiled(test_random_ball_compiled):
def setUp(self):
test_random_ball_compiled.setUp(self)
self.eps = 0.1
class test_random_ball_far(test_random_ball):
def setUp(self):
test_random_ball.setUp(self)
self.d = 2.
class test_random_ball_far_compiled(test_random_ball_compiled):
def setUp(self):
test_random_ball_compiled.setUp(self)
self.d = 2.
class test_random_ball_l1(test_random_ball):
def setUp(self):
test_random_ball.setUp(self)
self.p = 1
class test_random_ball_l1_compiled(test_random_ball_compiled):
def setUp(self):
test_random_ball_compiled.setUp(self)
self.p = 1
class test_random_ball_linf(test_random_ball):
def setUp(self):
test_random_ball.setUp(self)
self.p = np.inf
class test_random_ball_linf_compiled(test_random_ball_compiled):
def setUp(self):
test_random_ball_compiled.setUp(self)
self.p = np.inf
def test_random_ball_vectorized():
n = 20
m = 5
T = KDTree(np.random.randn(n,m))
r = T.query_ball_point(np.random.randn(2,3,m),1)
assert_equal(r.shape,(2,3))
assert_(isinstance(r[0,0],list))
def test_random_ball_vectorized_compiled():
n = 20
m = 5
np.random.seed(1234)
T = cKDTree(np.random.randn(n,m))
r = T.query_ball_point(np.random.randn(2,3,m),1)
assert_equal(r.shape,(2,3))
assert_(isinstance(r[0,0],list))
class two_trees_consistency:
def test_all_in_ball(self):
r = self.T1.query_ball_tree(self.T2, self.d, p=self.p, eps=self.eps)
for i, l in enumerate(r):
for j in l:
assert_(distance(self.data1[i],self.data2[j],self.p) <= self.d*(1.+self.eps))
def test_found_all(self):
r = self.T1.query_ball_tree(self.T2, self.d, p=self.p, eps=self.eps)
for i, l in enumerate(r):
c = np.ones(self.T2.n,dtype=np.bool)
c[l] = False
assert_(np.all(distance(self.data2[c],self.data1[i],self.p) >= self.d/(1.+self.eps)))
class test_two_random_trees(two_trees_consistency):
def setUp(self):
n = 50
m = 4
np.random.seed(1234)
self.data1 = np.random.randn(n,m)
self.T1 = KDTree(self.data1,leafsize=2)
self.data2 = np.random.randn(n,m)
self.T2 = KDTree(self.data2,leafsize=2)
self.p = 2.
self.eps = 0
self.d = 0.2
class test_two_random_trees_compiled(two_trees_consistency):
def setUp(self):
n = 50
m = 4
np.random.seed(1234)
self.data1 = np.random.randn(n,m)
self.T1 = cKDTree(self.data1,leafsize=2)
self.data2 = np.random.randn(n,m)
self.T2 = cKDTree(self.data2,leafsize=2)
self.p = 2.
self.eps = 0
self.d = 0.2
class test_two_random_trees_far(test_two_random_trees):
def setUp(self):
test_two_random_trees.setUp(self)
self.d = 2
class test_two_random_trees_far_compiled(test_two_random_trees_compiled):
def setUp(self):
test_two_random_trees_compiled.setUp(self)
self.d = 2
class test_two_random_trees_linf(test_two_random_trees):
def setUp(self):
test_two_random_trees.setUp(self)
self.p = np.inf
class test_two_random_trees_linf_compiled(test_two_random_trees_compiled):
def setUp(self):
test_two_random_trees_compiled.setUp(self)
self.p = np.inf
class test_rectangle:
def setUp(self):
self.rect = Rectangle([0,0],[1,1])
def test_min_inside(self):
assert_almost_equal(self.rect.min_distance_point([0.5,0.5]),0)
def test_min_one_side(self):
assert_almost_equal(self.rect.min_distance_point([0.5,1.5]),0.5)
def test_min_two_sides(self):
assert_almost_equal(self.rect.min_distance_point([2,2]),np.sqrt(2))
def test_max_inside(self):
assert_almost_equal(self.rect.max_distance_point([0.5,0.5]),1/np.sqrt(2))
def test_max_one_side(self):
assert_almost_equal(self.rect.max_distance_point([0.5,1.5]),np.hypot(0.5,1.5))
def test_max_two_sides(self):
assert_almost_equal(self.rect.max_distance_point([2,2]),2*np.sqrt(2))
def test_split(self):
less, greater = self.rect.split(0,0.1)
assert_array_equal(less.maxes,[0.1,1])
assert_array_equal(less.mins,[0,0])
assert_array_equal(greater.maxes,[1,1])
assert_array_equal(greater.mins,[0.1,0])
def test_distance_l2():
assert_almost_equal(distance([0,0],[1,1],2),np.sqrt(2))
def test_distance_l1():
assert_almost_equal(distance([0,0],[1,1],1),2)
def test_distance_linf():
assert_almost_equal(distance([0,0],[1,1],np.inf),1)
def test_distance_vectorization():
np.random.seed(1234)
x = np.random.randn(10,1,3)
y = np.random.randn(1,7,3)
assert_equal(distance(x,y).shape,(10,7))
class test_count_neighbors:
def setUp(self):
n = 50
m = 2
np.random.seed(1234)
self.T1 = KDTree(np.random.randn(n,m),leafsize=2)
self.T2 = KDTree(np.random.randn(n,m),leafsize=2)
def test_one_radius(self):
r = 0.2
assert_equal(self.T1.count_neighbors(self.T2, r),
np.sum([len(l) for l in self.T1.query_ball_tree(self.T2,r)]))
def test_large_radius(self):
r = 1000
assert_equal(self.T1.count_neighbors(self.T2, r),
np.sum([len(l) for l in self.T1.query_ball_tree(self.T2,r)]))
def test_multiple_radius(self):
rs = np.exp(np.linspace(np.log(0.01),np.log(10),3))
results = self.T1.count_neighbors(self.T2, rs)
assert_(np.all(np.diff(results) >= 0))
for r,result in zip(rs, results):
assert_equal(self.T1.count_neighbors(self.T2, r), result)
class test_count_neighbors_compiled:
def setUp(self):
n = 50
m = 2
np.random.seed(1234)
self.T1 = cKDTree(np.random.randn(n,m),leafsize=2)
self.T2 = cKDTree(np.random.randn(n,m),leafsize=2)
def test_one_radius(self):
r = 0.2
assert_equal(self.T1.count_neighbors(self.T2, r),
np.sum([len(l) for l in self.T1.query_ball_tree(self.T2,r)]))
def test_large_radius(self):
r = 1000
assert_equal(self.T1.count_neighbors(self.T2, r),
np.sum([len(l) for l in self.T1.query_ball_tree(self.T2,r)]))
def test_multiple_radius(self):
rs = np.exp(np.linspace(np.log(0.01),np.log(10),3))
results = self.T1.count_neighbors(self.T2, rs)
assert_(np.all(np.diff(results) >= 0))
for r,result in zip(rs, results):
assert_equal(self.T1.count_neighbors(self.T2, r), result)
class test_sparse_distance_matrix:
def setUp(self):
n = 50
m = 4
np.random.seed(1234)
self.T1 = KDTree(np.random.randn(n,m),leafsize=2)
self.T2 = KDTree(np.random.randn(n,m),leafsize=2)
self.r = 0.5
def test_consistency_with_neighbors(self):
M = self.T1.sparse_distance_matrix(self.T2, self.r)
r = self.T1.query_ball_tree(self.T2, self.r)
for i,l in enumerate(r):
for j in l:
assert_almost_equal(M[i,j],
distance(self.T1.data[i], self.T2.data[j]),
decimal=14)
for ((i,j),d) in M.items():
assert_(j in r[i])
def test_zero_distance(self):
# raises an exception for bug 870
self.T1.sparse_distance_matrix(self.T1, self.r)
class test_sparse_distance_matrix_compiled:
def setUp(self):
n = 50
m = 4
np.random.seed(0)
data1 = np.random.randn(n,m)
data2 = np.random.randn(n,m)
self.T1 = cKDTree(data1,leafsize=2)
self.T2 = cKDTree(data2,leafsize=2)
self.ref_T1 = KDTree(data1, leafsize=2)
self.ref_T2 = KDTree(data2, leafsize=2)
self.r = 0.5
def test_consistency_with_neighbors(self):
M = self.T1.sparse_distance_matrix(self.T2, self.r)
r = self.T1.query_ball_tree(self.T2, self.r)
for i,l in enumerate(r):
for j in l:
assert_almost_equal(M[i,j],
distance(self.T1.data[i], self.T2.data[j]),
decimal=14)
for ((i,j),d) in M.items():
assert_(j in r[i])
def test_zero_distance(self):
# raises an exception for bug 870 (FIXME: Does it?)
self.T1.sparse_distance_matrix(self.T1, self.r)
def test_consistency_with_python(self):
M1 = self.T1.sparse_distance_matrix(self.T2, self.r)
M2 = self.ref_T1.sparse_distance_matrix(self.ref_T2, self.r)
assert_array_almost_equal(M1.todense(), M2.todense(), decimal=14)
def test_distance_matrix():
m = 10
n = 11
k = 4
np.random.seed(1234)
xs = np.random.randn(m,k)
ys = np.random.randn(n,k)
ds = distance_matrix(xs,ys)
assert_equal(ds.shape, (m,n))
for i in range(m):
for j in range(n):
assert_almost_equal(distance(xs[i],ys[j]),ds[i,j])
def test_distance_matrix_looping():
m = 10
n = 11
k = 4
np.random.seed(1234)
xs = np.random.randn(m,k)
ys = np.random.randn(n,k)
ds = distance_matrix(xs,ys)
dsl = distance_matrix(xs,ys,threshold=1)
assert_equal(ds,dsl)
def check_onetree_query(T,d):
r = T.query_ball_tree(T, d)
s = set()
for i, l in enumerate(r):
for j in l:
if i < j:
s.add((i,j))
assert_(s == T.query_pairs(d))
def test_onetree_query():
np.random.seed(0)
n = 50
k = 4
points = np.random.randn(n,k)
T = KDTree(points)
yield check_onetree_query, T, 0.1
points = np.random.randn(3*n,k)
points[:n] *= 0.001
points[n:2*n] += 2
T = KDTree(points)
yield check_onetree_query, T, 0.1
yield check_onetree_query, T, 0.001
yield check_onetree_query, T, 0.00001
yield check_onetree_query, T, 1e-6
def test_onetree_query_compiled():
np.random.seed(0)
n = 100
k = 4
points = np.random.randn(n,k)
T = cKDTree(points)
yield check_onetree_query, T, 0.1
points = np.random.randn(3*n,k)
points[:n] *= 0.001
points[n:2*n] += 2
T = cKDTree(points)
yield check_onetree_query, T, 0.1
yield check_onetree_query, T, 0.001
yield check_onetree_query, T, 0.00001
yield check_onetree_query, T, 1e-6
def test_query_pairs_single_node():
tree = KDTree([[0, 1]])
assert_equal(tree.query_pairs(0.5), set())
def test_query_pairs_single_node_compiled():
tree = cKDTree([[0, 1]])
assert_equal(tree.query_pairs(0.5), set())
def test_ball_point_ints():
# Regression test for #1373.
x, y = np.mgrid[0:4, 0:4]
points = list(zip(x.ravel(), y.ravel()))
tree = KDTree(points)
assert_equal(sorted([4, 8, 9, 12]),
sorted(tree.query_ball_point((2, 0), 1)))
points = np.asarray(points, dtype=np.float)
tree = KDTree(points)
assert_equal(sorted([4, 8, 9, 12]),
sorted(tree.query_ball_point((2, 0), 1)))
def test_kdtree_comparisons():
# Regression test: node comparisons were done wrong in 0.12 w/Py3.
nodes = [KDTree.node() for _ in range(3)]
assert_equal(sorted(nodes), sorted(nodes[::-1]))
def test_ckdtree_build_modes():
# check if different build modes for cKDTree give
# similar query results
np.random.seed(0)
n = 5000
k = 4
points = np.random.randn(n, k)
T1 = cKDTree(points).query(points, k=5)[-1]
T2 = cKDTree(points, compact_nodes=False).query(points, k=5)[-1]
T3 = cKDTree(points, balanced_tree=False).query(points, k=5)[-1]
T4 = cKDTree(points, compact_nodes=False, balanced_tree=False).query(points, k=5)[-1]
assert_array_equal(T1, T2)
assert_array_equal(T1, T3)
assert_array_equal(T1, T4)
def test_ckdtree_pickle():
# test if it is possible to pickle
# a cKDTree
try:
import cPickle
# known failure on Python 2
# pickle currently only supported on Python 3
cPickle.dumps('pyflakes dummy')
except ImportError:
import pickle
np.random.seed(0)
n = 50
k = 4
points = np.random.randn(n, k)
T1 = cKDTree(points)
tmp = pickle.dumps(T1)
T2 = pickle.loads(tmp)
T1 = T1.query(points, k=5)[-1]
T2 = T2.query(points, k=5)[-1]
assert_array_equal(T1, T2)
def test_ckdtree_copy_data():
# check if copy_data=True makes the kd-tree
# impervious to data corruption by modification of
    # the data array
np.random.seed(0)
n = 5000
k = 4
points = np.random.randn(n, k)
T = cKDTree(points, copy_data=True)
q = points.copy()
T1 = T.query(q, k=5)[-1]
points[...] = np.random.randn(n, k)
T2 = T.query(q, k=5)[-1]
assert_array_equal(T1, T2)
def test_ckdtree_parallel():
# check if parallel=True also generates correct
# query results
np.random.seed(0)
n = 5000
k = 4
points = np.random.randn(n, k)
T = cKDTree(points)
T1 = T.query(points, k=5, n_jobs=64)[-1]
T2 = T.query(points, k=5, n_jobs=-1)[-1]
T3 = T.query(points, k=5)[-1]
assert_array_equal(T1, T2)
assert_array_equal(T1, T3)
def test_ckdtree_view():
# Check that the nodes can be correctly viewed from Python.
# This test also sanity checks each node in the cKDTree, and
# thus verifies the internal structure of the kd-tree.
np.random.seed(0)
n = 100
k = 4
points = np.random.randn(n, k)
kdtree = cKDTree(points)
# walk the whole kd-tree and sanity check each node
def recurse_tree(n):
assert_(isinstance(n, cKDTreeNode))
if n.split_dim == -1:
assert_(n.lesser is None)
assert_(n.greater is None)
assert_(n.indices.shape[0] <= kdtree.leafsize)
else:
recurse_tree(n.lesser)
recurse_tree(n.greater)
x = n.lesser.data_points[:, n.split_dim]
y = n.greater.data_points[:, n.split_dim]
assert_(x.max() < y.min())
recurse_tree(kdtree.tree)
    # check that indices are correctly retrieved
n = kdtree.tree
assert_array_equal(np.sort(n.indices), range(100))
    # check that data_points are correctly retrieved
assert_array_equal(kdtree.data[n.indices, :], n.data_points)
# cKDTree is specialized to type double points, so no need to make
# a unit test corresponding to test_ball_point_ints()
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
agdestine/machine-learning | code/abaloneUtils.py | 5 | 4299 | # utils
# Utility functions for handling data
#
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Created: Thu Feb 26 17:47:35 2015 -0500
#
# Copyright (C) 2015 District Data Labs
# For license information, see LICENSE.txt
#
# ID: utils.py [] benjamin@bengfort.com $
"""
Utility functions for handling data
"""
##########################################################################
## Imports
##########################################################################
import os
import csv
import time
import json
import numpy as np
from sklearn.datasets.base import Bunch
##########################################################################
## Module Constants
##########################################################################
SKL_DATA = "SCIKIT_LEARN_DATA"
BASE_DIR = os.path.normpath(os.path.join(os.path.dirname(__file__), ".."))
DATA_DIR = os.path.join(BASE_DIR, "data")
CODE_DIR = os.path.join(BASE_DIR, "code")
##########################################################################
## Helper Functions
##########################################################################
def timeit(func):
"""
Returns how long a function took to execute, along with the output
"""
def wrapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
return result, time.time() - start
    return wrapper
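# Illustrative usage sketch (not part of the original module): a decorated
# function returns a (result, elapsed_seconds) pair.
#
#   @timeit
#   def slow_add(a, b):
#       return a + b
#
#   value, elapsed = slow_add(1, 2)   # value == 3, elapsed is a float in seconds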
##########################################################################
## Dataset Loading
##########################################################################
def get_data_home(data_home=None):
"""
Returns the path of the data directory
"""
if data_home is None:
data_home = os.environ.get(SKL_DATA, DATA_DIR)
data_home = os.path.expanduser(data_home)
if not os.path.exists(data_home):
os.makedirs(data_home)
return data_home
def load_data(path, descr=None, target_index=-1):
"""
    Returns a scikit-learn dataset Bunch which includes several important
attributes that are used in modeling:
data: array of shape n_samples * n_features
target: array of length n_samples
feature_names: names of the features
target_names: names of the targets
filenames: names of the files that were loaded
DESCR: contents of the readme
This data therefore has the look and feel of the toy datasets.
    Pass in a path, usually just the name of the location in the data dir.
It will be joined with the result of `get_data_home`. The contents are:
path
- abalone.names # The file to load into DESCR
- meta.json # A file containing metadata to load
- dataset.txt # The numpy loadtxt file
- dataset.csv # The pandas read_csv file
    You can also specify the index of the target via ``target_index``, which
    by default is the last item in the row (-1).
"""
root = os.path.join(get_data_home(), path)
filenames = {
'meta': os.path.join(root, 'meta.json'),
'rdme': os.path.join(root, 'abalone.names'),
'data': os.path.join(root, 'dataset.csv'),
}
target_names = None
feature_names = None
DESCR = None
with open(filenames['meta'], 'r') as f:
meta = json.load(f)
target_names = meta['target_names']
feature_names = meta['feature_names']
with open(filenames['rdme'], 'r') as f:
DESCR = f.read()
# skip header from csv, load data
dataset = np.loadtxt(filenames['data'], delimiter=',', skiprows=1)
data = None
target = None
# Target assumed to be either last or first row
if target_index == -1:
data = dataset[:,0:-1]
target = dataset[:,-1]
elif target_index == 0:
data = dataset[:,1:]
target = dataset[:,0]
else:
raise ValueError("Target index must be either -1 or 0")
return Bunch(data=data,
target=target,
filenames=filenames,
target_names=target_names,
feature_names=feature_names,
DESCR=DESCR)
def load_abalone():
return load_data('abalone')
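# Hypothetical usage sketch (assumed, not in the original file): the Bunch
# returned above exposes the attributes described in the load_data docstring.
#
#   dataset = load_abalone()
#   X, y = dataset.data, dataset.target
#   print(dataset.feature_names)
#   print(X.shape, y.shape)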
| mit |
pravsripad/mne-python | setup.py | 4 | 4660 | #!/usr/bin/env python
# Copyright (C) 2011-2020 Alexandre Gramfort
# <alexandre.gramfort@inria.fr>
import os
import os.path as op
from setuptools import setup
# get the version (don't import mne here, so dependencies are not needed)
version = None
with open(op.join('mne', '_version.py'), 'r') as fid:
for line in (line.strip() for line in fid):
if line.startswith('__version__'):
version = line.split('=')[1].strip().strip('\'')
break
if version is None:
raise RuntimeError('Could not determine version')
descr = """MNE python project for MEG and EEG data analysis."""
DISTNAME = 'mne'
DESCRIPTION = descr
MAINTAINER = 'Alexandre Gramfort'
MAINTAINER_EMAIL = 'alexandre.gramfort@inria.fr'
URL = 'https://mne.tools/dev/'
LICENSE = 'BSD (3-clause)'
DOWNLOAD_URL = 'http://github.com/mne-tools/mne-python'
VERSION = version
def package_tree(pkgroot):
"""Get the submodule list."""
# Adapted from VisPy
path = op.dirname(__file__)
subdirs = [op.relpath(i[0], path).replace(op.sep, '.')
for i in os.walk(op.join(path, pkgroot))
if '__init__.py' in i[2]]
return sorted(subdirs)
if __name__ == "__main__":
if op.exists('MANIFEST'):
os.remove('MANIFEST')
with open('README.rst', 'r') as fid:
long_description = fid.read()
hard_dependencies = ('numpy', 'scipy')
install_requires = list()
with open('requirements.txt', 'r') as fid:
for line in fid:
req = line.strip()
for hard_dep in hard_dependencies:
if req.startswith(hard_dep):
install_requires.append(req)
setup(name=DISTNAME,
maintainer=MAINTAINER,
include_package_data=True,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=long_description,
long_description_content_type='text/x-rst',
zip_safe=False, # the package can run out of an .egg file
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 3',
],
keywords='neuroscience neuroimaging MEG EEG ECoG fNIRS brain',
project_urls={
'Documentation': 'https://mne.tools/',
'Source': 'https://github.com/mne-tools/mne-python/',
'Tracker': 'https://github.com/mne-tools/mne-python/issues/',
},
platforms='any',
python_requires='>=3.6',
install_requires=install_requires,
packages=package_tree('mne'),
package_data={'mne': [
op.join('data', '*.sel'),
op.join('data', 'icos.fif.gz'),
op.join('data', 'coil_def*.dat'),
op.join('data', 'helmets', '*.fif.gz'),
op.join('data', 'FreeSurferColorLUT.txt'),
op.join('data', 'image', '*gif'),
op.join('data', 'image', '*lout'),
op.join('data', 'fsaverage', '*.fif'),
op.join('channels', 'data', 'layouts', '*.lout'),
op.join('channels', 'data', 'layouts', '*.lay'),
op.join('channels', 'data', 'montages', '*.sfp'),
op.join('channels', 'data', 'montages', '*.txt'),
op.join('channels', 'data', 'montages', '*.elc'),
op.join('channels', 'data', 'neighbors', '*.mat'),
op.join('datasets', 'sleep_physionet', 'SHA1SUMS'),
op.join('datasets', '_fsaverage', '*.txt'),
op.join('datasets', '_infant', '*.txt'),
op.join('gui', 'help', '*.json'),
op.join('html', '*.js'),
op.join('html', '*.css'),
op.join('icons', '*.svg'),
op.join('icons', '*.png'),
op.join('io', 'artemis123', 'resources', '*.csv'),
op.join('io', 'edf', 'gdf_encodes.txt')
]},
entry_points={'console_scripts': [
'mne = mne.commands.utils:main',
]})
| bsd-3-clause |
hbrunn/OpenUpgrade | openerp/osv/orm.py | 9 | 262287 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
Object relational mapping to database (postgresql) module
* Hierarchical structure
     * Constraint consistency and validation
     * Object metadata depends on its status
     * Optimised processing by complex query (multiple actions at once)
     * Default field values
     * Permissions optimisation
     * Persistent object: DB postgresql
     * Data conversions
     * Multi-level caching system
     * 2 different inheritance mechanisms
     * Fields:
          - classical (varchar, integer, boolean, ...)
- relations (one2many, many2one, many2many)
- functions
"""
import calendar
import collections
import copy
import datetime
import itertools
import logging
import operator
import pickle
import pytz
import re
import simplejson
import time
import traceback
import types
import babel.dates
import dateutil.relativedelta
import psycopg2
from lxml import etree
import fields
import openerp
import openerp.tools as tools
from openerp.tools.config import config
from openerp.tools.misc import CountingStream, DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
from openerp import SUPERUSER_ID
from query import Query
_logger = logging.getLogger(__name__)
_schema = logging.getLogger(__name__ + '.schema')
# List of etree._Element subclasses that we choose to ignore when parsing XML.
from openerp.tools import SKIPPED_ELEMENT_TYPES
regex_order = re.compile('^( *([a-z0-9:_]+|"[a-z0-9:_]+")( *desc| *asc)?( *, *|))+$', re.I)
regex_object_name = re.compile(r'^[a-z0-9_.]+$')
AUTOINIT_RECALCULATE_STORED_FIELDS = 1000
def transfer_field_to_modifiers(field, modifiers):
default_values = {}
state_exceptions = {}
for attr in ('invisible', 'readonly', 'required'):
state_exceptions[attr] = []
default_values[attr] = bool(field.get(attr))
for state, modifs in (field.get("states",{})).items():
for modif in modifs:
if default_values[modif[0]] != modif[1]:
state_exceptions[modif[0]].append(state)
for attr, default_value in default_values.items():
if state_exceptions[attr]:
modifiers[attr] = [("state", "not in" if default_value else "in", state_exceptions[attr])]
else:
modifiers[attr] = default_value
# Don't deal with groups, it is done by check_group().
# Need the context to evaluate the invisible attribute on tree views.
# For non-tree views, the context shouldn't be given.
def transfer_node_to_modifiers(node, modifiers, context=None, in_tree_view=False):
if node.get('attrs'):
modifiers.update(eval(node.get('attrs')))
if node.get('states'):
if 'invisible' in modifiers and isinstance(modifiers['invisible'], list):
# TODO combine with AND or OR, use implicit AND for now.
modifiers['invisible'].append(('state', 'not in', node.get('states').split(',')))
else:
modifiers['invisible'] = [('state', 'not in', node.get('states').split(','))]
for a in ('invisible', 'readonly', 'required'):
if node.get(a):
v = bool(eval(node.get(a), {'context': context or {}}))
if in_tree_view and a == 'invisible':
# Invisible in a tree view has a specific meaning, make it a
# new key in the modifiers attribute.
modifiers['tree_invisible'] = v
elif v or (a not in modifiers or not isinstance(modifiers[a], list)):
# Don't set the attribute to False if a dynamic value was
# provided (i.e. a domain from attrs or states).
modifiers[a] = v
def simplify_modifiers(modifiers):
for a in ('invisible', 'readonly', 'required'):
if a in modifiers and not modifiers[a]:
del modifiers[a]
def transfer_modifiers_to_node(modifiers, node):
if modifiers:
simplify_modifiers(modifiers)
node.set('modifiers', simplejson.dumps(modifiers))
def setup_modifiers(node, field=None, context=None, in_tree_view=False):
""" Processes node attributes and field descriptors to generate
the ``modifiers`` node attribute and set it on the provided node.
Alters its first argument in-place.
:param node: ``field`` node from an OpenERP view
:type node: lxml.etree._Element
:param dict field: field descriptor corresponding to the provided node
:param dict context: execution context used to evaluate node attributes
:param bool in_tree_view: triggers the ``tree_invisible`` code
path (separate from ``invisible``): in
tree view there are two levels of
invisibility, cell content (a column is
present but the cell itself is not
displayed) with ``invisible`` and column
invisibility (the whole column is
hidden) with ``tree_invisible``.
:returns: nothing
"""
modifiers = {}
if field is not None:
transfer_field_to_modifiers(field, modifiers)
transfer_node_to_modifiers(
node, modifiers, context=context, in_tree_view=in_tree_view)
transfer_modifiers_to_node(modifiers, node)
def test_modifiers(what, expected):
modifiers = {}
if isinstance(what, basestring):
node = etree.fromstring(what)
transfer_node_to_modifiers(node, modifiers)
simplify_modifiers(modifiers)
json = simplejson.dumps(modifiers)
assert json == expected, "%s != %s" % (json, expected)
elif isinstance(what, dict):
transfer_field_to_modifiers(what, modifiers)
simplify_modifiers(modifiers)
json = simplejson.dumps(modifiers)
assert json == expected, "%s != %s" % (json, expected)
# To use this test:
# import openerp
# openerp.osv.orm.modifiers_tests()
def modifiers_tests():
test_modifiers('<field name="a"/>', '{}')
test_modifiers('<field name="a" invisible="1"/>', '{"invisible": true}')
test_modifiers('<field name="a" readonly="1"/>', '{"readonly": true}')
test_modifiers('<field name="a" required="1"/>', '{"required": true}')
test_modifiers('<field name="a" invisible="0"/>', '{}')
test_modifiers('<field name="a" readonly="0"/>', '{}')
test_modifiers('<field name="a" required="0"/>', '{}')
test_modifiers('<field name="a" invisible="1" required="1"/>', '{"invisible": true, "required": true}') # TODO order is not guaranteed
test_modifiers('<field name="a" invisible="1" required="0"/>', '{"invisible": true}')
test_modifiers('<field name="a" invisible="0" required="1"/>', '{"required": true}')
test_modifiers("""<field name="a" attrs="{'invisible': [('b', '=', 'c')]}"/>""", '{"invisible": [["b", "=", "c"]]}')
# The dictionary is supposed to be the result of fields_get().
test_modifiers({}, '{}')
test_modifiers({"invisible": True}, '{"invisible": true}')
test_modifiers({"invisible": False}, '{}')
def check_object_name(name):
""" Check if the given name is a valid openerp object name.
The _name attribute in osv and osv_memory object is subject to
some restrictions. This function returns True or False whether
the given name is allowed or not.
TODO: this is an approximation. The goal in this approximation
is to disallow uppercase characters (in some places, we quote
    table/column names and in others not, which leads to this kind
    of error:
psycopg2.ProgrammingError: relation "xxx" does not exist).
The same restriction should apply to both osv and osv_memory
objects for consistency.
"""
if regex_object_name.match(name) is None:
return False
return True
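# Illustrative sketch (derived from regex_object_name above): only lowercase
# letters, digits, underscores and dots are accepted.
#
#   check_object_name('res.partner')   # -> True
#   check_object_name('sale_order_2')  # -> True
#   check_object_name('Res.Partner')   # -> False (uppercase is rejected)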
def raise_on_invalid_object_name(name):
if not check_object_name(name):
msg = "The _name attribute %s is not valid." % name
_logger.error(msg)
raise except_orm('ValueError', msg)
POSTGRES_CONFDELTYPES = {
'RESTRICT': 'r',
'NO ACTION': 'a',
'CASCADE': 'c',
'SET NULL': 'n',
'SET DEFAULT': 'd',
}
def intersect(la, lb):
return filter(lambda x: x in lb, la)
def fix_import_export_id_paths(fieldname):
"""
Fixes the id fields in import and exports, and splits field paths
on '/'.
:param str fieldname: name of the field to import/export
:return: split field name
:rtype: list of str
"""
fixed_db_id = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
fixed_external_id = re.sub(r'([^/]):id', r'\1/id', fixed_db_id)
return fixed_external_id.split('/')
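# Worked examples (derived from the two substitutions above, for illustration):
#
#   fix_import_export_id_paths('order_line/product_id:id')
#   # -> ['order_line', 'product_id', 'id']
#   fix_import_export_id_paths('partner_id.id')
#   # -> ['partner_id', '.id']   (database-id notation is preserved as '.id')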
class except_orm(Exception):
def __init__(self, name, value):
self.name = name
self.value = value
self.args = (name, value)
class BrowseRecordError(Exception):
pass
class browse_null(object):
""" Readonly python database object browser
"""
def __init__(self):
self.id = False
def __getitem__(self, name):
return None
def __getattr__(self, name):
return None # XXX: return self ?
def __int__(self):
return False
def __str__(self):
return ''
def __nonzero__(self):
return False
def __unicode__(self):
return u''
def __iter__(self):
raise NotImplementedError("Iteration is not allowed on %s" % self)
#
# TODO: execute an object method on browse_record_list
#
class browse_record_list(list):
""" Collection of browse objects
Such an instance will be returned when doing a ``browse([ids..])``
and will be iterable, yielding browse() objects
"""
def __init__(self, lst, context=None):
if not context:
context = {}
super(browse_record_list, self).__init__(lst)
self.context = context
class browse_record(object):
""" An object that behaves like a row of an object's table.
    It has attributes named after the columns of the corresponding object.
Examples::
uobj = pool.get('res.users')
user_rec = uobj.browse(cr, uid, 104)
name = user_rec.name
"""
def __init__(self, cr, uid, id, table, cache, context=None,
list_class=browse_record_list, fields_process=None):
"""
:param table: the browsed object (inherited from orm)
:param dict cache: a dictionary of model->field->data to be shared
across browse objects, thus reducing the SQL
read()s. It can speed up things a lot, but also be
disastrous if not discarded after write()/unlink()
operations
:param dict context: dictionary with an optional context
"""
if fields_process is None:
fields_process = {}
if context is None:
context = {}
self._list_class = list_class
self._cr = cr
self._uid = uid
self._id = id
self._table = table # deprecated, use _model!
self._model = table
self._table_name = self._table._name
self.__logger = logging.getLogger('openerp.osv.orm.browse_record.' + self._table_name)
self._context = context
self._fields_process = fields_process
cache.setdefault(table._name, {})
self._data = cache[table._name]
# if not (id and isinstance(id, (int, long,))):
# raise BrowseRecordError(_('Wrong ID for the browse record, got %r, expected an integer.') % (id,))
# if not table.exists(cr, uid, id, context):
# raise BrowseRecordError(_('Object %s does not exists') % (self,))
if id not in self._data:
self._data[id] = {'id': id}
self._cache = cache
def __getitem__(self, name):
if name == 'id':
return self._id
if name not in self._data[self._id]:
# build the list of fields we will fetch
# fetch the definition of the field which was asked for
if name in self._table._columns:
col = self._table._columns[name]
elif name in self._table._inherit_fields:
col = self._table._inherit_fields[name][2]
elif hasattr(self._table, str(name)):
attr = getattr(self._table, name)
if isinstance(attr, (types.MethodType, types.LambdaType, types.FunctionType)):
def function_proxy(*args, **kwargs):
if 'context' not in kwargs and self._context:
kwargs.update(context=self._context)
return attr(self._cr, self._uid, [self._id], *args, **kwargs)
return function_proxy
else:
return attr
else:
error_msg = "Field '%s' does not exist in object '%s'" % (name, self)
self.__logger.warning(error_msg)
if self.__logger.isEnabledFor(logging.DEBUG):
self.__logger.debug(''.join(traceback.format_stack()))
raise KeyError(error_msg)
prefetchable = lambda f: f._classic_write and f._prefetch and not f.groups and not f.deprecated
# if the field is a classic one or a many2one, we'll fetch all classic and many2one fields
if prefetchable(col):
# gen the list of "local" (ie not inherited) fields which are classic or many2one
field_filter = lambda x: prefetchable(x[1])
fields_to_fetch = filter(field_filter, self._table._columns.items())
# gen the list of inherited fields
inherits = map(lambda x: (x[0], x[1][2]), self._table._inherit_fields.items())
# complete the field list with the inherited fields which are classic or many2one
fields_to_fetch += filter(field_filter, inherits)
# otherwise we fetch only that field
else:
fields_to_fetch = [(name, col)]
ids = filter(lambda id: name not in self._data[id], self._data.keys())
# read the results
field_names = map(lambda x: x[0], fields_to_fetch)
try:
field_values = self._table.read(self._cr, self._uid, ids, field_names, context=self._context, load="_classic_write")
except (openerp.exceptions.AccessError, except_orm):
if len(ids) == 1:
raise
# prefetching attempt failed, perhaps we're violating ACL restrictions involuntarily
_logger.info('Prefetching attempt for fields %s on %s failed for ids %s, re-trying just for id %s', field_names, self._model._name, ids, self._id)
ids = [self._id]
field_values = self._table.read(self._cr, self._uid, ids, field_names, context=self._context, load="_classic_write")
# TODO: improve this, very slow for reports
if self._fields_process:
lang = self._context.get('lang', 'en_US') or 'en_US'
lang_obj_ids = self.pool.get('res.lang').search(self._cr, self._uid, [('code', '=', lang)])
if not lang_obj_ids:
raise Exception(_('Language with code "%s" is not defined in your system !\nDefine it through the Administration menu.') % (lang,))
lang_obj = self.pool.get('res.lang').browse(self._cr, self._uid, lang_obj_ids[0])
for field_name, field_column in fields_to_fetch:
if field_column._type in self._fields_process:
for result_line in field_values:
result_line[field_name] = self._fields_process[field_column._type](result_line[field_name])
if result_line[field_name]:
result_line[field_name].set_value(self._cr, self._uid, result_line[field_name], self, field_column, lang_obj)
if not field_values:
                # Where did those ids come from? Perhaps old entries in ir_model_data?
_logger.warning("No field_values found for ids %s in %s", ids, self)
raise KeyError('Field %s not found in %s'%(name, self))
# create browse records for 'remote' objects
for result_line in field_values:
new_data = {}
for field_name, field_column in fields_to_fetch:
if field_column._type == 'many2one':
if result_line[field_name]:
obj = self._table.pool[field_column._obj]
if isinstance(result_line[field_name], (list, tuple)):
value = result_line[field_name][0]
else:
value = result_line[field_name]
if value:
# FIXME: this happen when a _inherits object
# overwrite a field of it parent. Need
# testing to be sure we got the right
# object and not the parent one.
if not isinstance(value, browse_record):
if obj is None:
# In some cases the target model is not available yet, so we must ignore it,
# which is safe in most cases, this value will just be loaded later when needed.
# This situation can be caused by custom fields that connect objects with m2o without
                                        # respecting module dependencies, causing relationships to be connected too soon when
# the target is not loaded yet.
continue
new_data[field_name] = browse_record(self._cr,
self._uid, value, obj, self._cache,
context=self._context,
list_class=self._list_class,
fields_process=self._fields_process)
else:
new_data[field_name] = value
else:
new_data[field_name] = browse_null()
else:
new_data[field_name] = browse_null()
elif field_column._type in ('one2many', 'many2many') and len(result_line[field_name]):
new_data[field_name] = self._list_class(
(browse_record(self._cr, self._uid, id, self._table.pool.get(field_column._obj),
self._cache, context=self._context, list_class=self._list_class,
fields_process=self._fields_process)
for id in result_line[field_name]),
context=self._context)
elif field_column._type == 'reference':
if result_line[field_name]:
if isinstance(result_line[field_name], browse_record):
new_data[field_name] = result_line[field_name]
else:
ref_obj, ref_id = result_line[field_name].split(',')
ref_id = long(ref_id)
if ref_id:
obj = self._table.pool[ref_obj]
new_data[field_name] = browse_record(self._cr, self._uid, ref_id, obj, self._cache, context=self._context, list_class=self._list_class, fields_process=self._fields_process)
else:
new_data[field_name] = browse_null()
else:
new_data[field_name] = browse_null()
else:
new_data[field_name] = result_line[field_name]
self._data[result_line['id']].update(new_data)
if not name in self._data[self._id]:
# How did this happen? Could be a missing model due to custom fields used too soon, see above.
self.__logger.error("Fields to fetch: %s, Field values: %s", field_names, field_values)
self.__logger.error("Cached: %s, Table: %s", self._data[self._id], self._table)
raise KeyError(_('Unknown attribute %s in %s ') % (name, self))
return self._data[self._id][name]
def __getattr__(self, name):
try:
return self[name]
except KeyError, e:
import sys
exc_info = sys.exc_info()
raise AttributeError, "Got %r while trying to get attribute %s on a %s record." % (e, name, self._table._name), exc_info[2]
def __contains__(self, name):
return (name in self._table._columns) or (name in self._table._inherit_fields) or hasattr(self._table, name)
def __iter__(self):
raise NotImplementedError("Iteration is not allowed on %s" % self)
def __hasattr__(self, name):
return name in self
def __int__(self):
return self._id
def __str__(self):
return "browse_record(%s, %s)" % (self._table_name, self._id)
def __eq__(self, other):
if not isinstance(other, browse_record):
return False
return (self._table_name, self._id) == (other._table_name, other._id)
def __ne__(self, other):
if not isinstance(other, browse_record):
return True
return (self._table_name, self._id) != (other._table_name, other._id)
# we need to define __unicode__ even though we've already defined __str__
# because we have overridden __getattr__
def __unicode__(self):
return unicode(str(self))
def __hash__(self):
return hash((self._table_name, self._id))
__repr__ = __str__
def refresh(self):
"""Force refreshing this browse_record's data and all the data of the
records that belong to the same cache, by emptying the cache completely,
preserving only the record identifiers (for prefetching optimizations).
"""
for model, model_cache in self._cache.iteritems():
# only preserve the ids of the records that were in the cache
cached_ids = dict([(i, {'id': i}) for i in model_cache.keys()])
self._cache[model].clear()
self._cache[model].update(cached_ids)
def pg_varchar(size=0):
""" Returns the VARCHAR declaration for the provided size:
* If no size (or an empty or negative size is provided) return an
'infinite' VARCHAR
* Otherwise return a VARCHAR(n)
:type int size: varchar size, optional
:rtype: str
"""
if size:
if not isinstance(size, int):
raise TypeError("VARCHAR parameter should be an int, got %s"
% type(size))
if size > 0:
return 'VARCHAR(%d)' % size
return 'VARCHAR'
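# Expected declarations, as a quick sketch (illustrative only):
#
#   pg_varchar()      # -> 'VARCHAR'
#   pg_varchar(0)     # -> 'VARCHAR'
#   pg_varchar(64)    # -> 'VARCHAR(64)'
#   pg_varchar('64')  # raises TypeError (size must be an int)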
FIELDS_TO_PGTYPES = {
fields.boolean: 'bool',
fields.integer: 'int4',
fields.text: 'text',
fields.html: 'text',
fields.date: 'date',
fields.datetime: 'timestamp',
fields.binary: 'bytea',
fields.many2one: 'int4',
fields.serialized: 'text',
}
def get_pg_type(f, type_override=None):
"""
:param fields._column f: field to get a Postgres type for
:param type type_override: use the provided type for dispatching instead of the field's own type
:returns: (postgres_identification_type, postgres_type_specification)
:rtype: (str, str)
"""
field_type = type_override or type(f)
if field_type in FIELDS_TO_PGTYPES:
pg_type = (FIELDS_TO_PGTYPES[field_type], FIELDS_TO_PGTYPES[field_type])
elif issubclass(field_type, fields.float):
if f.digits:
pg_type = ('numeric', 'NUMERIC')
else:
pg_type = ('float8', 'DOUBLE PRECISION')
elif issubclass(field_type, (fields.char, fields.reference)):
pg_type = ('varchar', pg_varchar(f.size))
elif issubclass(field_type, fields.selection):
if (isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
or getattr(f, 'size', None) == -1:
pg_type = ('int4', 'INTEGER')
else:
pg_type = ('varchar', pg_varchar(getattr(f, 'size', None)))
elif issubclass(field_type, fields.function):
if f._type == 'selection':
pg_type = ('varchar', pg_varchar())
else:
pg_type = get_pg_type(f, getattr(fields, f._type))
else:
_logger.warning('%s type not supported!', field_type)
pg_type = None
return pg_type
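# Illustrative dispatch sketch (field constructors shown with assumed, typical
# arguments; the return values follow the branches above):
#
#   get_pg_type(fields.integer('Count'))                  # -> ('int4', 'int4')
#   get_pg_type(fields.char('Name', size=16))             # -> ('varchar', 'VARCHAR(16)')
#   get_pg_type(fields.float('Amount', digits=(16, 2)))   # -> ('numeric', 'NUMERIC')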
class MetaModel(type):
""" Metaclass for the Model.
This class is used as the metaclass for the Model class to discover
the models defined in a module (i.e. without instanciating them).
If the automatic discovery is not needed, it is possible to set the
model's _register attribute to False.
"""
module_to_models = {}
def __init__(self, name, bases, attrs):
if not self._register:
self._register = True
super(MetaModel, self).__init__(name, bases, attrs)
return
# The (OpenERP) module name can be in the `openerp.addons` namespace
# or not. For instance module `sale` can be imported as
# `openerp.addons.sale` (the good way) or `sale` (for backward
# compatibility).
module_parts = self.__module__.split('.')
if len(module_parts) > 2 and module_parts[0] == 'openerp' and \
module_parts[1] == 'addons':
module_name = self.__module__.split('.')[2]
else:
module_name = self.__module__.split('.')[0]
if not hasattr(self, '_module'):
self._module = module_name
# Remember which models to instanciate for this module.
if not self._custom:
self.module_to_models.setdefault(self._module, []).append(self)
# Definition of log access columns, automatically added to models if
# self._log_access is True
LOG_ACCESS_COLUMNS = {
'create_uid': 'INTEGER REFERENCES res_users ON DELETE SET NULL',
'create_date': 'TIMESTAMP',
'write_uid': 'INTEGER REFERENCES res_users ON DELETE SET NULL',
'write_date': 'TIMESTAMP'
}
# special columns automatically created by the ORM
MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS.keys()
class BaseModel(object):
""" Base class for OpenERP models.
OpenERP models are created by inheriting from this class' subclasses:
* Model: for regular database-persisted models
* TransientModel: for temporary data, stored in the database but automatically
                          vacuumed every so often
* AbstractModel: for abstract super classes meant to be shared by multiple
_inheriting classes (usually Models or TransientModels)
The system will later instantiate the class once per database (on
which the class' module is installed).
To create a class that should not be instantiated, the _register class attribute
may be set to False.
"""
__metaclass__ = MetaModel
_auto = True # create database backend
_register = False # Set to false if the model shouldn't be automatically discovered.
_name = None
_columns = {}
_constraints = []
_custom = False
_defaults = {}
_rec_name = None
_parent_name = 'parent_id'
_parent_store = False
_parent_order = False
_date_name = 'date'
_order = 'id'
_sequence = None
_description = None
_needaction = False
# dict of {field:method}, with method returning the (name_get of records, {id: fold})
# to include in the _read_group, if grouped on this field
_group_by_full = {}
# Transience
_transient = False # True in a TransientModel
# structure:
# { 'parent_model': 'm2o_field', ... }
_inherits = {}
    # Mapping from inherits'd field name to a 4-tuple (m, r, f, n) where m is the
# model from which it is inherits'd, r is the (local) field towards m, f
# is the _column object itself, and n is the original (i.e. top-most)
# parent model.
# Example:
# { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
# field_column_obj, origina_parent_model), ... }
_inherit_fields = {}
# Mapping field name/column_info object
# This is similar to _inherit_fields but:
# 1. includes self fields,
# 2. uses column_info instead of a triple.
_all_columns = {}
_table = None
_log_create = False
_sql_constraints = []
_protected = ['read', 'write', 'create', 'default_get', 'perm_read', 'unlink', 'fields_get', 'fields_view_get', 'search', 'name_get', 'distinct_field_get', 'name_search', 'copy', 'import_data', 'search_count', 'exists']
CONCURRENCY_CHECK_FIELD = '__last_update'
def log(self, cr, uid, id, message, secondary=False, context=None):
return _logger.warning("log() is deprecated. Please use OpenChatter notification system instead of the res.log mechanism.")
def view_init(self, cr, uid, fields_list, context=None):
"""Override this method to do specific things when a view on the object is opened."""
pass
def _field_create(self, cr, context=None):
""" Create entries in ir_model_fields for all the model's fields.
If necessary, also create an entry in ir_model, and if called from the
modules loading scheme (by receiving 'module' in the context), also
create entries in ir_model_data (for the model and the fields).
- create an entry in ir_model (if there is not already one),
- create an entry in ir_model_data (if there is not already one, and if
'module' is in the context),
- update ir_model_fields with the fields found in _columns
(TODO there is some redundancy as _columns is updated from
ir_model_fields in __init__).
"""
if context is None:
context = {}
cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
if not cr.rowcount:
cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
model_id = cr.fetchone()[0]
cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
else:
model_id = cr.fetchone()[0]
if 'module' in context:
name_id = 'model_'+self._name.replace('.', '_')
cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
if not cr.rowcount:
cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
(name_id, context['module'], 'ir.model', model_id)
)
cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
cols = {}
for rec in cr.dictfetchall():
cols[rec['name']] = rec
ir_model_fields_obj = self.pool.get('ir.model.fields')
# sparse field should be created at the end, as it depends on its serialized field already existing
model_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
for (k, f) in model_fields:
vals = {
'model_id': model_id,
'model': self._name,
'name': k,
'field_description': f.string,
'ttype': f._type,
'relation': f._obj or '',
'select_level': tools.ustr(f.select or 0),
'readonly': (f.readonly and 1) or 0,
'required': (f.required and 1) or 0,
'selectable': (f.selectable and 1) or 0,
'translate': (f.translate and 1) or 0,
'relation_field': f._fields_id if isinstance(f, fields.one2many) else '',
'serialization_field_id': None,
}
if getattr(f, 'serialization_field', None):
# resolve link to serialization_field if specified by name
serialization_field_id = ir_model_fields_obj.search(cr, SUPERUSER_ID, [('model','=',vals['model']), ('name', '=', f.serialization_field)])
if not serialization_field_id:
raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
vals['serialization_field_id'] = serialization_field_id[0]
# When its a custom field,it does not contain f.select
if context.get('field_state', 'base') == 'manual':
if context.get('field_name', '') == k:
vals['select_level'] = context.get('select', '0')
#setting value to let the problem NOT occur next time
elif k in cols:
vals['select_level'] = cols[k]['select_level']
if k not in cols:
cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
id = cr.fetchone()[0]
vals['id'] = id
cr.execute("""INSERT INTO ir_model_fields (
id, model_id, model, name, field_description, ttype,
relation,state,select_level,relation_field, translate, serialization_field_id
) VALUES (
%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
)""", (
id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
vals['relation'], 'base',
vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
))
if 'module' in context:
name1 = 'field_' + self._table + '_' + k
cr.execute("select name from ir_model_data where name=%s", (name1,))
if cr.fetchone():
name1 = name1 + "_" + str(id)
cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
(name1, context['module'], 'ir.model.fields', id)
)
else:
for key, val in vals.items():
if cols[k][key] != vals[key]:
cr.execute('update ir_model_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name']))
cr.execute("""UPDATE ir_model_fields SET
model_id=%s, field_description=%s, ttype=%s, relation=%s,
select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
WHERE
model=%s AND name=%s""", (
vals['model_id'], vals['field_description'], vals['ttype'],
vals['relation'],
vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
))
break
#
    # Goal: try to apply inheritance at the instantiation level and
# put objects in the pool var
#
@classmethod
def create_instance(cls, pool, cr):
""" Instanciate a given model.
This class method instanciates the class of some model (i.e. a class
deriving from osv or osv_memory). The class might be the class passed
in argument or, if it inherits from another class, a class constructed
by combining the two classes.
The ``attributes`` argument specifies which parent class attributes
have to be combined.
TODO: the creation of the combined class is repeated at each call of
this method. This is probably unnecessary.
"""
attributes = ['_columns', '_defaults', '_inherits', '_constraints',
'_sql_constraints']
parent_names = getattr(cls, '_inherit', None)
if parent_names:
if isinstance(parent_names, (str, unicode)):
name = cls._name or parent_names
parent_names = [parent_names]
else:
name = cls._name
if not name:
raise TypeError('_name is mandatory in case of multiple inheritance')
for parent_name in ((type(parent_names)==list) and parent_names or [parent_names]):
if parent_name not in pool:
raise TypeError('The model "%s" specifies an unexisting parent class "%s"\n'
'You may need to add a dependency on the parent class\' module.' % (name, parent_name))
parent_model = pool[parent_name]
if not getattr(cls, '_original_module', None) and name == parent_model._name:
cls._original_module = parent_model._original_module
parent_class = parent_model.__class__
nattr = {}
for s in attributes:
new = copy.copy(getattr(parent_model, s, {}))
if s == '_columns':
# Don't _inherit custom fields.
for c in new.keys():
if new[c].manual:
del new[c]
if hasattr(new, 'update'):
new.update(cls.__dict__.get(s, {}))
elif s=='_constraints':
for c in cls.__dict__.get(s, []):
exist = False
for c2 in range(len(new)):
#For _constraints, we should check field and methods as well
if new[c2][2]==c[2] and (new[c2][0] == c[0] \
or getattr(new[c2][0],'__name__', True) == \
getattr(c[0],'__name__', False)):
# If new class defines a constraint with
# same function name, we let it override
# the old one.
new[c2] = c
exist = True
break
if not exist:
new.append(c)
else:
new.extend(cls.__dict__.get(s, []))
nattr[s] = new
# Keep links to non-inherited constraints, e.g. useful when exporting translations
nattr['_local_constraints'] = cls.__dict__.get('_constraints', [])
nattr['_local_sql_constraints'] = cls.__dict__.get('_sql_constraints', [])
cls = type(name, (cls, parent_class), dict(nattr, _register=False))
else:
cls._local_constraints = getattr(cls, '_constraints', [])
cls._local_sql_constraints = getattr(cls, '_sql_constraints', [])
if not getattr(cls, '_original_module', None):
cls._original_module = cls._module
obj = object.__new__(cls)
if hasattr(obj, '_columns'):
# float fields are registry-dependent (digit attribute). Duplicate them to avoid issues.
for c, f in obj._columns.items():
if f._type == 'float':
obj._columns[c] = copy.copy(f)
obj.__init__(pool, cr)
return obj
def __new__(cls):
"""Register this model.
        This doesn't create an instance but simply registers the model
as being part of the module where it is defined.
"""
# Set the module name (e.g. base, sale, accounting, ...) on the class.
module = cls.__module__.split('.')[0]
if not hasattr(cls, '_module'):
cls._module = module
# Record this class in the list of models to instantiate for this module,
# managed by the metaclass.
module_model_list = MetaModel.module_to_models.setdefault(cls._module, [])
if cls not in module_model_list:
if not cls._custom:
module_model_list.append(cls)
# Since we don't return an instance here, the __init__
# method won't be called.
return None
def __init__(self, pool, cr):
""" Initialize a model and make it part of the given registry.
- copy the stored fields' functions in the osv_pool,
- update the _columns with the fields found in ir_model_fields,
- ensure there is a many2one for each _inherits'd parent,
- update the children's _columns,
- give a chance to each field to initialize itself.
"""
pool.add(self._name, self)
self.pool = pool
if not self._name and not hasattr(self, '_inherit'):
name = type(self).__name__.split('.')[0]
msg = "The class %s has to have a _name attribute" % name
_logger.error(msg)
raise except_orm('ValueError', msg)
if not self._description:
self._description = self._name
if not self._table:
self._table = self._name.replace('.', '_')
if not hasattr(self, '_log_access'):
# If _log_access is not specified, it is the same value as _auto.
self._log_access = getattr(self, "_auto", True)
self._columns = self._columns.copy()
for store_field in self._columns:
f = self._columns[store_field]
if hasattr(f, 'digits_change'):
f.digits_change(cr)
def not_this_field(stored_func):
x, y, z, e, f, l = stored_func
return x != self._name or y != store_field
self.pool._store_function[self._name] = filter(not_this_field, self.pool._store_function.get(self._name, []))
if not isinstance(f, fields.function):
continue
if not f.store:
continue
sm = f.store
if sm is True:
sm = {self._name: (lambda self, cr, uid, ids, c={}: ids, None, f.priority, None)}
for object, aa in sm.items():
if len(aa) == 4:
(fnct, fields2, order, length) = aa
elif len(aa) == 3:
(fnct, fields2, order) = aa
length = None
else:
raise except_orm('Error',
('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (store_field, self._name)))
self.pool._store_function.setdefault(object, [])
t = (self._name, store_field, fnct, tuple(fields2) if fields2 else None, order, length)
if not t in self.pool._store_function[object]:
self.pool._store_function[object].append((self._name, store_field, fnct, tuple(fields2) if fields2 else None, order, length))
self.pool._store_function[object].sort(lambda x, y: cmp(x[4], y[4]))
for (key, _, msg) in self._sql_constraints:
self.pool._sql_error[self._table+'_'+key] = msg
# Load manual fields
# Check the query is already done for all modules of if we need to
# do it ourselves.
if self.pool.fields_by_model is not None:
manual_fields = self.pool.fields_by_model.get(self._name, [])
else:
cr.execute('SELECT * FROM ir_model_fields WHERE model=%s AND state=%s', (self._name, 'manual'))
manual_fields = cr.dictfetchall()
for field in manual_fields:
if field['name'] in self._columns:
continue
attrs = {
'string': field['field_description'],
'required': bool(field['required']),
'readonly': bool(field['readonly']),
'domain': eval(field['domain']) if field['domain'] else None,
'size': field['size'] or None,
'ondelete': field['on_delete'],
'translate': (field['translate']),
'manual': True,
'_prefetch': False,
#'select': int(field['select_level'])
}
if field['serialization_field_id']:
cr.execute('SELECT name FROM ir_model_fields WHERE id=%s', (field['serialization_field_id'],))
attrs.update({'serialization_field': cr.fetchone()[0], 'type': field['ttype']})
if field['ttype'] in ['many2one', 'one2many', 'many2many']:
attrs.update({'relation': field['relation']})
self._columns[field['name']] = fields.sparse(**attrs)
elif field['ttype'] == 'selection':
self._columns[field['name']] = fields.selection(eval(field['selection']), **attrs)
elif field['ttype'] == 'reference':
self._columns[field['name']] = fields.reference(selection=eval(field['selection']), **attrs)
elif field['ttype'] == 'many2one':
self._columns[field['name']] = fields.many2one(field['relation'], **attrs)
elif field['ttype'] == 'one2many':
self._columns[field['name']] = fields.one2many(field['relation'], field['relation_field'], **attrs)
elif field['ttype'] == 'many2many':
_rel1 = field['relation'].replace('.', '_')
_rel2 = field['model'].replace('.', '_')
_rel_name = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
self._columns[field['name']] = fields.many2many(field['relation'], _rel_name, 'id1', 'id2', **attrs)
else:
self._columns[field['name']] = getattr(fields, field['ttype'])(**attrs)
self._inherits_check()
self._inherits_reload()
if not self._sequence:
self._sequence = self._table + '_id_seq'
for k in self._defaults:
assert (k in self._columns) or (k in self._inherit_fields), 'Default function defined in %s but field %s does not exist !' % (self._name, k,)
for f in self._columns:
self._columns[f].restart()
# Transience
if self.is_transient():
self._transient_check_count = 0
self._transient_max_count = config.get('osv_memory_count_limit')
self._transient_max_hours = config.get('osv_memory_age_limit')
assert self._log_access, "TransientModels must have log_access turned on, "\
"in order to implement their access rights policy"
# Validate rec_name
if self._rec_name is not None:
assert self._rec_name in self._all_columns.keys() + ['id'], "Invalid rec_name %s for model %s" % (self._rec_name, self._name)
else:
self._rec_name = 'name'
def __export_row(self, cr, uid, row, fields, raw_data=False, context=None):
if context is None:
context = {}
def check_type(field_type):
if field_type == 'float':
return 0.0
elif field_type == 'integer':
return 0
elif field_type == 'boolean':
return 'False'
return ''
def selection_field(in_field):
col_obj = self.pool[in_field.keys()[0]]
if f[i] in col_obj._columns.keys():
return col_obj._columns[f[i]]
elif f[i] in col_obj._inherits.keys():
selection_field(col_obj._inherits)
else:
return False
def _get_xml_id(self, cr, uid, r):
model_data = self.pool.get('ir.model.data')
data_ids = model_data.search(cr, uid, [('model', '=', r._model._name), ('res_id', '=', r['id'])])
if len(data_ids):
d = model_data.read(cr, uid, data_ids, ['name', 'module'])[0]
if d['module']:
r = '%s.%s' % (d['module'], d['name'])
else:
r = d['name']
else:
postfix = 0
while True:
n = r._model._table+'_'+str(r['id']) + (postfix and ('_'+str(postfix)) or '' )
if not model_data.search(cr, uid, [('name', '=', n)]):
break
postfix += 1
model_data.create(cr, SUPERUSER_ID, {
'name': n,
'model': r._model._name,
'res_id': r['id'],
'module': '__export__',
})
r = '__export__.'+n
return r
lines = []
data = map(lambda x: '', range(len(fields)))
done = []
for fpos in range(len(fields)):
f = fields[fpos]
if f:
r = row
i = 0
while i < len(f):
cols = False
if f[i] == '.id':
r = r['id']
elif f[i] == 'id':
r = _get_xml_id(self, cr, uid, r)
else:
r = r[f[i]]
# To display external name of selection field when its exported
if f[i] in self._columns.keys():
cols = self._columns[f[i]]
elif f[i] in self._inherit_fields.keys():
cols = selection_field(self._inherits)
if cols and cols._type == 'selection':
sel_list = cols.selection
if r and type(sel_list) == type([]):
r = [x[1] for x in sel_list if r==x[0]]
r = r and r[0] or False
if not r:
if f[i] in self._columns:
r = check_type(self._columns[f[i]]._type)
elif f[i] in self._inherit_fields:
r = check_type(self._inherit_fields[f[i]][2]._type)
data[fpos] = r or False
break
if isinstance(r, (browse_record_list, list)):
first = True
fields2 = map(lambda x: (x[:i+1]==f[:i+1] and x[i+1:]) \
or [], fields)
if fields2 in done:
if [x for x in fields2 if x]:
break
done.append(fields2)
if cols and cols._type=='many2many' and len(fields[fpos])>(i+1) and (fields[fpos][i+1]=='id'):
data[fpos] = ','.join([_get_xml_id(self, cr, uid, x) for x in r])
break
for row2 in r:
lines2 = row2._model.__export_row(cr, uid, row2, fields2, context=context)
if first:
for fpos2 in range(len(fields)):
if lines2 and lines2[0][fpos2]:
data[fpos2] = lines2[0][fpos2]
if not data[fpos]:
dt = ''
for rr in r:
name_relation = self.pool[rr._table_name]._rec_name
if isinstance(rr[name_relation], browse_record):
rr = rr[name_relation]
rr_name = self.pool[rr._table_name].name_get(cr, uid, [rr.id], context=context)
rr_name = rr_name and rr_name[0] and rr_name[0][1] or ''
dt += tools.ustr(rr_name or '') + ','
data[fpos] = dt[:-1]
break
lines += lines2[1:]
first = False
else:
lines += lines2
break
i += 1
if i == len(f):
if isinstance(r, browse_record):
r = self.pool[r._table_name].name_get(cr, uid, [r.id], context=context)
r = r and r[0] and r[0][1] or ''
if raw_data and cols and cols._type in ('integer', 'boolean', 'float'):
data[fpos] = r
elif raw_data and cols and cols._type == 'date':
data[fpos] = datetime.datetime.strptime(r, tools.DEFAULT_SERVER_DATE_FORMAT).date()
elif raw_data and cols and cols._type == 'datetime':
data[fpos] = datetime.datetime.strptime(r, tools.DEFAULT_SERVER_DATETIME_FORMAT)
else:
data[fpos] = tools.ustr(r or '')
return [data] + lines
def export_data(self, cr, uid, ids, fields_to_export, raw_data=False, context=None):
"""
Export fields for selected objects
:param cr: database cursor
:param uid: current user id
:param ids: list of ids
:param fields_to_export: list of fields
:param raw_data: True to return value in fields type, False for string values
:param context: context arguments, like lang, time zone
:rtype: dictionary with a *datas* matrix
This method is used when exporting data via client menu
"""
if context is None:
context = {}
cols = self._columns.copy()
for f in self._inherit_fields:
cols.update({f: self._inherit_fields[f][2]})
fields_to_export = map(fix_import_export_id_paths, fields_to_export)
datas = []
for row in self.browse(cr, uid, ids, context):
datas += self.__export_row(cr, uid, row, fields_to_export, raw_data=raw_data, context=context)
return {'datas': datas}
def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
"""
.. deprecated:: 7.0
Use :meth:`~load` instead
Import given data in given module
This method is used when importing data via client menu.
Example of fields to import for a sale.order::
.id, (=database_id)
partner_id, (=name_search)
order_line/.id, (=database_id)
order_line/name,
order_line/product_id/id, (=xml id)
order_line/price_unit,
order_line/product_uom_qty,
order_line/product_uom/id (=xml_id)
This method returns a 4-tuple with the following structure::
(return_code, errored_resource, error_message, unused)
* The first item is a return code, it is ``-1`` in case of
import error, or the last imported row number in case of success
* The second item contains the record data dict that failed to import
in case of error, otherwise it's 0
* The third item contains an error message string in case of error,
otherwise it's 0
* The last item is currently unused, with no specific semantics
:param fields: list of fields to import
:param datas: data to import
:param mode: 'init' or 'update' for record creation
:param current_module: module name
:param noupdate: flag for record creation
:param filename: optional file to store partial import state for recovery
:returns: 4-tuple in the form (return_code, errored_resource, error_message, unused)
:rtype: (int, dict or 0, str or 0, str or 0)
"""
context = dict(context) if context is not None else {}
context['_import_current_module'] = current_module
fields = map(fix_import_export_id_paths, fields)
ir_model_data_obj = self.pool.get('ir.model.data')
def log(m):
if m['type'] == 'error':
raise Exception(m['message'])
if config.get('import_partial') and filename:
with open(config.get('import_partial'), 'rb') as partial_import_file:
data = pickle.load(partial_import_file)
position = data.get(filename, 0)
position = 0
try:
cr.execute('SAVEPOINT convert_records')
for res_id, xml_id, res, info in self._convert_records(cr, uid,
self._extract_records(cr, uid, fields, datas,
context=context, log=log),
context=context, log=log):
ir_model_data_obj._update(cr, uid, self._name,
current_module, res, mode=mode, xml_id=xml_id,
noupdate=noupdate, res_id=res_id, context=context)
position = info.get('rows', {}).get('to', 0) + 1
if config.get('import_partial') and filename and (not (position%100)):
with open(config.get('import_partial'), 'rb') as partial_import:
data = pickle.load(partial_import)
data[filename] = position
with open(config.get('import_partial'), 'wb') as partial_import:
pickle.dump(data, partial_import)
if context.get('defer_parent_store_computation'):
self._parent_store_compute(cr)
cr.commit()
cr.execute('RELEASE SAVEPOINT convert_records')
except Exception, e:
cr.execute('ROLLBACK TO SAVEPOINT convert_records')
return -1, {}, 'Line %d : %s' % (position + 1, tools.ustr(e)), ''
if context.get('defer_parent_store_computation'):
self._parent_store_compute(cr)
return position, 0, 0, 0
def load(self, cr, uid, fields, data, context=None):
"""
Attempts to load the data matrix, and returns a list of ids (or
``False`` if there was an error and no id could be generated) and a
list of messages.
The ids are those of the records created and saved (in database), in
the same order they were extracted from the file. They can be passed
directly to :meth:`~read`
:param fields: list of fields to import, at the same index as the corresponding data
:type fields: list(str)
:param data: row-major matrix of data to import
:type data: list(list(str))
:param dict context:
:returns: {ids: list(int)|False, messages: [Message]}
"""
cr.execute('SAVEPOINT model_load')
messages = []
fields = map(fix_import_export_id_paths, fields)
        ModelData = self.pool['ir.model.data']
        # clear_caches() returns None, so it must not be chained on the lookup
        ModelData.clear_caches()
fg = self.fields_get(cr, uid, context=context)
mode = 'init'
current_module = ''
noupdate = False
ids = []
for id, xid, record, info in self._convert_records(cr, uid,
self._extract_records(cr, uid, fields, data,
context=context, log=messages.append),
context=context, log=messages.append):
try:
cr.execute('SAVEPOINT model_load_save')
except psycopg2.InternalError, e:
# broken transaction, exit and hope the source error was
# already logged
if not any(message['type'] == 'error' for message in messages):
messages.append(dict(info, type='error',message=
u"Unknown database error: '%s'" % e))
break
try:
ids.append(ModelData._update(cr, uid, self._name,
current_module, record, mode=mode, xml_id=xid,
noupdate=noupdate, res_id=id, context=context))
cr.execute('RELEASE SAVEPOINT model_load_save')
except psycopg2.Warning, e:
messages.append(dict(info, type='warning', message=str(e)))
cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
except psycopg2.Error, e:
messages.append(dict(
info, type='error',
**PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
# Failed to write, log to messages, rollback savepoint (to
# avoid broken transaction) and keep going
cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
if any(message['type'] == 'error' for message in messages):
cr.execute('ROLLBACK TO SAVEPOINT model_load')
ids = False
return {'ids': ids, 'messages': messages}
def _extract_records(self, cr, uid, fields_, data,
context=None, log=lambda a: None):
""" Generates record dicts from the data sequence.
The result is a generator of dicts mapping field names to raw
(unconverted, unvalidated) values.
For relational fields, if sub-fields were provided the value will be
a list of sub-records
The following sub-fields may be set on the record (by key):
* None is the name_get for the record (to use with name_create/name_search)
* "id" is the External ID for the record
* ".id" is the Database ID for the record
"""
columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
# Fake columns to avoid special cases in extractor
columns[None] = fields.char('rec_name')
columns['id'] = fields.char('External ID')
columns['.id'] = fields.integer('Database ID')
# m2o fields can't be on multiple lines so exclude them from the
# is_relational field rows filter, but special-case it later on to
# be handled with relational fields (as it can have subfields)
is_relational = lambda field: columns[field]._type in ('one2many', 'many2many', 'many2one')
get_o2m_values = itemgetter_tuple(
[index for index, field in enumerate(fields_)
if columns[field[0]]._type == 'one2many'])
get_nono2m_values = itemgetter_tuple(
[index for index, field in enumerate(fields_)
if columns[field[0]]._type != 'one2many'])
        # Checks whether the provided row carries only one2many values
        # (i.e. no non-relational field of the current record is set)
def only_o2m_values(row, f=get_nono2m_values, g=get_o2m_values):
return any(g(row)) and not any(f(row))
index = 0
while True:
if index >= len(data): return
row = data[index]
# copy non-relational fields to record dict
record = dict((field[0], value)
for field, value in itertools.izip(fields_, row)
if not is_relational(field[0]))
# Get all following rows which have relational values attached to
# the current record (no non-relational values)
record_span = itertools.takewhile(
only_o2m_values, itertools.islice(data, index + 1, None))
# stitch record row back on for relational fields
record_span = list(itertools.chain([row], record_span))
for relfield in set(
field[0] for field in fields_
if is_relational(field[0])):
column = columns[relfield]
# FIXME: how to not use _obj without relying on fields_get?
Model = self.pool[column._obj]
# get only cells for this sub-field, should be strictly
# non-empty, field path [None] is for name_get column
indices, subfields = zip(*((index, field[1:] or [None])
for index, field in enumerate(fields_)
if field[0] == relfield))
# return all rows which have at least one value for the
# subfields of relfield
relfield_data = filter(any, map(itemgetter_tuple(indices), record_span))
record[relfield] = [subrecord
for subrecord, _subinfo in Model._extract_records(
cr, uid, subfields, relfield_data,
context=context, log=log)]
yield record, {'rows': {
'from': index,
'to': index + len(record_span) - 1
}}
index += len(record_span)
def _convert_records(self, cr, uid, records,
context=None, log=lambda a: None):
""" Converts records from the source iterable (recursive dicts of
strings) into forms which can be written to the database (via
self.create or (ir.model.data)._update)
:returns: a list of triplets of (id, xid, record)
:rtype: list((int|None, str|None, dict))
"""
if context is None: context = {}
Converter = self.pool['ir.fields.converter']
columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
Translation = self.pool['ir.translation']
field_names = dict(
(f, (Translation._get_source(cr, uid, self._name + ',' + f, 'field',
context.get('lang'))
or column.string))
for f, column in columns.iteritems())
convert = Converter.for_model(cr, uid, self, context=context)
def _log(base, field, exception):
type = 'warning' if isinstance(exception, Warning) else 'error'
# logs the logical (not human-readable) field name for automated
# processing of response, but injects human readable in message
record = dict(base, type=type, field=field,
message=unicode(exception.args[0]) % base)
if len(exception.args) > 1 and exception.args[1]:
record.update(exception.args[1])
log(record)
stream = CountingStream(records)
for record, extras in stream:
dbid = False
xid = False
# name_get/name_create
if None in record: pass
# xid
if 'id' in record:
xid = record['id']
# dbid
if '.id' in record:
try:
dbid = int(record['.id'])
except ValueError:
# in case of overridden id column
dbid = record['.id']
if not self.search(cr, uid, [('id', '=', dbid)], context=context):
log(dict(extras,
type='error',
record=stream.index,
field='.id',
message=_(u"Unknown database identifier '%s'") % dbid))
dbid = False
converted = convert(record, lambda field, err:\
_log(dict(extras, record=stream.index, field=field_names[field]), field, err))
yield dbid, xid, converted, dict(extras, record=stream.index)
def _validate(self, cr, uid, ids, context=None):
context = context or {}
lng = context.get('lang')
trans = self.pool.get('ir.translation')
error_msgs = []
for constraint in self._constraints:
fun, msg, fields = constraint
try:
# We don't pass around the context here: validation code
# must always yield the same results.
valid = fun(self, cr, uid, ids)
extra_error = None
except Exception, e:
_logger.debug('Exception while validating constraint', exc_info=True)
valid = False
extra_error = tools.ustr(e)
if not valid:
# Check presence of __call__ directly instead of using
# callable() because it will be deprecated as of Python 3.0
if hasattr(msg, '__call__'):
translated_msg = msg(self, cr, uid, ids, context=context)
if isinstance(translated_msg, tuple):
translated_msg = translated_msg[0] % translated_msg[1]
else:
translated_msg = trans._get_source(cr, uid, self._name, 'constraint', lng, msg)
if extra_error:
translated_msg += "\n\n%s\n%s" % (_('Error details:'), extra_error)
error_msgs.append(
_("The field(s) `%s` failed against a constraint: %s") % (', '.join(fields), translated_msg)
)
if error_msgs:
# OpenUpgrade: do not raise for obsolete fields
# raise except_orm('ValidateError', '\n'.join(error_msgs))
_logger.debug('OpenUpgrade: View error has not been raised. %s', '\n'.join(error_msgs))
def default_get(self, cr, uid, fields_list, context=None):
"""
Returns default values for the fields in fields_list.
:param fields_list: list of fields to get the default values for (example ['field1', 'field2',])
:type fields_list: list
        :param context: optional context dictionary - it may contain keys for specifying certain options
like ``context_lang`` (language) or ``context_tz`` (timezone) to alter the results of the call.
It may contain keys in the form ``default_XXX`` (where XXX is a field name), to set
or override a default value for a field.
A special ``bin_size`` boolean flag may also be passed in the context to request the
value of all fields.binary columns to be returned as the size of the binary instead of its
                        contents. This can also be selectively overridden by passing a field-specific flag
in the form ``bin_size_XXX: True/False`` where ``XXX`` is the name of the field.
Note: The ``bin_size_XXX`` form is new in OpenERP v6.0.
:return: dictionary of the default values (set on the object model class, through user preferences, or in the context)
"""
# trigger view init hook
self.view_init(cr, uid, fields_list, context)
if not context:
context = {}
defaults = {}
# get the default values for the inherited fields
for t in self._inherits.keys():
defaults.update(self.pool[t].default_get(cr, uid, fields_list, context))
# get the default values defined in the object
for f in fields_list:
if f in self._defaults:
if callable(self._defaults[f]):
defaults[f] = self._defaults[f](self, cr, uid, context)
else:
defaults[f] = self._defaults[f]
fld_def = ((f in self._columns) and self._columns[f]) \
or ((f in self._inherit_fields) and self._inherit_fields[f][2]) \
or False
if isinstance(fld_def, fields.property):
property_obj = self.pool.get('ir.property')
prop_value = property_obj.get(cr, uid, f, self._name, context=context)
if prop_value:
if isinstance(prop_value, (browse_record, browse_null)):
defaults[f] = prop_value.id
else:
defaults[f] = prop_value
else:
if f not in defaults:
defaults[f] = False
# get the default values set by the user and override the default
# values defined in the object
ir_values_obj = self.pool.get('ir.values')
res = ir_values_obj.get(cr, uid, 'default', False, [self._name])
for id, field, field_value in res:
if field in fields_list:
fld_def = (field in self._columns) and self._columns[field] or self._inherit_fields[field][2]
if fld_def._type == 'many2one':
obj = self.pool[fld_def._obj]
if not obj.search(cr, uid, [('id', '=', field_value or False)]):
continue
if fld_def._type == 'many2many':
obj = self.pool[fld_def._obj]
field_value2 = []
for i in range(len(field_value or [])):
if not obj.search(cr, uid, [('id', '=',
field_value[i])]):
continue
field_value2.append(field_value[i])
field_value = field_value2
if fld_def._type == 'one2many':
obj = self.pool[fld_def._obj]
field_value2 = []
for i in range(len(field_value or [])):
field_value2.append({})
for field2 in field_value[i]:
if field2 in obj._columns.keys() and obj._columns[field2]._type == 'many2one':
obj2 = self.pool[obj._columns[field2]._obj]
if not obj2.search(cr, uid,
[('id', '=', field_value[i][field2])]):
continue
elif field2 in obj._inherit_fields.keys() and obj._inherit_fields[field2][2]._type == 'many2one':
obj2 = self.pool[obj._inherit_fields[field2][2]._obj]
if not obj2.search(cr, uid,
[('id', '=', field_value[i][field2])]):
continue
# TODO add test for many2many and one2many
field_value2[i][field2] = field_value[i][field2]
field_value = field_value2
defaults[field] = field_value
# get the default values from the context
for key in context or {}:
if key.startswith('default_') and (key[8:] in fields_list):
defaults[key[8:]] = context[key]
return defaults
def fields_get_keys(self, cr, user, context=None):
res = self._columns.keys()
        # TODO I believe this loop can be replaced by
        # res.extend(self._inherit_fields.keys())
for parent in self._inherits:
res.extend(self.pool[parent].fields_get_keys(cr, user, context))
return res
def _rec_name_fallback(self, cr, uid, context=None):
rec_name = self._rec_name
if rec_name not in self._columns:
rec_name = self._columns.keys()[0] if len(self._columns.keys()) > 0 else "id"
return rec_name
#
# Overload this method if you need a window title which depends on the context
#
def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
return False
def user_has_groups(self, cr, uid, groups, context=None):
"""Return true if the user is at least member of one of the groups
in groups_str. Typically used to resolve ``groups`` attribute
in view and model definitions.
:param str groups: comma-separated list of fully-qualified group
external IDs, e.g.: ``base.group_user,base.group_system``
:return: True if the current user is a member of one of the
given groups
"""
return any([self.pool.get('res.users').has_group(cr, uid, group_ext_id)
for group_ext_id in groups.split(',')])
def _get_default_form_view(self, cr, user, context=None):
""" Generates a default single-line form view using all fields
of the current model except the m2m and o2m ones.
:param cr: database cursor
:param int user: user id
:param dict context: connection context
:returns: a form view as an lxml document
:rtype: etree._Element
"""
view = etree.Element('form', string=self._description)
# TODO it seems fields_get can be replaced by _all_columns (no need for translation)
for field, descriptor in self.fields_get(cr, user, context=context).iteritems():
if descriptor['type'] in ('one2many', 'many2many'):
continue
etree.SubElement(view, 'field', name=field)
if descriptor['type'] == 'text':
etree.SubElement(view, 'newline')
return view
def _get_default_search_view(self, cr, user, context=None):
""" Generates a single-field search view, based on _rec_name.
:param cr: database cursor
:param int user: user id
:param dict context: connection context
:returns: a tree view as an lxml document
:rtype: etree._Element
"""
view = etree.Element('search', string=self._description)
etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
return view
def _get_default_tree_view(self, cr, user, context=None):
""" Generates a single-field tree view, based on _rec_name.
:param cr: database cursor
:param int user: user id
:param dict context: connection context
:returns: a tree view as an lxml document
:rtype: etree._Element
"""
view = etree.Element('tree', string=self._description)
etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
return view
def _get_default_calendar_view(self, cr, user, context=None):
""" Generates a default calendar view by trying to infer
calendar fields from a number of pre-set attribute names
:param cr: database cursor
:param int user: user id
:param dict context: connection context
:returns: a calendar view
:rtype: etree._Element
"""
def set_first_of(seq, in_, to):
"""Sets the first value of ``seq`` also found in ``in_`` to
the ``to`` attribute of the view being closed over.
Returns whether it's found a suitable value (and set it on
the attribute) or not
"""
for item in seq:
if item in in_:
view.set(to, item)
return True
return False
view = etree.Element('calendar', string=self._description)
etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
if self._date_name not in self._columns:
date_found = False
for dt in ['date', 'date_start', 'x_date', 'x_date_start']:
if dt in self._columns:
self._date_name = dt
date_found = True
break
if not date_found:
raise except_orm(_('Invalid Object Architecture!'), _("Insufficient fields for Calendar View!"))
view.set('date_start', self._date_name)
set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
self._columns, 'color')
if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
self._columns, 'date_stop'):
if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
self._columns, 'date_delay'):
raise except_orm(
_('Invalid Object Architecture!'),
_("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay" % self._name))
return view
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
"""
        Get the detailed composition of the requested view (fields, model, view architecture).
:param view_id: id of the view or None
:param view_type: type of the view to return if view_id is None ('form', tree', ...)
:param toolbar: true to include contextual actions
:param submenu: deprecated
:return: dictionary describing the composition of the requested view (including inherited views and extensions)
:raise AttributeError:
* if the inherited view has unknown position to work with other than 'before', 'after', 'inside', 'replace'
* if some tag other than 'position' is found in parent view
        :raise Invalid ArchitectureError: if a view type other than form, tree, calendar, search, etc. is defined on the structure
"""
if context is None:
context = {}
View = self.pool['ir.ui.view']
result = {
'model': self._name,
'field_parent': False,
}
# try to find a view_id if none provided
if not view_id:
            # <view_type>_view_ref in context can be used to override the default view
view_ref_key = view_type + '_view_ref'
view_ref = context.get(view_ref_key)
if view_ref:
if '.' in view_ref:
module, view_ref = view_ref.split('.', 1)
cr.execute("SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s", (module, view_ref))
view_ref_res = cr.fetchone()
if view_ref_res:
view_id = view_ref_res[0]
else:
_logger.warning('%r requires a fully-qualified external id (got: %r for model %s). '
'Please use the complete `module.view_id` form instead.', view_ref_key, view_ref,
self._name)
if not view_id:
# otherwise try to find the lowest priority matching ir.ui.view
view_id = View.default_view(cr, uid, self._name, view_type, context=context)
        # context for post-processing might be overridden
ctx = context
if view_id:
# read the view with inherited views applied
root_view = View.read_combined(cr, uid, view_id, fields=['id', 'name', 'field_parent', 'type', 'model', 'arch'], context=context)
result['arch'] = root_view['arch']
result['name'] = root_view['name']
result['type'] = root_view['type']
result['view_id'] = root_view['id']
result['field_parent'] = root_view['field_parent']
            # override context for post-processing
if root_view.get('model') != self._name:
ctx = dict(context, base_model_name=root_view.get('model'))
else:
# fallback on default views methods if no ir.ui.view could be found
try:
get_func = getattr(self, '_get_default_%s_view' % view_type)
arch_etree = get_func(cr, uid, context)
result['arch'] = etree.tostring(arch_etree, encoding='utf-8')
result['type'] = view_type
result['name'] = 'default'
except AttributeError:
raise except_orm(_('Invalid Architecture!'), _("No default view of type '%s' could be found !") % view_type)
# Apply post processing, groups and modifiers etc...
xarch, xfields = View.postprocess_and_fields(cr, uid, self._name, etree.fromstring(result['arch']), view_id, context=ctx)
result['arch'] = xarch
result['fields'] = xfields
        # Add related action information if asked
if toolbar:
toclean = ('report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml', 'report_sxw_content_data', 'report_rml_content_data')
def clean(x):
x = x[2]
for key in toclean:
x.pop(key, None)
return x
ir_values_obj = self.pool.get('ir.values')
resprint = ir_values_obj.get(cr, uid, 'action', 'client_print_multi', [(self._name, False)], False, context)
resaction = ir_values_obj.get(cr, uid, 'action', 'client_action_multi', [(self._name, False)], False, context)
resrelate = ir_values_obj.get(cr, uid, 'action', 'client_action_relate', [(self._name, False)], False, context)
resaction = [clean(action) for action in resaction if view_type == 'tree' or not action[2].get('multi')]
resprint = [clean(print_) for print_ in resprint if view_type == 'tree' or not print_[2].get('multi')]
            # When multi="True" is set, the action is only displayed in the 'More' menu of the list view
resrelate = [clean(action) for action in resrelate
if (action[2].get('multi') and view_type == 'tree') or (not action[2].get('multi') and view_type == 'form')]
for x in itertools.chain(resprint, resaction, resrelate):
x['string'] = x['name']
result['toolbar'] = {
'print': resprint,
'action': resaction,
'relate': resrelate
}
return result
def get_formview_id(self, cr, uid, id, context=None):
""" Return an view id to open the document with. This method is meant to be
overridden in addons that want to give specific view ids for example.
:param int id: id of the document to open
"""
return False
def get_formview_action(self, cr, uid, id, context=None):
""" Return an action to open the document. This method is meant to be
overridden in addons that want to give specific view ids for example.
:param int id: id of the document to open
"""
view_id = self.get_formview_id(cr, uid, id, context=context)
return {
'type': 'ir.actions.act_window',
'res_model': self._name,
'view_type': 'form',
'view_mode': 'form',
'views': [(view_id, 'form')],
'target': 'current',
'res_id': id,
}
def _view_look_dom_arch(self, cr, uid, node, view_id, context=None):
return self.pool['ir.ui.view'].postprocess_and_fields(
cr, uid, self._name, node, view_id, context=context)
def search_count(self, cr, user, args, context=None):
res = self.search(cr, user, args, context=context, count=True)
if isinstance(res, list):
return len(res)
return res
def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
"""
Search for records based on a search domain.
:param cr: database cursor
:param user: current user id
:param args: list of tuples specifying the search domain [('field_name', 'operator', value), ...]. Pass an empty list to match all records.
:param offset: optional number of results to skip in the returned values (default: 0)
:param limit: optional max number of records to return (default: **None**)
:param order: optional columns to sort by (default: self._order=id )
:param context: optional context arguments, like lang, time zone
:type context: dictionary
:param count: optional (default: **False**), if **True**, returns only the number of records matching the criteria, not their ids
:return: id or list of ids of records matching the criteria
:rtype: integer or list of integers
:raise AccessError: * if user tries to bypass access rules for read on the requested object.
**Expressing a search domain (args)**
Each tuple in the search domain needs to have 3 elements, in the form: **('field_name', 'operator', value)**, where:
* **field_name** must be a valid name of field of the object model, possibly following many-to-one relationships using dot-notation, e.g 'street' or 'partner_id.country' are valid values.
* **operator** must be a string with a valid comparison operator from this list: ``=, !=, >, >=, <, <=, like, ilike, in, not in, child_of, parent_left, parent_right``
The semantics of most of these operators are obvious.
The ``child_of`` operator will look for records who are children or grand-children of a given record,
according to the semantics of this model (i.e following the relationship field named by
``self._parent_name``, by default ``parent_id``.
* **value** must be a valid value to compare with the values of **field_name**, depending on its type.
            Domain criteria can be combined using 3 logical operators that can be added between tuples: '**&**' (logical AND, default), '**|**' (logical OR), '**!**' (logical NOT).
These are **prefix** operators and the arity of the '**&**' and '**|**' operator is 2, while the arity of the '**!**' is just 1.
Be very careful about this when you combine them the first time.
            Here is an example of searching for Partners named *ABC* from Belgium and Germany whose language is not English::
                [('name','=','ABC'),'!',('language.code','=','en_US'),'|',('country_id.code','=','be'),('country_id.code','=','de')]
The '&' is omitted as it is the default, and of course we could have used '!=' for the language, but what this domain really represents is::
(name is 'ABC' AND (language is NOT english) AND (country is Belgium OR Germany))
"""
return self._search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
def name_get(self, cr, user, ids, context=None):
"""Returns the preferred display value (text representation) for the records with the
given ``ids``. By default this will be the value of the ``name`` column, unless
the model implements a custom behavior.
Can sometimes be seen as the inverse function of :meth:`~.name_search`, but it is not
guaranteed to be.
:rtype: list(tuple)
:return: list of pairs ``(id,text_repr)`` for all records with the given ``ids``.
"""
if not ids:
return []
if isinstance(ids, (int, long)):
ids = [ids]
if self._rec_name in self._all_columns:
rec_name_column = self._all_columns[self._rec_name].column
return [(r['id'], rec_name_column.as_display_name(cr, user, self, r[self._rec_name], context=context))
for r in self.read(cr, user, ids, [self._rec_name],
load='_classic_write', context=context)]
return [(id, "%s,%s" % (self._name, id)) for id in ids]
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
"""Search for records that have a display name matching the given ``name`` pattern if compared
with the given ``operator``, while also matching the optional search domain (``args``).
This is used for example to provide suggestions based on a partial value for a relational
field.
        It can sometimes be seen as the inverse function of :meth:`~.name_get`, but it is not
guaranteed to be.
This method is equivalent to calling :meth:`~.search` with a search domain based on ``name``
and then :meth:`~.name_get` on the result of the search.
:param list args: optional search domain (see :meth:`~.search` for syntax),
specifying further restrictions
:param str operator: domain operator for matching the ``name`` pattern, such as ``'like'``
or ``'='``.
:param int limit: optional max number of records to return
:rtype: list
:return: list of pairs ``(id,text_repr)`` for all matching records.
"""
return self._name_search(cr, user, name, args, operator, context, limit)
def name_create(self, cr, uid, name, context=None):
"""Creates a new record by calling :meth:`~.create` with only one
value provided: the name of the new record (``_rec_name`` field).
The new record will also be initialized with any default values applicable
to this model, or provided through the context. The usual behavior of
:meth:`~.create` applies.
Similarly, this method may raise an exception if the model has multiple
required fields and some do not have default values.
:param name: name of the record to create
:rtype: tuple
:return: the :meth:`~.name_get` pair value for the newly-created record.
"""
rec_id = self.create(cr, uid, {self._rec_name: name}, context)
return self.name_get(cr, uid, [rec_id], context)[0]
# private implementation of name_search, allows passing a dedicated user for the name_get part to
# solve some access rights issues
def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
if args is None:
args = []
if context is None:
context = {}
args = args[:]
# optimize out the default criterion of ``ilike ''`` that matches everything
if not (name == '' and operator == 'ilike'):
args += [(self._rec_name, operator, name)]
access_rights_uid = name_get_uid or user
ids = self._search(cr, user, args, limit=limit, context=context, access_rights_uid=access_rights_uid)
res = self.name_get(cr, access_rights_uid, ids, context)
return res
def read_string(self, cr, uid, id, langs, fields=None, context=None):
res = {}
res2 = {}
self.pool.get('ir.translation').check_access_rights(cr, uid, 'read')
if not fields:
fields = self._columns.keys() + self._inherit_fields.keys()
#FIXME: collect all calls to _get_source into one SQL call.
for lang in langs:
res[lang] = {'code': lang}
for f in fields:
if f in self._columns:
res_trans = self.pool.get('ir.translation')._get_source(cr, uid, self._name+','+f, 'field', lang)
if res_trans:
res[lang][f] = res_trans
else:
res[lang][f] = self._columns[f].string
for table in self._inherits:
cols = intersect(self._inherit_fields.keys(), fields)
res2 = self.pool[table].read_string(cr, uid, id, langs, cols, context)
for lang in res2:
if lang in res:
res[lang]['code'] = lang
for f in res2[lang]:
res[lang][f] = res2[lang][f]
return res
def write_string(self, cr, uid, id, langs, vals, context=None):
self.pool.get('ir.translation').check_access_rights(cr, uid, 'write')
#FIXME: try to only call the translation in one SQL
for lang in langs:
for field in vals:
if field in self._columns:
src = self._columns[field].string
self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field], src)
for table in self._inherits:
cols = intersect(self._inherit_fields.keys(), vals)
if cols:
self.pool[table].write_string(cr, uid, id, langs, vals, context)
return True
def _add_missing_default_values(self, cr, uid, values, context=None):
missing_defaults = []
avoid_tables = [] # avoid overriding inherited values when parent is set
for tables, parent_field in self._inherits.items():
if parent_field in values:
avoid_tables.append(tables)
for field in self._columns.keys():
if not field in values:
missing_defaults.append(field)
for field in self._inherit_fields.keys():
if (field not in values) and (self._inherit_fields[field][0] not in avoid_tables):
missing_defaults.append(field)
if len(missing_defaults):
# override defaults with the provided values, never allow the other way around
defaults = self.default_get(cr, uid, missing_defaults, context)
for dv in defaults:
if ((dv in self._columns and self._columns[dv]._type == 'many2many') \
or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'many2many')) \
and defaults[dv] and isinstance(defaults[dv][0], (int, long)):
defaults[dv] = [(6, 0, defaults[dv])]
if (dv in self._columns and self._columns[dv]._type == 'one2many' \
or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'one2many')) \
and isinstance(defaults[dv], (list, tuple)) and defaults[dv] and isinstance(defaults[dv][0], dict):
defaults[dv] = [(0, 0, x) for x in defaults[dv]]
defaults.update(values)
values = defaults
return values
def clear_caches(self):
""" Clear the caches
This clears the caches associated to methods decorated with
``tools.ormcache`` or ``tools.ormcache_multi``.
"""
try:
getattr(self, '_ormcache')
self._ormcache = {}
self.pool._any_cache_cleared = True
except AttributeError:
pass
def _read_group_fill_results(self, cr, uid, domain, groupby, remaining_groupbys, aggregated_fields,
read_group_result, read_group_order=None, context=None):
"""Helper method for filling in empty groups for all possible values of
the field being grouped by"""
# self._group_by_full should map groupable fields to a method that returns
# a list of all aggregated values that we want to display for this field,
# in the form of a m2o-like pair (key,label).
# This is useful to implement kanban views for instance, where all columns
# should be displayed even if they don't contain any record.
# Grab the list of all groups that should be displayed, including all present groups
present_group_ids = [x[groupby][0] for x in read_group_result if x[groupby]]
all_groups,folded = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain,
read_group_order=read_group_order,
access_rights_uid=openerp.SUPERUSER_ID,
context=context)
result_template = dict.fromkeys(aggregated_fields, False)
result_template[groupby + '_count'] = 0
if remaining_groupbys:
result_template['__context'] = {'group_by': remaining_groupbys}
# Merge the left_side (current results as dicts) with the right_side (all
# possible values as m2o pairs). Both lists are supposed to be using the
# same ordering, and can be merged in one pass.
result = []
known_values = {}
def append_left(left_side):
grouped_value = left_side[groupby] and left_side[groupby][0]
if not grouped_value in known_values:
result.append(left_side)
known_values[grouped_value] = left_side
else:
count_attr = groupby + '_count'
known_values[grouped_value].update({count_attr: left_side[count_attr]})
def append_right(right_side):
grouped_value = right_side[0]
if not grouped_value in known_values:
line = dict(result_template)
line[groupby] = right_side
line['__domain'] = [(groupby,'=',grouped_value)] + domain
result.append(line)
known_values[grouped_value] = line
while read_group_result or all_groups:
left_side = read_group_result[0] if read_group_result else None
right_side = all_groups[0] if all_groups else None
assert left_side is None or left_side[groupby] is False \
or isinstance(left_side[groupby], (tuple,list)), \
'M2O-like pair expected, got %r' % left_side[groupby]
assert right_side is None or isinstance(right_side, (tuple,list)), \
'M2O-like pair expected, got %r' % right_side
if left_side is None:
append_right(all_groups.pop(0))
elif right_side is None:
append_left(read_group_result.pop(0))
elif left_side[groupby] == right_side:
append_left(read_group_result.pop(0))
all_groups.pop(0) # discard right_side
elif not left_side[groupby] or not left_side[groupby][0]:
# left side == "Undefined" entry, not present on right_side
append_left(read_group_result.pop(0))
else:
append_right(all_groups.pop(0))
if folded:
for r in result:
r['__fold'] = folded.get(r[groupby] and r[groupby][0], False)
return result
def _read_group_prepare(self, orderby, aggregated_fields, annotated_groupbys, query):
"""
Prepares the GROUP BY and ORDER BY terms for the read_group method. Adds the missing JOIN clause
to the query if order should be computed against m2o field.
:param orderby: the orderby definition in the form "%(field)s %(order)s"
:param aggregated_fields: list of aggregated fields in the query
:param annotated_groupbys: list of dictionaries returned by _read_group_process_groupby
                These dictionaries contain the qualified name of each groupby
(fully qualified SQL name for the corresponding field),
and the (non raw) field name.
:param osv.Query query: the query under construction
:return: (groupby_terms, orderby_terms)
"""
orderby_terms = []
groupby_terms = [gb['qualified_field'] for gb in annotated_groupbys]
groupby_fields = [gb['groupby'] for gb in annotated_groupbys]
if not orderby:
return groupby_terms, orderby_terms
self._check_qorder(orderby)
for order_part in orderby.split(','):
order_split = order_part.split()
order_field = order_split[0]
if order_field in groupby_fields:
if self._all_columns[order_field.split(':')[0]].column._type == 'many2one':
order_clause = self._generate_order_by(order_part, query).replace('ORDER BY ', '')
if order_clause:
orderby_terms.append(order_clause)
groupby_terms += [order_term.split()[0] for order_term in order_clause.split(',')]
else:
order = '"%s" %s' % (order_field, '' if len(order_split) == 1 else order_split[1])
orderby_terms.append(order)
elif order_field in aggregated_fields:
orderby_terms.append(order_part)
else:
# Cannot order by a field that will not appear in the results (needs to be grouped or aggregated)
_logger.warn('%s: read_group order by `%s` ignored, cannot sort on empty columns (not grouped/aggregated)',
self._name, order_part)
return groupby_terms, orderby_terms
def _read_group_process_groupby(self, gb, query, context):
"""
Helper method to collect important information about groupbys: raw
        field name, type, time information, qualified name, ...
"""
split = gb.split(':')
field_type = self._all_columns[split[0]].column._type
gb_function = split[1] if len(split) == 2 else None
temporal = field_type in ('date', 'datetime')
tz_convert = field_type == 'datetime' and context.get('tz') in pytz.all_timezones
qualified_field = self._inherits_join_calc(split[0], query)
if temporal:
display_formats = {
'day': 'dd MMM YYYY',
'week': "'W'w YYYY",
'month': 'MMMM YYYY',
'quarter': 'QQQ YYYY',
'year': 'YYYY'
}
time_intervals = {
'day': dateutil.relativedelta.relativedelta(days=1),
'week': datetime.timedelta(days=7),
'month': dateutil.relativedelta.relativedelta(months=1),
'quarter': dateutil.relativedelta.relativedelta(months=3),
'year': dateutil.relativedelta.relativedelta(years=1)
}
if tz_convert:
qualified_field = "timezone('%s', timezone('UTC',%s))" % (context.get('tz', 'UTC'), qualified_field)
qualified_field = "date_trunc('%s', %s)" % (gb_function or 'month', qualified_field)
if field_type == 'boolean':
qualified_field = "coalesce(%s,false)" % qualified_field
return {
'field': split[0],
'groupby': gb,
'type': field_type,
'display_format': display_formats[gb_function or 'month'] if temporal else None,
'interval': time_intervals[gb_function or 'month'] if temporal else None,
'tz_convert': tz_convert,
'qualified_field': qualified_field
}
def _read_group_prepare_data(self, key, value, groupby_dict, context):
"""
        Helper method to sanitize the data received by read_group. ``None``
        values are converted to ``False``, and date/datetime values are parsed
        and corrected according to the timezone.
"""
value = False if value is None else value
gb = groupby_dict.get(key)
if gb and gb['type'] in ('date', 'datetime') and value:
if isinstance(value, basestring):
dt_format = DEFAULT_SERVER_DATETIME_FORMAT if gb['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
value = datetime.datetime.strptime(value, dt_format)
if gb['tz_convert']:
value = pytz.timezone(context['tz']).localize(value)
return value
def _read_group_get_domain(self, groupby, value):
"""
Helper method to construct the domain corresponding to a groupby and
a given value. This is mostly relevant for date/datetime.
"""
if groupby['type'] in ('date', 'datetime') and value:
dt_format = DEFAULT_SERVER_DATETIME_FORMAT if groupby['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
domain_dt_begin = value
domain_dt_end = value + groupby['interval']
if groupby['tz_convert']:
domain_dt_begin = domain_dt_begin.astimezone(pytz.utc)
domain_dt_end = domain_dt_end.astimezone(pytz.utc)
return [(groupby['field'], '>=', domain_dt_begin.strftime(dt_format)),
(groupby['field'], '<', domain_dt_end.strftime(dt_format))]
if groupby['type'] == 'many2one' and value:
value = value[0]
return [(groupby['field'], '=', value)]
def _read_group_format_result(self, data, annotated_groupbys, groupby, groupby_dict, domain, context):
"""
        Helper method to format the data contained in the dictionary ``data`` by
adding the domain corresponding to its values, the groupbys in the
context and by properly formatting the date/datetime values.
"""
domain_group = [dom for gb in annotated_groupbys for dom in self._read_group_get_domain(gb, data[gb['groupby']])]
for k,v in data.iteritems():
gb = groupby_dict.get(k)
if gb and gb['type'] in ('date', 'datetime') and v:
data[k] = babel.dates.format_date(v, format=gb['display_format'], locale=context.get('lang', 'en_US'))
data['__domain'] = domain_group + domain
if len(groupby) - len(annotated_groupbys) >= 1:
data['__context'] = { 'group_by': groupby[len(annotated_groupbys):]}
del data['id']
return data
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
"""
Get the list of records in list view grouped by the given ``groupby`` fields
:param cr: database cursor
:param uid: current user id
:param domain: list specifying search criteria [['field_name', 'operator', 'value'], ...]
:param list fields: list of fields present in the list view specified on the object
:param list groupby: list of groupby descriptions by which the records will be grouped.
A groupby description is either a field (then it will be grouped by that field)
or a string 'field:groupby_function'. Right now, the only functions supported
are 'day', 'week', 'month', 'quarter' or 'year', and they only make sense for
date/datetime fields.
:param int offset: optional number of records to skip
:param int limit: optional max number of records to return
:param dict context: context arguments, like lang, time zone.
:param list orderby: optional ``order by`` specification, for
overriding the natural sort ordering of the
groups, see also :py:meth:`~osv.osv.osv.search`
(supported only for many2one fields currently)
:param bool lazy: if true, the results are only grouped by the first groupby and the
remaining groupbys are put in the __context key. If false, all the groupbys are
done in one call.
:return: list of dictionaries(one dictionary for each record) containing:
* the values of fields grouped by the fields in ``groupby`` argument
* __domain: list of tuples specifying the search criteria
* __context: dictionary with argument like ``groupby``
        :rtype: [{'field_name_1': value, ...}, ...]
:raise AccessError: * if user has no read rights on the requested object
* if user tries to bypass access rules for read on the requested object
"""
if context is None:
context = {}
self.check_access_rights(cr, uid, 'read')
query = self._where_calc(cr, uid, domain, context=context)
fields = fields or self._columns.keys()
groupby = [groupby] if isinstance(groupby, basestring) else groupby
groupby_list = groupby[:1] if lazy else groupby
annotated_groupbys = [self._read_group_process_groupby(gb, query, context)
for gb in groupby_list]
groupby_fields = [g['field'] for g in annotated_groupbys]
order = orderby or ','.join([g for g in groupby_list])
groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}
self._apply_ir_rules(cr, uid, query, 'read', context=context)
for gb in groupby_fields:
assert gb in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
groupby_def = self._columns.get(gb) or (self._inherit_fields.get(gb) and self._inherit_fields.get(gb)[2])
assert groupby_def and groupby_def._classic_write, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"
if not (gb in self._all_columns):
# Don't allow arbitrary values, as this would be a SQL injection vector!
raise except_orm(_('Invalid group_by'),
_('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(gb,))
aggregated_fields = [
f for f in fields
if f not in ('id', 'sequence')
if f not in groupby_fields
if self._all_columns[f].column._type in ('integer', 'float')
if getattr(self._all_columns[f].column, '_classic_write')]
field_formatter = lambda f: (self._all_columns[f].column.group_operator or 'sum', self._inherits_join_calc(f, query), f)
select_terms = ["%s(%s) AS %s" % field_formatter(f) for f in aggregated_fields]
for gb in annotated_groupbys:
select_terms.append('%s as "%s" ' % (gb['qualified_field'], gb['groupby']))
groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, annotated_groupbys, query)
from_clause, where_clause, where_clause_params = query.get_sql()
if lazy and (len(groupby_fields) >= 2 or not context.get('group_by_no_leaf')):
count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
else:
count_field = '_'
prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''
query = """
SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s_count %(extra_fields)s
FROM %(from)s
%(where)s
%(groupby)s
%(orderby)s
%(limit)s
%(offset)s
""" % {
'table': self._table,
'count_field': count_field,
'extra_fields': prefix_terms(',', select_terms),
'from': from_clause,
'where': prefix_term('WHERE', where_clause),
'groupby': prefix_terms('GROUP BY', groupby_terms),
'orderby': prefix_terms('ORDER BY', orderby_terms),
'limit': prefix_term('LIMIT', int(limit) if limit else None),
            'offset': prefix_term('OFFSET', int(offset) if offset else None),
}
cr.execute(query, where_clause_params)
fetched_data = cr.dictfetchall()
if not groupby_fields:
return {r.pop('id'): r for r in fetched_data}
many2onefields = [gb['field'] for gb in annotated_groupbys if gb['type'] == 'many2one']
if many2onefields:
data_ids = [r['id'] for r in fetched_data]
many2onefields = list(set(many2onefields))
data_dict = {d['id']: d for d in self.read(cr, uid, data_ids, many2onefields, context=context)}
for d in fetched_data:
d.update(data_dict[d['id']])
data = map(lambda r: {k: self._read_group_prepare_data(k,v, groupby_dict, context) for k,v in r.iteritems()}, fetched_data)
result = [self._read_group_format_result(d, annotated_groupbys, groupby, groupby_dict, domain, context) for d in data]
if lazy and groupby_fields[0] in self._group_by_full:
            # Right now, read_group only fills results in lazy mode (by default).
# If you need to have the empty groups in 'eager' mode, then the
# method _read_group_fill_results need to be completely reimplemented
# in a sane way
result = self._read_group_fill_results(cr, uid, domain, groupby_fields[0], groupby[len(annotated_groupbys):],
aggregated_fields, result, read_group_order=order,
context=context)
return result
def _inherits_join_add(self, current_model, parent_model_name, query):
"""
Add missing table SELECT and JOIN clause to ``query`` for reaching the parent table (no duplicates)
:param current_model: current model object
:param parent_model_name: name of the parent model for which the clauses should be added
:param query: query object on which the JOIN should be added
"""
inherits_field = current_model._inherits[parent_model_name]
parent_model = self.pool[parent_model_name]
parent_alias, parent_alias_statement = query.add_join((current_model._table, parent_model._table, inherits_field, 'id', inherits_field), implicit=True)
return parent_alias
def _inherits_join_calc(self, field, query):
"""
Adds missing table select and join clause(s) to ``query`` for reaching
the field coming from an '_inherits' parent table (no duplicates).
:param field: name of inherited field to reach
:param query: query object on which the JOIN should be added
:return: qualified name of field, to be used in SELECT clause
"""
current_table = self
parent_alias = '"%s"' % current_table._table
while field in current_table._inherit_fields and not field in current_table._columns:
parent_model_name = current_table._inherit_fields[field][0]
parent_table = self.pool[parent_model_name]
parent_alias = self._inherits_join_add(current_table, parent_model_name, query)
current_table = parent_table
return '%s."%s"' % (parent_alias, field)
def _parent_store_compute(self, cr):
if not self._parent_store:
return
_logger.info('Computing parent left and right for table %s...', self._table)
def browse_rec(root, pos=0):
# TODO: set order
where = self._parent_name+'='+str(root)
if not root:
where = self._parent_name+' IS NULL'
if self._parent_order:
where += ' order by '+self._parent_order
cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
pos2 = pos + 1
for id in cr.fetchall():
pos2 = browse_rec(id[0], pos2)
cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos, pos2, root))
return pos2 + 1
query = 'SELECT id FROM '+self._table+' WHERE '+self._parent_name+' IS NULL'
if self._parent_order:
query += ' order by ' + self._parent_order
pos = 0
cr.execute(query)
for (root,) in cr.fetchall():
pos = browse_rec(root, pos)
return True
def _update_store(self, cr, f, k):
_logger.info("storing computed values of fields.function '%s'", k)
ss = self._columns[k]._symbol_set
update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
cr.execute('select id from '+self._table)
ids_lst = map(lambda x: x[0], cr.fetchall())
while ids_lst:
iids = ids_lst[:AUTOINIT_RECALCULATE_STORED_FIELDS]
ids_lst = ids_lst[AUTOINIT_RECALCULATE_STORED_FIELDS:]
res = f.get(cr, self, iids, k, SUPERUSER_ID, {})
for key, val in res.items():
if f._multi:
val = val[k]
# if val is a many2one, just write the ID
if type(val) == tuple:
val = val[0]
if val is not False:
cr.execute(update_query, (ss[1](val), key))
def _check_selection_field_value(self, cr, uid, field, value, context=None):
"""Raise except_orm if value is not among the valid values for the selection field"""
if self._columns[field]._type == 'reference':
val_model, val_id_str = value.split(',', 1)
val_id = False
try:
val_id = long(val_id_str)
except ValueError:
pass
if not val_id:
raise except_orm(_('ValidateError'),
_('Invalid value for reference field "%s.%s" (last part must be a non-zero integer): "%s"') % (self._table, field, value))
val = val_model
else:
val = value
if isinstance(self._columns[field].selection, (tuple, list)):
if val in dict(self._columns[field].selection):
return
elif val in dict(self._columns[field].selection(self, cr, uid, context=context)):
return
raise except_orm(_('ValidateError'),
_('The value "%s" for the field "%s.%s" is not in the selection') % (value, self._table, field))
def _check_removed_columns(self, cr, log=False):
# iterate on the database columns to drop the NOT NULL constraints
# of fields which were required but have been removed (or will be added by another module)
columns = [c for c in self._columns if not (isinstance(self._columns[c], fields.function) and not self._columns[c].store)]
columns += MAGIC_COLUMNS
cr.execute("SELECT a.attname, a.attnotnull"
" FROM pg_class c, pg_attribute a"
" WHERE c.relname=%s"
" AND c.oid=a.attrelid"
" AND a.attisdropped=%s"
" AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
" AND a.attname NOT IN %s", (self._table, False, tuple(columns))),
for column in cr.dictfetchall():
if log:
_logger.debug("column %s is in the table %s but not in the corresponding object %s",
column['attname'], self._table, self._name)
if column['attnotnull']:
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
_schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
self._table, column['attname'])
def _save_constraint(self, cr, constraint_name, type):
"""
Record the creation of a constraint for this model, to make it possible
to delete it later when the module is uninstalled. Type can be either
'f' or 'u' depending on the constraint being a foreign key or not.
"""
if not self._module:
# no need to save constraints for custom models as they're not part
# of any module
return
assert type in ('f', 'u')
cr.execute("""
SELECT 1 FROM ir_model_constraint, ir_module_module
WHERE ir_model_constraint.module=ir_module_module.id
AND ir_model_constraint.name=%s
AND ir_module_module.name=%s
""", (constraint_name, self._module))
if not cr.rowcount:
cr.execute("""
INSERT INTO ir_model_constraint
(name, date_init, date_update, module, model, type)
VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
(SELECT id FROM ir_module_module WHERE name=%s),
(SELECT id FROM ir_model WHERE model=%s), %s)""",
(constraint_name, self._module, self._name, type))
def _save_relation_table(self, cr, relation_table):
"""
Record the creation of a many2many for this model, to make it possible
to delete it later when the module is uninstalled.
"""
cr.execute("""
SELECT 1 FROM ir_model_relation, ir_module_module
WHERE ir_model_relation.module=ir_module_module.id
AND ir_model_relation.name=%s
AND ir_module_module.name=%s
""", (relation_table, self._module))
if not cr.rowcount:
cr.execute("""INSERT INTO ir_model_relation (name, date_init, date_update, module, model)
VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
(SELECT id FROM ir_module_module WHERE name=%s),
(SELECT id FROM ir_model WHERE model=%s))""",
(relation_table, self._module, self._name))
# checked version: for direct m2o starting from `self`
def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete):
assert self.is_transient() or not dest_model.is_transient(), \
'Many2One relationships from non-transient Model to TransientModel are forbidden'
if self.is_transient() and not dest_model.is_transient():
# TransientModel relationships to regular Models are annoying
# usually because they could block deletion due to the FKs.
# So unless stated otherwise we default them to ondelete=cascade.
ondelete = ondelete or 'cascade'
fk_def = (self._table, source_field, dest_model._table, ondelete or 'set null')
self._foreign_keys.add(fk_def)
_schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
# unchecked version: for custom cases, such as m2m relationships
def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
fk_def = (source_table, source_field, dest_model._table, ondelete or 'set null')
self._foreign_keys.add(fk_def)
_schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
def _drop_constraint(self, cr, source_table, constraint_name):
cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (source_table,constraint_name))
def _m2o_fix_foreign_key(self, cr, source_table, source_field, dest_model, ondelete):
# Find FK constraint(s) currently established for the m2o field,
# and see whether they are stale or not
cr.execute("""SELECT confdeltype as ondelete_rule, conname as constraint_name,
cl2.relname as foreign_table
FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
pg_attribute as att1, pg_attribute as att2
WHERE con.conrelid = cl1.oid
AND cl1.relname = %s
AND con.confrelid = cl2.oid
AND array_lower(con.conkey, 1) = 1
AND con.conkey[1] = att1.attnum
AND att1.attrelid = cl1.oid
AND att1.attname = %s
AND array_lower(con.confkey, 1) = 1
AND con.confkey[1] = att2.attnum
AND att2.attrelid = cl2.oid
AND att2.attname = %s
AND con.contype = 'f'""", (source_table, source_field, 'id'))
constraints = cr.dictfetchall()
if constraints:
if len(constraints) == 1:
# Is it the right constraint?
cons, = constraints
if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\
or cons['foreign_table'] != dest_model._table:
# Wrong FK: drop it and recreate
_schema.debug("Table '%s': dropping obsolete FK constraint: '%s'",
source_table, cons['constraint_name'])
self._drop_constraint(cr, source_table, cons['constraint_name'])
else:
# it's all good, nothing to do!
return
else:
# Multiple FKs found for the same field, drop them all, and re-create
for cons in constraints:
_schema.debug("Table '%s': dropping duplicate FK constraints: '%s'",
source_table, cons['constraint_name'])
self._drop_constraint(cr, source_table, cons['constraint_name'])
# (re-)create the FK
self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
def _auto_init(self, cr, context=None):
"""
Call _field_create and, unless _auto is False:
- create the corresponding table in database for the model,
- possibly add the parent columns in database,
- possibly add the columns 'create_uid', 'create_date', 'write_uid',
'write_date' in database if _log_access is True (the default),
        - report on database columns that no longer exist in _columns,
        - remove NOT NULL constraints that no longer apply,
- alter existing database columns to match _columns,
- create database tables to match _columns,
- add database indices to match _columns,
        - save in self._foreign_keys a list of foreign keys to create (see
_auto_end).
"""
self._foreign_keys = set()
raise_on_invalid_object_name(self._name)
if context is None:
context = {}
store_compute = False
todo_end = []
update_custom_fields = context.get('update_custom_fields', False)
self._field_create(cr, context=context)
create = not self._table_exist(cr)
if self._auto:
if create:
self._create_table(cr)
cr.commit()
if self._parent_store:
if not self._parent_columns_exist(cr):
self._create_parent_columns(cr)
store_compute = True
# Create the create_uid, create_date, write_uid, write_date, columns if desired.
if self._log_access:
self._add_log_columns(cr)
self._check_removed_columns(cr, log=False)
# iterate on the "object columns"
column_data = self._select_column_data(cr)
for k, f in self._columns.iteritems():
if k in MAGIC_COLUMNS:
continue
# Don't update custom (also called manual) fields
if f.manual and not update_custom_fields:
continue
if isinstance(f, fields.one2many):
self._o2m_raise_on_missing_reference(cr, f)
elif isinstance(f, fields.many2many):
self._m2m_raise_or_create_relation(cr, f)
else:
res = column_data.get(k)
# The field is not found as-is in database, try if it
# exists with an old name.
if not res and hasattr(f, 'oldname'):
res = column_data.get(f.oldname)
if res:
cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k))
res['attname'] = k
column_data[k] = res
_schema.debug("Table '%s': renamed column '%s' to '%s'",
self._table, f.oldname, k)
# The field already exists in database. Possibly
# change its type, rename it, drop it or change its
# constraints.
if res:
f_pg_type = res['typname']
f_pg_size = res['size']
f_pg_notnull = res['attnotnull']
if isinstance(f, fields.function) and not f.store and\
not getattr(f, 'nodrop', False):
_logger.info('column %s (%s) converted to a function, removed from table %s',
k, f.string, self._table)
cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
cr.commit()
_schema.debug("Table '%s': dropped column '%s' with cascade",
self._table, k)
f_obj_type = None
else:
f_obj_type = get_pg_type(f) and get_pg_type(f)[0]
if f_obj_type:
ok = False
casts = [
('text', 'char', pg_varchar(f.size), '::%s' % pg_varchar(f.size)),
('varchar', 'text', 'TEXT', ''),
('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
('timestamp', 'date', 'date', '::date'),
('numeric', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
('float8', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
]
if f_pg_type == 'varchar' and f._type == 'char' and ((f.size is None and f_pg_size) or f_pg_size < f.size):
try:
with cr.savepoint():
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" TYPE %s' % (self._table, k, pg_varchar(f.size)))
except psycopg2.NotSupportedError:
# An in-place ALTER TABLE cannot be done because a view depends on this field.
# Do a manual copy. This will drop the view (that will be recreated later)
cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, pg_varchar(f.size)))
cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
cr.commit()
_schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
self._table, k, f_pg_size or 'unlimited', f.size or 'unlimited')
for c in casts:
if (f_pg_type==c[0]) and (f._type==c[1]):
if f_pg_type != f_obj_type:
ok = True
cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO __temp_type_cast' % (self._table, k))
cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2]))
cr.execute(('UPDATE "%s" SET "%s"= __temp_type_cast'+c[3]) % (self._table, k))
cr.execute('ALTER TABLE "%s" DROP COLUMN __temp_type_cast CASCADE' % (self._table,))
cr.commit()
_schema.debug("Table '%s': column '%s' changed type from %s to %s",
self._table, k, c[0], c[1])
break
if f_pg_type != f_obj_type:
if not ok:
i = 0
while True:
newname = k + '_moved' + str(i)
cr.execute("SELECT count(1) FROM pg_class c,pg_attribute a " \
"WHERE c.relname=%s " \
"AND a.attname=%s " \
"AND c.oid=a.attrelid ", (self._table, newname))
if not cr.fetchone()[0]:
break
i += 1
if f_pg_notnull:
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
_schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
self._table, k, f_pg_type, f._type, newname)
# if the field is required and hasn't got a NOT NULL constraint
if f.required and f_pg_notnull == 0:
# set the field to the default value if any
if k in self._defaults:
if callable(self._defaults[k]):
default = self._defaults[k](self, cr, SUPERUSER_ID, context)
else:
default = self._defaults[k]
if default is not None:
ss = self._columns[k]._symbol_set
query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" is NULL' % (self._table, k, ss[0], k)
cr.execute(query, (ss[1](default),))
# add the NOT NULL constraint
cr.commit()
try:
#use savepoints for openupgrade instead of transactions
cr.execute('SAVEPOINT add_constraint');
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
cr.execute('RELEASE SAVEPOINT add_constraint');
cr.commit()
_schema.debug("Table '%s': column '%s': added NOT NULL constraint",
self._table, k)
except Exception:
cr.execute('ROLLBACK TO SAVEPOINT add_constraint');
msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
"If you want to have it, you should update the records and execute manually:\n"\
"ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
_schema.warning(msg, self._table, k, self._table, k)
cr.commit()
elif not f.required and f_pg_notnull == 1:
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
cr.commit()
_schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
self._table, k)
# Verify index
indexname = '%s_%s_index' % (self._table, k)
cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
res2 = cr.dictfetchall()
if not res2 and f.select:
cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
cr.commit()
if f._type == 'text':
# FIXME: for fields.text columns we should try creating GIN indexes instead (seems most suitable for an ERP context)
msg = "Table '%s': Adding (b-tree) index for %s column '%s'."\
"This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\
" because there is a length limit for indexable btree values!\n"\
"Use a search view instead if you simply want to make the field searchable."
_schema.warning(msg, self._table, f._type, k)
if res2 and not f.select:
cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
cr.commit()
msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
_schema.debug(msg, self._table, k, f._type)
if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
dest_model = self.pool[f._obj]
if dest_model._table != 'ir_actions':
self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete)
# The field doesn't exist in database. Create it if necessary.
else:
if not isinstance(f, fields.function) or f.store:
# add the missing field
cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
_schema.debug("Table '%s': added column '%s' with definition=%s",
self._table, k, get_pg_type(f)[1])
# initialize it
if not create and k in self._defaults:
if callable(self._defaults[k]):
default = self._defaults[k](self, cr, SUPERUSER_ID, context)
else:
default = self._defaults[k]
ss = self._columns[k]._symbol_set
query = 'UPDATE "%s" SET "%s"=%s' % (self._table, k, ss[0])
cr.execute(query, (ss[1](default),))
cr.commit()
_logger.debug("Table '%s': setting default value of new column %s", self._table, k)
# remember the functions to call for the stored fields
if isinstance(f, fields.function):
order = 10
if f.store is not True: # i.e. if f.store is a dict
order = f.store[f.store.keys()[0]][2]
todo_end.append((order, self._update_store, (f, k)))
# and add constraints if needed
if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
if f._obj not in self.pool:
raise except_orm('Programming Error', 'There is no reference available for %s' % (f._obj,))
dest_model = self.pool[f._obj]
ref = dest_model._table
# ir_actions is inherited so foreign key doesn't work on it
if ref != 'ir_actions':
self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
if f.select:
cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
if f.required:
try:
#use savepoints for openupgrade instead of transactions
cr.execute('SAVEPOINT add_constraint');
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
_schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
self._table, k)
cr.execute('RELEASE SAVEPOINT add_constraint');
except Exception:
cr.execute('ROLLBACK TO SAVEPOINT add_constraint');
msg = "WARNING: unable to set column %s of table %s not null !\n"\
"Try to re-run: openerp-server --update=module\n"\
"If it doesn't work, update records and execute manually:\n"\
"ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
_logger.warning(msg, k, self._table, self._table, k)
cr.commit()
else:
cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
create = not bool(cr.fetchone())
cr.commit() # start a new transaction
if self._auto:
self._add_sql_constraints(cr)
if create:
self._execute_sql(cr)
if store_compute:
self._parent_store_compute(cr)
cr.commit()
return todo_end
def _auto_end(self, cr, context=None):
""" Create the foreign keys recorded by _auto_init. """
for t, k, r, d in self._foreign_keys:
cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (t, k, r, d))
self._save_constraint(cr, "%s_%s_fkey" % (t, k), 'f')
cr.commit()
del self._foreign_keys
def _table_exist(self, cr):
cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
return cr.rowcount
def _create_table(self, cr):
cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id))' % (self._table,))
cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
_schema.debug("Table '%s': created", self._table)
def _parent_columns_exist(self, cr):
cr.execute("""SELECT c.relname
FROM pg_class c, pg_attribute a
WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
""", (self._table, 'parent_left'))
return cr.rowcount
def _create_parent_columns(self, cr):
cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
if 'parent_left' not in self._columns:
_logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
self._table)
_schema.debug("Table '%s': added column '%s' with definition=%s",
self._table, 'parent_left', 'INTEGER')
elif not self._columns['parent_left'].select:
_logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition)',
self._table)
if 'parent_right' not in self._columns:
_logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
self._table)
_schema.debug("Table '%s': added column '%s' with definition=%s",
self._table, 'parent_right', 'INTEGER')
elif not self._columns['parent_right'].select:
_logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition)',
self._table)
if self._columns[self._parent_name].ondelete not in ('cascade', 'restrict'):
_logger.error("The column %s on object %s must be set as ondelete='cascade' or 'restrict'",
self._parent_name, self._name)
cr.commit()
def _add_log_columns(self, cr):
for field, field_def in LOG_ACCESS_COLUMNS.iteritems():
cr.execute("""
SELECT c.relname
FROM pg_class c, pg_attribute a
WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
""", (self._table, field))
if not cr.rowcount:
cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, field, field_def))
cr.commit()
_schema.debug("Table '%s': added column '%s' with definition=%s",
self._table, field, field_def)
def _select_column_data(self, cr):
# attlen is the number of bytes necessary to represent the type when
# the type has a fixed size. If the type has a varying size attlen is
# -1 and atttypmod is the size limit + 4, or -1 if there is no limit.
cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN (CASE WHEN a.atttypmod=-1 THEN 0 ELSE a.atttypmod-4 END) ELSE a.attlen END as size " \
"FROM pg_class c,pg_attribute a,pg_type t " \
"WHERE c.relname=%s " \
"AND c.oid=a.attrelid " \
"AND a.atttypid=t.oid", (self._table,))
return dict(map(lambda x: (x['attname'], x),cr.dictfetchall()))
def _o2m_raise_on_missing_reference(self, cr, f):
# TODO this check should be a method on fields.one2many.
if f._obj in self.pool:
other = self.pool[f._obj]
# TODO the condition could use fields_get_keys().
if f._fields_id not in other._columns.keys():
if f._fields_id not in other._inherit_fields.keys():
raise except_orm('Programming Error', "There is no reference field '%s' found for '%s'" % (f._fields_id, f._obj,))
def _m2m_raise_or_create_relation(self, cr, f):
m2m_tbl, col1, col2 = f._sql_names(self)
self._save_relation_table(cr, m2m_tbl)
cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
if not cr.dictfetchall():
if f._obj not in self.pool:
raise except_orm('Programming Error', 'Many2Many destination model does not exist: `%s`' % (f._obj,))
dest_model = self.pool[f._obj]
ref = dest_model._table
cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
# create foreign key references with ondelete=cascade, unless the targets are SQL views
cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
if not cr.fetchall():
self._m2o_add_foreign_key_unchecked(m2m_tbl, col2, dest_model, 'cascade')
cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (self._table,))
if not cr.fetchall():
self._m2o_add_foreign_key_unchecked(m2m_tbl, col1, self, 'cascade')
cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col1, m2m_tbl, col1))
cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
cr.commit()
_schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
def _add_sql_constraints(self, cr):
"""
Modify this model's database table constraints so they match the one in
_sql_constraints.
"""
def unify_cons_text(txt):
return txt.lower().replace(', ',',').replace(' (','(')
for (key, con, _) in self._sql_constraints:
conname = '%s_%s' % (self._table, key)
self._save_constraint(cr, conname, 'u')
cr.execute("SELECT conname, pg_catalog.pg_get_constraintdef(oid, true) as condef FROM pg_constraint where conname=%s", (conname,))
existing_constraints = cr.dictfetchall()
sql_actions = {
'drop': {
'execute': False,
'query': 'ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (self._table, conname, ),
'msg_ok': "Table '%s': dropped constraint '%s'. Reason: its definition changed from '%%s' to '%s'" % (
self._table, conname, con),
'msg_err': "Table '%s': unable to drop \'%s\' constraint !" % (self._table, con),
'order': 1,
},
'add': {
'execute': False,
'query': 'ALTER TABLE "%s" ADD CONSTRAINT "%s" %s' % (self._table, conname, con,),
'msg_ok': "Table '%s': added constraint '%s' with definition=%s" % (self._table, conname, con),
'msg_err': "Table '%s': unable to add \'%s\' constraint !\n If you want to have it, you should update the records and execute manually:\n%%s" % (
self._table, con),
'order': 2,
},
}
if not existing_constraints:
# constraint does not exist:
sql_actions['add']['execute'] = True
sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
elif unify_cons_text(con) not in [unify_cons_text(item['condef']) for item in existing_constraints]:
# constraint exists but its definition has changed:
sql_actions['drop']['execute'] = True
sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints[0]['condef'].lower(), )
sql_actions['add']['execute'] = True
sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
# we need to add the constraint:
sql_actions = [item for item in sql_actions.values()]
sql_actions.sort(key=lambda x: x['order'])
for sql_action in [action for action in sql_actions if action['execute']]:
try:
#use savepoints for openupgrade instead of transactions
cr.execute('SAVEPOINT add_constraint2');
cr.execute(sql_action['query'])
cr.execute('RELEASE SAVEPOINT add_constraint2');
_schema.debug(sql_action['msg_ok'])
except:
_schema.warning(sql_action['msg_err'])
cr.execute('ROLLBACK TO SAVEPOINT add_constraint2');
def _execute_sql(self, cr):
""" Execute the SQL code from the _sql attribute (if any)."""
if hasattr(self, "_sql"):
for line in self._sql.split(';'):
line2 = line.replace('\n', '').strip()
if line2:
cr.execute(line2)
cr.commit()
#
# Update objects that uses this one to update their _inherits fields
#
def _inherits_reload_src(self):
""" Recompute the _inherit_fields mapping on each _inherits'd child model."""
for obj in self.pool.models.values():
if self._name in obj._inherits:
obj._inherits_reload()
def _inherits_reload(self):
""" Recompute the _inherit_fields mapping.
This will also call itself on each inherits'd child model.
"""
res = {}
for table in self._inherits:
other = self.pool[table]
for col in other._columns.keys():
res[col] = (table, self._inherits[table], other._columns[col], table)
for col in other._inherit_fields.keys():
res[col] = (table, self._inherits[table], other._inherit_fields[col][2], other._inherit_fields[col][3])
self._inherit_fields = res
self._all_columns = self._get_column_infos()
self._inherits_reload_src()
def _get_column_infos(self):
"""Returns a dict mapping all fields names (direct fields and
inherited field via _inherits) to a ``column_info`` struct
giving detailed columns """
result = {}
for k, (parent, m2o, col, original_parent) in self._inherit_fields.iteritems():
result[k] = fields.column_info(k, col, parent, m2o, original_parent)
for k, col in self._columns.iteritems():
result[k] = fields.column_info(k, col)
return result
def _inherits_check(self):
for table, field_name in self._inherits.items():
if field_name not in self._columns:
_logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, self._name)
self._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
required=True, ondelete="cascade")
elif not self._columns[field_name].required or self._columns[field_name].ondelete.lower() not in ("cascade", "restrict"):
_logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, self._name)
self._columns[field_name].required = True
self._columns[field_name].ondelete = "cascade"
def fields_get(self, cr, user, allfields=None, context=None, write_access=True):
""" Return the definition of each field.
The returned value is a dictionary (indexed by field name) of
dictionaries. The _inherits'd fields are included. The string, help,
and selection (if present) attributes are translated.
:param cr: database cursor
:param user: current user id
:param allfields: list of fields
:param context: context arguments, like lang, time zone
:return: dictionary of field dictionaries, each one describing a field of the business object
:raise AccessError: * if user has no create/write rights on the requested object
"""
if context is None:
context = {}
write_access = self.check_access_rights(cr, user, 'write', raise_exception=False) \
or self.check_access_rights(cr, user, 'create', raise_exception=False)
res = {}
translation_obj = self.pool.get('ir.translation')
for parent in self._inherits:
res.update(self.pool[parent].fields_get(cr, user, allfields, context))
for f, field in self._columns.iteritems():
if (allfields and f not in allfields) or \
(field.groups and not self.user_has_groups(cr, user, groups=field.groups, context=context)):
continue
res[f] = fields.field_to_dict(self, cr, user, field, context=context)
if not write_access:
res[f]['readonly'] = True
res[f]['states'] = {}
if 'lang' in context:
if 'string' in res[f]:
res_trans = translation_obj._get_source(cr, user, self._name + ',' + f, 'field', context['lang'])
if res_trans:
res[f]['string'] = res_trans
if 'help' in res[f]:
help_trans = translation_obj._get_source(cr, user, self._name + ',' + f, 'help', context['lang'])
if help_trans:
res[f]['help'] = help_trans
return res
def get_empty_list_help(self, cr, user, help, context=None):
""" Generic method giving the help message displayed when having
no result to display in a list or kanban view. By default it returns
the help given as a parameter, which is generally the help message
defined in the action.
"""
return help
def check_field_access_rights(self, cr, user, operation, fields, context=None):
"""
Check the user access rights on the given fields. This raises Access
Denied if the user does not have the rights. Otherwise it returns the
fields (as-is if ``fields`` is not falsy, or the readable/writable
fields if ``fields`` is falsy).
"""
def p(field_name):
"""Predicate to test if the user has access to the given field name."""
# Ignore requested field if it doesn't exist. This is ugly but
# it seems to happen at least with 'name_alias' on res.partner.
if field_name not in self._all_columns:
return True
field = self._all_columns[field_name].column
if user != SUPERUSER_ID and field.groups:
return self.user_has_groups(cr, user, groups=field.groups, context=context)
else:
return True
if not fields:
fields = filter(p, self._all_columns.keys())
else:
filtered_fields = filter(lambda a: not p(a), fields)
if filtered_fields:
_logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s', operation, user, self._name, ', '.join(filtered_fields))
raise except_orm(
_('Access Denied'),
_('The requested operation cannot be completed due to security restrictions. '
'Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
(self._description, operation))
return fields
def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
""" Read records with given ids with the given fields
:param cr: database cursor
:param user: current user id
:param ids: id or list of the ids of the records to read
:param fields: optional list of field names to return (default: all fields would be returned)
:type fields: list (example ['field_name_1', ...])
:param context: optional context dictionary - it may contain keys for specifying certain options
like ``context_lang``, ``context_tz`` to alter the results of the call.
A special ``bin_size`` boolean flag may also be passed in the context to request the
value of all fields.binary columns to be returned as the size of the binary instead of its
contents. This can also be selectively overridden by passing a field-specific flag
in the form ``bin_size_XXX: True/False`` where ``XXX`` is the name of the field.
Note: The ``bin_size_XXX`` form is new in OpenERP v6.0.
:return: list of dictionaries((dictionary per record asked)) with requested field values
:rtype: [{'name_of_the_field': value, ...}, ...]
:raise AccessError: * if user has no read rights on the requested object
* if user tries to bypass access rules for read on the requested object
"""
self.check_access_rights(cr, user, 'read')
fields = self.check_field_access_rights(cr, user, 'read', fields)
if isinstance(ids, (int, long)):
select = [ids]
else:
select = ids
select = map(lambda x: isinstance(x, dict) and x['id'] or x, select)
result = self._read_flat(cr, user, select, fields, context, load)
if isinstance(ids, (int, long)):
return result and result[0] or False
return result
def _read_flat(self, cr, user, ids, fields_to_read, context=None, load='_classic_read'):
if not context:
context = {}
if not ids:
return []
if fields_to_read is None:
fields_to_read = self._columns.keys()
# all inherited fields + all non inherited fields for which the attribute whose name is in load is True
fields_pre = [f for f in fields_to_read if
f == self.CONCURRENCY_CHECK_FIELD
or (f in self._columns and getattr(self._columns[f], '_classic_write'))
] + self._inherits.values()
res = []
if len(fields_pre):
def convert_field(f):
f_qual = '%s."%s"' % (self._table, f) # need fully-qualified references in case len(tables) > 1
if f in ('create_date', 'write_date'):
return "date_trunc('second', %s) as %s" % (f_qual, f)
if f == self.CONCURRENCY_CHECK_FIELD:
if self._log_access:
return "COALESCE(%s.write_date, %s.create_date, (now() at time zone 'UTC'))::timestamp AS %s" % (self._table, self._table, f,)
return "(now() at time zone 'UTC')::timestamp AS %s" % (f,)
if isinstance(self._columns[f], fields.binary) and context.get('bin_size', False):
return 'length(%s) as "%s"' % (f_qual, f)
return f_qual
# Construct a clause for the security rules.
# 'tables' hold the list of tables necessary for the SELECT including the ir.rule clauses,
# or will at least contain self._table.
rule_clause, rule_params, tables = self.pool.get('ir.rule').domain_get(cr, user, self._name, 'read', context=context)
fields_pre2 = map(convert_field, fields_pre)
order_by = self._parent_order or self._order
select_fields = ','.join(fields_pre2 + ['%s.id' % self._table])
query = 'SELECT %s FROM %s WHERE %s.id IN %%s' % (select_fields, ','.join(tables), self._table)
if rule_clause:
query += " AND " + (' OR '.join(rule_clause))
query += " ORDER BY " + order_by
for sub_ids in cr.split_for_in_conditions(ids):
cr.execute(query, [tuple(sub_ids)] + rule_params)
results = cr.dictfetchall()
result_ids = [x['id'] for x in results]
self._check_record_rules_result_count(cr, user, sub_ids, result_ids, 'read', context=context)
res.extend(results)
else:
self.check_access_rule(cr, user, ids, 'read', context=context)
res = map(lambda x: {'id': x}, ids)
if context.get('lang'):
for f in fields_pre:
if f == self.CONCURRENCY_CHECK_FIELD:
continue
if self._columns[f].translate:
ids = [x['id'] for x in res]
#TODO: optimize out of this loop
res_trans = self.pool.get('ir.translation')._get_ids(cr, user, self._name+','+f, 'model', context['lang'], ids)
for r in res:
r[f] = res_trans.get(r['id'], False) or r[f]
for table in self._inherits:
col = self._inherits[table]
cols = [x for x in intersect(self._inherit_fields.keys(), fields_to_read) if x not in self._columns.keys()]
if not cols:
continue
res2 = self.pool[table].read(cr, user, [x[col] for x in res], cols, context, load)
res3 = {}
for r in res2:
res3[r['id']] = r
del r['id']
for record in res:
if not record[col]: # if the record is deleted from _inherits table?
continue
record.update(res3[record[col]])
if col not in fields_to_read:
del record[col]
# all fields which need to be post-processed by a simple function (symbol_get)
fields_post = filter(lambda x: x in self._columns and self._columns[x]._symbol_get, fields_to_read)
if fields_post:
for r in res:
for f in fields_post:
r[f] = self._columns[f]._symbol_get(r[f])
ids = [x['id'] for x in res]
# all non inherited fields for which the attribute whose name is in load is False
fields_post = filter(lambda x: x in self._columns and not getattr(self._columns[x], load), fields_to_read)
# Compute POST fields
todo = {}
for f in fields_post:
todo.setdefault(self._columns[f]._multi, [])
todo[self._columns[f]._multi].append(f)
for key, val in todo.items():
if key:
res2 = self._columns[val[0]].get(cr, self, ids, val, user, context=context, values=res)
assert res2 is not None, \
'The function field "%s" on the "%s" model returned None\n' \
'(a dictionary was expected).' % (val[0], self._name)
for pos in val:
for record in res:
if isinstance(res2[record['id']], str): res2[record['id']] = eval(res2[record['id']]) #TOCHECK: why we get a string instead of a dict in Python 2.6
multi_fields = res2.get(record['id'],{})
if multi_fields:
record[pos] = multi_fields.get(pos,[])
else:
for f in val:
res2 = self._columns[f].get(cr, self, ids, f, user, context=context, values=res)
for record in res:
if res2:
record[f] = res2[record['id']]
else:
record[f] = []
# Warn about deprecated fields now that fields_pre and fields_post are computed
# Explicitly use list() because we may receive tuples
for f in list(fields_pre) + list(fields_post):
field_column = self._all_columns.get(f) and self._all_columns.get(f).column
if field_column and field_column.deprecated:
_logger.warning('Field %s.%s is deprecated: %s', self._name, f, field_column.deprecated)
readonly = None
for vals in res:
for field in vals.copy():
fobj = None
if field in self._columns:
fobj = self._columns[field]
if fobj:
groups = fobj.read
if groups:
edit = False
for group in groups:
module = group.split(".")[0]
grp = group.split(".")[1]
cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
(grp, module, 'res.groups', user))
readonly = cr.fetchall()
if readonly[0][0] >= 1:
edit = True
break
elif readonly[0][0] == 0:
edit = False
else:
edit = False
if not edit:
if type(vals[field]) == type([]):
vals[field] = []
elif type(vals[field]) == type(0.0):
vals[field] = 0
elif type(vals[field]) == type(''):
vals[field] = '=No Permission='
else:
vals[field] = False
if vals[field] is None:
vals[field] = False
return res
# TODO check READ access
def perm_read(self, cr, user, ids, context=None, details=True):
"""
Returns some metadata about the given records.
:param details: if True, \*_uid fields are replaced with the name of the user
:return: list of ownership dictionaries for each requested record
:rtype: list of dictionaries with the following keys:
* id: object id
* create_uid: user who created the record
* create_date: date when the record was created
* write_uid: last user who changed the record
* write_date: date of the last change to the record
* xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
"""
if not context:
context = {}
if not ids:
return []
fields = ''
uniq = isinstance(ids, (int, long))
if uniq:
ids = [ids]
fields = ['id']
if self._log_access:
fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
quoted_table = '"%s"' % self._table
fields_str = ",".join('%s.%s'%(quoted_table, field) for field in fields)
query = '''SELECT %s, __imd.module, __imd.name
FROM %s LEFT JOIN ir_model_data __imd
ON (__imd.model = %%s and __imd.res_id = %s.id)
WHERE %s.id IN %%s''' % (fields_str, quoted_table, quoted_table, quoted_table)
cr.execute(query, (self._name, tuple(ids)))
res = cr.dictfetchall()
for r in res:
for key in r:
r[key] = r[key] or False
if details and key in ('write_uid', 'create_uid') and r[key]:
try:
r[key] = self.pool.get('res.users').name_get(cr, user, [r[key]])[0]
except Exception:
pass # Leave the numeric uid there
r['xmlid'] = ("%(module)s.%(name)s" % r) if r['name'] else False
del r['name'], r['module']
if uniq:
return res[0]
return res
def _check_concurrency(self, cr, ids, context):
if not context:
return
if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
return
check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
for sub_ids in cr.split_for_in_conditions(ids):
ids_to_check = []
for id in sub_ids:
id_ref = "%s,%s" % (self._name, id)
update_date = context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
if update_date:
ids_to_check.extend([id, update_date])
if not ids_to_check:
continue
cr.execute("SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause]*(len(ids_to_check)/2))), tuple(ids_to_check))
res = cr.fetchone()
if res:
# mention the first one only to keep the error message readable
raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
def _check_record_rules_result_count(self, cr, uid, ids, result_ids, operation, context=None):
"""Verify the returned rows after applying record rules matches
the length of `ids`, and raise an appropriate exception if it does not.
"""
ids, result_ids = set(ids), set(result_ids)
missing_ids = ids - result_ids
if missing_ids:
# Attempt to distinguish record rule restriction vs deleted records,
# to provide a more specific error message - check whether the missing ids are still present in the database.
cr.execute('SELECT id FROM ' + self._table + ' WHERE id IN %s', (tuple(missing_ids),))
forbidden_ids = [x[0] for x in cr.fetchall()]
if forbidden_ids:
# the missing ids are (at least partially) hidden by access rules
if uid == SUPERUSER_ID:
return
_logger.warning('Access Denied by record rules for operation: %s on record ids: %r, uid: %s, model: %s', operation, forbidden_ids, uid, self._name)
raise except_orm(_('Access Denied'),
_('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
(self._description, operation))
else:
# If we get here, the missing_ids are not in the database
if operation in ('read','unlink'):
# No need to warn about deleting an already deleted record.
# And no error when reading a record that was deleted, to prevent spurious
# errors for non-transactional search/read sequences coming from clients
return
_logger.warning('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, uid, self._name)
raise except_orm(_('Missing document(s)'),
_('One of the documents you are trying to access has been deleted, please try again after refreshing.'))
def check_access_rights(self, cr, uid, operation, raise_exception=True): # no context on purpose.
"""Verifies that the operation given by ``operation`` is allowed for the user
according to the access rights."""
return self.pool.get('ir.model.access').check(cr, uid, self._name, operation, raise_exception)
def check_access_rule(self, cr, uid, ids, operation, context=None):
"""Verifies that the operation given by ``operation`` is allowed for the user
according to ir.rules.
:param operation: one of ``write``, ``unlink``
:raise except_orm: * if current ir.rules do not permit this operation.
:return: None if the operation is allowed
"""
if uid == SUPERUSER_ID:
return
if self.is_transient():
# Only one single implicit access rule for transient models: owner only!
# This is ok to hardcode because we assert that TransientModels always
# have log_access enabled so that the create_uid column is always there.
# And even with _inherits, these fields are always present in the local
# table too, so no need for JOINs.
cr.execute("""SELECT distinct create_uid
FROM %s
WHERE id IN %%s""" % self._table, (tuple(ids),))
uids = [x[0] for x in cr.fetchall()]
if len(uids) != 1 or uids[0] != uid:
raise except_orm(_('Access Denied'),
_('For this kind of document, you may only access records you created yourself.\n\n(Document type: %s)') % (self._description,))
else:
where_clause, where_params, tables = self.pool.get('ir.rule').domain_get(cr, uid, self._name, operation, context=context)
if where_clause:
where_clause = ' and ' + ' and '.join(where_clause)
for sub_ids in cr.split_for_in_conditions(ids):
cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
' WHERE ' + self._table + '.id IN %s' + where_clause,
[sub_ids] + where_params)
returned_ids = [x['id'] for x in cr.dictfetchall()]
self._check_record_rules_result_count(cr, uid, sub_ids, returned_ids, operation, context=context)
def create_workflow(self, cr, uid, ids, context=None):
"""Create a workflow instance for each given record IDs."""
from openerp import workflow
for res_id in ids:
workflow.trg_create(uid, self._name, res_id, cr)
return True
def delete_workflow(self, cr, uid, ids, context=None):
"""Delete the workflow instances bound to the given record IDs."""
from openerp import workflow
for res_id in ids:
workflow.trg_delete(uid, self._name, res_id, cr)
return True
def step_workflow(self, cr, uid, ids, context=None):
"""Reevaluate the workflow instances of the given record IDs."""
from openerp import workflow
for res_id in ids:
workflow.trg_write(uid, self._name, res_id, cr)
return True
def signal_workflow(self, cr, uid, ids, signal, context=None):
"""Send given workflow signal and return a dict mapping ids to workflow results"""
from openerp import workflow
result = {}
for res_id in ids:
result[res_id] = workflow.trg_validate(uid, self._name, res_id, signal, cr)
return result
def redirect_workflow(self, cr, uid, old_new_ids, context=None):
""" Rebind the workflow instance bound to the given 'old' record IDs to
the given 'new' IDs. (``old_new_ids`` is a list of pairs ``(old, new)``.
"""
from openerp import workflow
for old_id, new_id in old_new_ids:
workflow.trg_redirect(uid, self._name, old_id, new_id, cr)
return True
def unlink(self, cr, uid, ids, context=None):
"""
Delete records with given ids
:param cr: database cursor
:param uid: current user id
:param ids: id or list of ids
:param context: (optional) context arguments, like lang, time zone
:return: True
:raise AccessError: * if user has no unlink rights on the requested object
* if user tries to bypass access rules for unlink on the requested object
:raise UserError: if the record is default property for other records
"""
if not ids:
return True
if isinstance(ids, (int, long)):
ids = [ids]
result_store = self._store_get_values(cr, uid, ids, self._all_columns.keys(), context)
self._check_concurrency(cr, ids, context)
self.check_access_rights(cr, uid, 'unlink')
ir_property = self.pool.get('ir.property')
# Check if the records are used as default properties.
domain = [('res_id', '=', False),
('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
]
if ir_property.search(cr, uid, domain, context=context):
raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))
# Delete the records' properties.
property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
ir_property.unlink(cr, uid, property_ids, context=context)
self.delete_workflow(cr, uid, ids, context=context)
self.check_access_rule(cr, uid, ids, 'unlink', context=context)
pool_model_data = self.pool.get('ir.model.data')
ir_values_obj = self.pool.get('ir.values')
for sub_ids in cr.split_for_in_conditions(ids):
cr.execute('delete from ' + self._table + ' ' \
'where id IN %s', (sub_ids,))
# Removing the ir_model_data reference if the record being deleted is a record created by xml/csv file,
# as these are not connected with real database foreign keys, and would be dangling references.
# Note: following steps performed as admin to avoid access rights restrictions, and with no context
# to avoid possible side-effects during admin calls.
# Step 1. Calling unlink of ir_model_data only for the affected IDS
reference_ids = pool_model_data.search(cr, SUPERUSER_ID, [('res_id','in',list(sub_ids)),('model','=',self._name)])
# Step 2. Marching towards the real deletion of referenced records
if reference_ids:
pool_model_data.unlink(cr, SUPERUSER_ID, reference_ids)
# For the same reason, removing the record relevant to ir_values
ir_value_ids = ir_values_obj.search(cr, uid,
['|',('value','in',['%s,%s' % (self._name, sid) for sid in sub_ids]),'&',('res_id','in',list(sub_ids)),('model','=',self._name)],
context=context)
if ir_value_ids:
ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)
for order, obj_name, store_ids, fields in result_store:
if obj_name == self._name:
effective_store_ids = list(set(store_ids) - set(ids))
else:
effective_store_ids = store_ids
if effective_store_ids:
obj = self.pool[obj_name]
cr.execute('select id from '+obj._table+' where id IN %s', (tuple(effective_store_ids),))
rids = map(lambda x: x[0], cr.fetchall())
if rids:
obj._store_set_values(cr, uid, rids, fields, context)
return True
#
# TODO: Validate
#
def write(self, cr, user, ids, vals, context=None):
"""
Update records with given ids with the given field values
:param cr: database cursor
:param user: current user id
:type user: integer
:param ids: object id or list of object ids to update according to **vals**
:param vals: field values to update, e.g {'field_name': new_field_value, ...}
:type vals: dictionary
:param context: (optional) context arguments, e.g. {'lang': 'en_us', 'tz': 'UTC', ...}
:type context: dictionary
:return: True
:raise AccessError: * if user has no write rights on the requested object
* if user tries to bypass access rules for write on the requested object
:raise ValidateError: if user tries to enter invalid value for a field that is not in selection
:raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
**Note**: The type of field values to pass in ``vals`` for relationship fields is specific:
+ For a many2many field, a list of tuples is expected.
Here is the list of tuples that are accepted, with the corresponding semantics ::
(0, 0, { values }) link to a new record that needs to be created with the given values dictionary
(1, ID, { values }) update the linked record with id = ID (write *values* on it)
(2, ID) remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
(3, ID) cut the link to the linked record with id = ID (delete the relationship between the two objects but does not delete the target object itself)
(4, ID) link to existing record with id = ID (adds a relationship)
(5) unlink all (like using (3,ID) for all linked records)
(6, 0, [IDs]) replace the list of linked IDs (like using (5) then (4,ID) for each ID in the list of IDs)
Example:
[(6, 0, [8, 5, 6, 4])] sets the many2many to ids [8, 5, 6, 4]
+ For a one2many field, a list of tuples is expected.
Here is the list of tuples that are accepted, with the corresponding semantics ::
(0, 0, { values }) link to a new record that needs to be created with the given values dictionary
(1, ID, { values }) update the linked record with id = ID (write *values* on it)
(2, ID) remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
Example:
[(0, 0, {'field_name':field_value_record1, ...}), (0, 0, {'field_name':field_value_record2, ...})]
+ For a many2one field, simply use the ID of target record, which must already exist, or ``False`` to remove the link.
+ For a reference field, use a string with the model name, a comma, and the target object id (example: ``'product.product, 5'``)
"""
readonly = None
self.check_field_access_rights(cr, user, 'write', vals.keys())
for field in vals.copy():
fobj = None
if field in self._columns:
fobj = self._columns[field]
elif field in self._inherit_fields:
fobj = self._inherit_fields[field][2]
if not fobj:
continue
groups = fobj.write
if groups:
edit = False
for group in groups:
module = group.split(".")[0]
grp = group.split(".")[1]
cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
(grp, module, 'res.groups', user))
readonly = cr.fetchall()
if readonly[0][0] >= 1:
edit = True
break
if not edit:
vals.pop(field)
if not context:
context = {}
if not ids:
return True
if isinstance(ids, (int, long)):
ids = [ids]
self._check_concurrency(cr, ids, context)
self.check_access_rights(cr, user, 'write')
result = self._store_get_values(cr, user, ids, vals.keys(), context) or []
# No direct update of parent_left/right
vals.pop('parent_left', None)
vals.pop('parent_right', None)
parents_changed = []
parent_order = self._parent_order or self._order
if self._parent_store and (self._parent_name in vals):
# The parent_left/right computation may take up to
# 5 seconds. No need to recompute the values if the
# parent is the same.
# Note: to respect parent_order, nodes must be processed in
# order, so ``parents_changed`` must be ordered properly.
parent_val = vals[self._parent_name]
if parent_val:
query = "SELECT id FROM %s WHERE id IN %%s AND (%s != %%s OR %s IS NULL) ORDER BY %s" % \
(self._table, self._parent_name, self._parent_name, parent_order)
cr.execute(query, (tuple(ids), parent_val))
else:
query = "SELECT id FROM %s WHERE id IN %%s AND (%s IS NOT NULL) ORDER BY %s" % \
(self._table, self._parent_name, parent_order)
cr.execute(query, (tuple(ids),))
parents_changed = map(operator.itemgetter(0), cr.fetchall())
upd0 = []
upd1 = []
upd_todo = []
updend = []
direct = []
totranslate = context.get('lang', False) and (context['lang'] != 'en_US')
for field in vals:
field_column = self._all_columns.get(field) and self._all_columns.get(field).column
if field_column and field_column.deprecated:
_logger.warning('Field %s.%s is deprecated: %s', self._name, field, field_column.deprecated)
if field in self._columns:
if self._columns[field]._classic_write and not (hasattr(self._columns[field], '_fnct_inv')):
if (not totranslate) or not self._columns[field].translate:
upd0.append('"'+field+'"='+self._columns[field]._symbol_set[0])
upd1.append(self._columns[field]._symbol_set[1](vals[field]))
direct.append(field)
else:
upd_todo.append(field)
else:
updend.append(field)
if field in self._columns \
and hasattr(self._columns[field], 'selection') \
and vals[field]:
self._check_selection_field_value(cr, user, field, vals[field], context=context)
if self._log_access:
upd0.append('write_uid=%s')
upd0.append("write_date=(now() at time zone 'UTC')")
upd1.append(user)
if len(upd0):
self.check_access_rule(cr, user, ids, 'write', context=context)
for sub_ids in cr.split_for_in_conditions(ids):
cr.execute('update ' + self._table + ' set ' + ','.join(upd0) + ' ' \
'where id IN %s', upd1 + [sub_ids])
if cr.rowcount != len(sub_ids):
raise except_orm(_('AccessError'),
_('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description)
if totranslate:
# TODO: optimize
for f in direct:
if self._columns[f].translate:
src_trans = self.pool[self._name].read(cr, user, ids, [f])[0][f]
if not src_trans:
src_trans = vals[f]
# Inserting value to DB
context_wo_lang = dict(context, lang=None)
self.write(cr, user, ids, {f: vals[f]}, context=context_wo_lang)
self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans)
# call the 'set' method of fields which are not classic_write
upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
# default elements in context must be removed when calling a one2many or many2many
rel_context = context.copy()
for c in context.items():
if c[0].startswith('default_'):
del rel_context[c[0]]
for field in upd_todo:
for id in ids:
result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []
unknown_fields = updend[:]
for table in self._inherits:
col = self._inherits[table]
nids = []
for sub_ids in cr.split_for_in_conditions(ids):
cr.execute('select distinct "'+col+'" from "'+self._table+'" ' \
'where id IN %s', (sub_ids,))
nids.extend([x[0] for x in cr.fetchall()])
v = {}
for val in updend:
if self._inherit_fields[val][0] == table:
v[val] = vals[val]
unknown_fields.remove(val)
if v:
self.pool[table].write(cr, user, nids, v, context)
if unknown_fields:
_logger.warning(
'No such field(s) in model %s: %s.',
self._name, ', '.join(unknown_fields))
self._validate(cr, user, ids, context)
# TODO: use _order to set dest at the right position and not first node of parent
# We can't defer parent_store computation because the stored function
# fields that are computed may refer (directly or indirectly) to
# parent_left/right (via a child_of domain)
if parents_changed:
if self.pool._init:
self.pool._init_parent[self._name] = True
else:
order = self._parent_order or self._order
parent_val = vals[self._parent_name]
if parent_val:
clause, params = '%s=%%s' % (self._parent_name,), (parent_val,)
else:
clause, params = '%s IS NULL' % (self._parent_name,), ()
for id in parents_changed:
cr.execute('SELECT parent_left, parent_right FROM %s WHERE id=%%s' % (self._table,), (id,))
pleft, pright = cr.fetchone()
distance = pright - pleft + 1
# Positions of current siblings, to locate proper insertion point;
# this can _not_ be fetched outside the loop, as it needs to be refreshed
# after each update, in case several nodes are sequentially inserted one
# next to the other (i.e. computed incrementally)
cr.execute('SELECT parent_right, id FROM %s WHERE %s ORDER BY %s' % (self._table, clause, parent_order), params)
parents = cr.fetchall()
# Find Position of the element
position = None
for (parent_pright, parent_id) in parents:
if parent_id == id:
break
position = parent_pright and parent_pright + 1 or 1
# It's the first node of the parent
if not position:
if not parent_val:
position = 1
else:
cr.execute('select parent_left from '+self._table+' where id=%s', (parent_val,))
position = cr.fetchone()[0] + 1
if pleft < position <= pright:
raise except_orm(_('UserError'), _('Recursivity Detected.'))
if pleft < position:
cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
cr.execute('update '+self._table+' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position-pleft, position-pleft, pleft, pright))
else:
cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
cr.execute('update '+self._table+' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft-position+distance, pleft-position+distance, pleft+distance, pright+distance))
result += self._store_get_values(cr, user, ids, vals.keys(), context)
result.sort()
done = {}
for order, model_name, ids_to_update, fields_to_recompute in result:
key = (model_name, tuple(fields_to_recompute))
done.setdefault(key, {})
# avoid doing the same computation several times
todo = []
for id in ids_to_update:
if id not in done[key]:
done[key][id] = True
todo.append(id)
self.pool[model_name]._store_set_values(cr, user, todo, fields_to_recompute, context)
self.step_workflow(cr, user, ids, context=context)
return True
#
# TODO: Should set perm to user.xxx
#
def create(self, cr, user, vals, context=None):
"""
Create a new record for the model.
The values for the new record are initialized using the ``vals``
argument, and if necessary the result of ``default_get()``.
:param cr: database cursor
:param user: current user id
:type user: integer
:param vals: field values for new record, e.g {'field_name': field_value, ...}
:type vals: dictionary
:param context: optional context arguments, e.g. {'lang': 'en_us', 'tz': 'UTC', ...}
:type context: dictionary
:return: id of new record created
:raise AccessError: * if user has no create rights on the requested object
* if user tries to bypass access rules for create on the requested object
:raise ValidateError: if user tries to enter invalid value for a field that is not in selection
:raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
**Note**: The type of field values to pass in ``vals`` for relationship fields is specific.
Please see the description of the :py:meth:`~osv.osv.osv.write` method for details about the possible values and how
to specify them.
"""
if not context:
context = {}
if self.is_transient():
self._transient_vacuum(cr, user)
self.check_access_rights(cr, user, 'create')
if self._log_access:
for f in LOG_ACCESS_COLUMNS:
if vals.pop(f, None) is not None:
_logger.warning(
'Field `%s` is not allowed when creating the model `%s`.',
f, self._name)
vals = self._add_missing_default_values(cr, user, vals, context)
tocreate = {}
for v in self._inherits:
if self._inherits[v] not in vals:
tocreate[v] = {}
else:
tocreate[v] = {'id': vals[self._inherits[v]]}
columns = [
# columns will contain a list of fields, each defined as a tuple
# tuple(field_name, format_string, field_value)
# the tuple will be used by the string formatting for the INSERT
# statement.
('id', "nextval('%s')" % self._sequence),
]
upd_todo = []
unknown_fields = []
for v in vals.keys():
if v in self._inherit_fields and v not in self._columns:
(table, col, col_detail, original_parent) = self._inherit_fields[v]
tocreate[table][v] = vals[v]
del vals[v]
else:
if (v not in self._inherit_fields) and (v not in self._columns):
del vals[v]
unknown_fields.append(v)
if unknown_fields:
_logger.warning(
'No such field(s) in model %s: %s.',
self._name, ', '.join(unknown_fields))
if not self._sequence:
raise except_orm(
_('UserError'),
_('You cannot perform this operation. New Record Creation is not allowed for this object as this object is for reporting purpose.')
)
for table in tocreate:
if self._inherits[table] in vals:
del vals[self._inherits[table]]
record_id = tocreate[table].pop('id', None)
# When linking/creating parent records, force context without 'no_store_function' key that
# defers stored function computation, as these won't be computed in batch at the end of create().
parent_context = dict(context)
parent_context.pop('no_store_function', None)
if record_id is None or not record_id:
record_id = self.pool[table].create(cr, user, tocreate[table], context=parent_context)
else:
self.pool[table].write(cr, user, [record_id], tocreate[table], context=parent_context)
columns.append((self._inherits[table], '%s', record_id))
# Start: Set bool fields to False if they are not touched (to make search more powerful)
bool_fields = [x for x in self._columns.keys() if self._columns[x]._type=='boolean']
for bool_field in bool_fields:
if bool_field not in vals:
vals[bool_field] = False
#End
for field in vals.copy():
fobj = None
if field in self._columns:
fobj = self._columns[field]
else:
fobj = self._inherit_fields[field][2]
if not fobj:
continue
groups = fobj.write
if groups:
edit = False
for group in groups:
module = group.split(".")[0]
grp = group.split(".")[1]
cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name='%s' and module='%s' and model='%s') and uid=%s" % \
(grp, module, 'res.groups', user))
readonly = cr.fetchall()
if readonly[0][0] >= 1:
edit = True
break
elif readonly[0][0] == 0:
edit = False
else:
edit = False
if not edit:
vals.pop(field)
for field in vals:
current_field = self._columns[field]
if current_field._classic_write:
columns.append((field, '%s', current_field._symbol_set[1](vals[field])))
#for the function fields that receive a value, we set them directly in the database
#(they may be required), but we also need to trigger the _fnct_inv()
if (hasattr(current_field, '_fnct_inv')) and not isinstance(current_field, fields.related):
#TODO: this way of special-casing related fields is really creepy, but it shouldn't be changed
#one week before the release candidate. It seems the only good way to handle this correctly is to add an
#attribute to make a field 'really readonly' and thus totally ignored by create()... otherwise
#if, for example, the related field has a default value (for usability), then the _fnct_inv is called and it
#may raise an access rights error. Changing this is too big a change for now, and is thus postponed
#until after the release but, definitely, the behavior shouldn't be different for related and function
#fields.
upd_todo.append(field)
else:
#TODO: this `if` statement should be removed because there is no good reason to special-case related
#fields. See the above TODO comment for further explanations.
if not isinstance(current_field, fields.related):
upd_todo.append(field)
if field in self._columns \
and hasattr(current_field, 'selection') \
and vals[field]:
self._check_selection_field_value(cr, user, field, vals[field], context=context)
if self._log_access:
columns.append(('create_uid', '%s', user))
columns.append(('write_uid', '%s', user))
columns.append(('create_date', "(now() at time zone 'UTC')"))
columns.append(('write_date', "(now() at time zone 'UTC')"))
# the list of tuples used in this formatting corresponds to
# tuple(field_name, format, value)
# In some cases, for example (id, create_date, write_date), we do not
# need to read the third value of the tuple, because the real value is
# encoded in the second value (the format).
cr.execute(
"""INSERT INTO "%s" (%s) VALUES(%s) RETURNING id""" % (
self._table,
', '.join('"%s"' % f[0] for f in columns),
', '.join(f[1] for f in columns)
),
tuple([f[2] for f in columns if len(f) > 2])
)
id_new, = cr.fetchone()
upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
if self._parent_store and not context.get('defer_parent_store_computation'):
if self.pool._init:
self.pool._init_parent[self._name] = True
else:
parent = vals.get(self._parent_name, False)
if parent:
cr.execute('select parent_right from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (parent,))
pleft_old = None
result_p = cr.fetchall()
for (pleft,) in result_p:
if not pleft:
break
pleft_old = pleft
if not pleft_old:
cr.execute('select parent_left from '+self._table+' where id=%s', (parent,))
pleft_old = cr.fetchone()[0]
pleft = pleft_old
else:
cr.execute('select max(parent_right) from '+self._table)
pleft = cr.fetchone()[0] or 0
cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%s', (pleft,))
cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%s', (pleft,))
cr.execute('update '+self._table+' set parent_left=%s,parent_right=%s where id=%s', (pleft+1, pleft+2, id_new))
# default elements in context must be removed when calling a one2many or many2many
rel_context = context.copy()
for c in context.items():
if c[0].startswith('default_'):
del rel_context[c[0]]
result = []
for field in upd_todo:
result += self._columns[field].set(cr, self, id_new, field, vals[field], user, rel_context) or []
self._validate(cr, user, [id_new], context)
if not context.get('no_store_function', False):
result += self._store_get_values(cr, user, [id_new],
list(set(vals.keys() + self._inherits.values())),
context)
result.sort()
done = []
for order, model_name, ids, fields2 in result:
if not (model_name, ids, fields2) in done:
self.pool[model_name]._store_set_values(cr, user, ids, fields2, context)
done.append((model_name, ids, fields2))
if self._log_create and not (context and context.get('no_store_function', False)):
message = self._description + \
" '" + \
self.name_get(cr, user, [id_new], context=context)[0][1] + \
"' " + _("created.")
self.log(cr, user, id_new, message, True, context=context)
self.check_access_rule(cr, user, [id_new], 'create', context=context)
self.create_workflow(cr, user, [id_new], context=context)
return id_new
def browse(self, cr, uid, select, context=None, list_class=None, fields_process=None):
"""Fetch records as objects allowing to use dot notation to browse fields and relations
:param cr: database cursor
:param uid: current user id
:param select: id or list of ids.
:param context: context arguments, like lang, time zone
:rtype: object or list of objects requested
"""
self._list_class = list_class or browse_record_list
cache = {}
# need to accept ints and longs because ids coming from a method
# launched by a button in the interface have type long...
if isinstance(select, (int, long)):
return browse_record(cr, uid, select, self, cache, context=context, list_class=self._list_class, fields_process=fields_process)
elif isinstance(select, list):
return self._list_class((browse_record(cr, uid, id, self, cache, context=context, list_class=self._list_class, fields_process=fields_process) for id in select), context=context)
else:
return browse_null()
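# Illustrative sketch, not part of the original module: typical browse() usage
# from model-level code (model and field names are hypothetical).
#
#     partner = self.pool['res.partner'].browse(cr, uid, partner_id, context=context)
#     name = partner.name                  # plain field access
#     country = partner.country_id.name    # dot notation follows relations
#     records = self.browse(cr, uid, ids, context=context)   # list of browse_records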
def _store_get_values(self, cr, uid, ids, fields, context):
"""Returns an ordered list of fields.function to call due to
an update operation on ``fields`` of records with ``ids``,
obtained by calling the 'store' triggers of these fields,
as setup by their 'store' attribute.
:return: [(priority, model_name, [record_ids,], [function_fields,])]
"""
if fields is None: fields = []
stored_functions = self.pool._store_function.get(self._name, [])
# use indexed names for the details of the stored_functions:
model_name_, func_field_to_compute_, target_ids_func_, trigger_fields_, priority_ = range(5)
# only keep store triggers that should be triggered for the ``fields``
# being written to.
triggers_to_compute = [f for f in stored_functions \
if ((not f[trigger_fields_]) or set(fields).intersection(f[trigger_fields_]))]
to_compute_map = {}
target_id_results = {}
for store_trigger in triggers_to_compute:
target_func_id_ = id(store_trigger[target_ids_func_])
if not target_func_id_ in target_id_results:
# use admin user for accessing objects having rules defined on store fields
target_id_results[target_func_id_] = [i for i in store_trigger[target_ids_func_](self, cr, SUPERUSER_ID, ids, context) if i]
target_ids = target_id_results[target_func_id_]
# the compound key must consider the priority and model name
key = (store_trigger[priority_], store_trigger[model_name_])
for target_id in target_ids:
to_compute_map.setdefault(key, {}).setdefault(target_id,set()).add(tuple(store_trigger))
# Here to_compute_map looks like:
# { (10, 'model_a') : { target_id1: [ (trigger_1_tuple, trigger_2_tuple) ], ... }
# (20, 'model_a') : { target_id2: [ (trigger_3_tuple, trigger_4_tuple) ], ... }
# (99, 'model_a') : { target_id1: [ (trigger_5_tuple, trigger_6_tuple) ], ... }
# }
# Now we need to generate the batch function calls list
# call_map =
# { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
call_map = {}
for ((priority,model), id_map) in to_compute_map.iteritems():
trigger_ids_maps = {}
# function_ids_maps =
# { (function_1_tuple, function_2_tuple) : [target_id1, target_id2, ..] }
for target_id, triggers in id_map.iteritems():
trigger_ids_maps.setdefault(tuple(triggers), []).append(target_id)
for triggers, target_ids in trigger_ids_maps.iteritems():
call_map.setdefault((priority,model),[]).append((priority, model, target_ids,
[t[func_field_to_compute_] for t in triggers]))
ordered_keys = call_map.keys()
ordered_keys.sort()
result = []
if ordered_keys:
result = reduce(operator.add, (call_map[k] for k in ordered_keys))
return result
def _store_set_values(self, cr, uid, ids, fields, context):
"""Calls the fields.function's "implementation function" for all ``fields``, on records with ``ids`` (taking care of
respecting ``multi`` attributes), and stores the resulting values in the database directly."""
if not ids:
return True
field_flag = False
field_dict = {}
if self._log_access:
cr.execute('select id,write_date from '+self._table+' where id IN %s', (tuple(ids),))
res = cr.fetchall()
for r in res:
if r[1]:
field_dict.setdefault(r[0], [])
res_date = time.strptime((r[1])[:19], '%Y-%m-%d %H:%M:%S')
write_date = datetime.datetime.fromtimestamp(time.mktime(res_date))
for i in self.pool._store_function.get(self._name, []):
if i[5]:
up_write_date = write_date + datetime.timedelta(hours=i[5])
if datetime.datetime.now() < up_write_date:
if i[1] in fields:
field_dict[r[0]].append(i[1])
if not field_flag:
field_flag = True
todo = {}
keys = []
for f in fields:
if self._columns[f]._multi not in keys:
keys.append(self._columns[f]._multi)
todo.setdefault(self._columns[f]._multi, [])
todo[self._columns[f]._multi].append(f)
for key in keys:
val = todo[key]
if key:
# use admin user for accessing objects having rules defined on store fields
result = self._columns[val[0]].get(cr, self, ids, val, SUPERUSER_ID, context=context)
for id, value in result.items():
if field_flag:
for f in value.keys():
if f in field_dict[id]:
value.pop(f)
upd0 = []
upd1 = []
for v in value:
if v not in val:
continue
if self._columns[v]._type == 'many2one':
try:
value[v] = value[v][0]
except:
pass
upd0.append('"'+v+'"='+self._columns[v]._symbol_set[0])
upd1.append(self._columns[v]._symbol_set[1](value[v]))
upd1.append(id)
if upd0 and upd1:
cr.execute('update "' + self._table + '" set ' + \
','.join(upd0) + ' where id = %s', upd1)
else:
for f in val:
# use admin user for accessing objects having rules defined on store fields
result = self._columns[f].get(cr, self, ids, f, SUPERUSER_ID, context=context)
for r in result.keys():
if field_flag:
if r in field_dict.keys():
if f in field_dict[r]:
result.pop(r)
for id, value in result.items():
if self._columns[f]._type == 'many2one':
try:
value = value[0]
except:
pass
cr.execute('update "' + self._table + '" set ' + \
'"'+f+'"='+self._columns[f]._symbol_set[0] + ' where id = %s', (self._columns[f]._symbol_set[1](value), id))
return True
#
# TODO: Validate
#
def perm_write(self, cr, user, ids, fields, context=None):
raise NotImplementedError(_('This method does not exist anymore'))
# TODO: ameliorer avec NULL
def _where_calc(self, cr, user, domain, active_test=True, context=None):
"""Computes the WHERE clause needed to implement an OpenERP domain.
:param domain: the domain to compute
:type domain: list
:param active_test: whether the default filtering of records with ``active``
field set to ``False`` should be applied.
:return: the query expressing the given domain as provided in domain
:rtype: osv.query.Query
"""
if not context:
context = {}
domain = domain[:]
# if the object has a field named 'active', filter out all inactive
# records unless they were explicitly asked for
if 'active' in self._all_columns and (active_test and context.get('active_test', True)):
if domain:
# the item[0] trick below works for domain items and '&'/'|'/'!'
# operators too
if not any(item[0] == 'active' for item in domain):
domain.insert(0, ('active', '=', 1))
else:
domain = [('active', '=', 1)]
if domain:
e = expression.expression(cr, user, domain, self, context)
tables = e.get_tables()
where_clause, where_params = e.to_sql()
where_clause = where_clause and [where_clause] or []
else:
where_clause, where_params, tables = [], [], ['"%s"' % self._table]
return Query(tables, where_clause, where_params)
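# Illustrative sketch, not part of the original module: how a domain becomes SQL
# fragments (the domain and field name are hypothetical); this mirrors how
# _search() consumes the returned Query below.
#
#     query = self._where_calc(cr, uid, [('name', 'ilike', 'foo')], context=context)
#     from_clause, where_clause, where_params = query.get_sql()
#     where_str = where_clause and (' WHERE %s' % where_clause) or ''
#     cr.execute('SELECT id FROM ' + from_clause + where_str, where_params)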
def _check_qorder(self, word):
if not regex_order.match(word):
raise except_orm(_('AccessError'), _('Invalid "order" specified. A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)'))
return True
def _apply_ir_rules(self, cr, uid, query, mode='read', context=None):
"""Add what's missing in ``query`` to implement all appropriate ir.rules
(using the ``model_name``'s rules or the current model's rules if ``model_name`` is None)
:param query: the current query object
"""
if uid == SUPERUSER_ID:
return
def apply_rule(added_clause, added_params, added_tables, parent_model=None, child_object=None):
""" :param string parent_model: string of the parent model
:param model child_object: model object, base of the rule application
"""
if added_clause:
if parent_model and child_object:
# as inherited rules are being applied, we need to add the missing JOIN
# to reach the parent table (if it was not JOINed yet in the query)
parent_alias = child_object._inherits_join_add(child_object, parent_model, query)
# inherited rules are applied on the external table -> need to get the alias and replace
parent_table = self.pool[parent_model]._table
added_clause = [clause.replace('"%s"' % parent_table, '"%s"' % parent_alias) for clause in added_clause]
# change references to parent_table to parent_alias, because we now use the alias to refer to the table
new_tables = []
for table in added_tables:
# table is just a table name -> switch to the full alias
if table == '"%s"' % parent_table:
new_tables.append('"%s" as "%s"' % (parent_table, parent_alias))
# table is already a full statement -> replace references to the table with its alias; this is correct with the way aliases are generated
else:
new_tables.append(table.replace('"%s"' % parent_table, '"%s"' % parent_alias))
added_tables = new_tables
query.where_clause += added_clause
query.where_clause_params += added_params
for table in added_tables:
if table not in query.tables:
query.tables.append(table)
return True
return False
# apply main rules on the object
rule_obj = self.pool.get('ir.rule')
rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, self._name, mode, context=context)
apply_rule(rule_where_clause, rule_where_clause_params, rule_tables)
# apply ir.rules from the parents (through _inherits)
for inherited_model in self._inherits:
rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, inherited_model, mode, context=context)
apply_rule(rule_where_clause, rule_where_clause_params, rule_tables,
parent_model=inherited_model, child_object=self)
def _generate_m2o_order_by(self, order_field, query):
"""
Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
either native m2o fields or function/related fields that are stored, including
intermediate JOINs for inheritance if required.
:return: the qualified field name to use in an ORDER BY clause to sort by ``order_field``
"""
if order_field not in self._columns and order_field in self._inherit_fields:
# also add missing joins for reaching the table containing the m2o field
qualified_field = self._inherits_join_calc(order_field, query)
order_field_column = self._inherit_fields[order_field][2]
else:
qualified_field = '"%s"."%s"' % (self._table, order_field)
order_field_column = self._columns[order_field]
assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
_logger.debug("Many2one function/related fields must be stored " \
"to be used as ordering fields! Ignoring sorting for %s.%s",
self._name, order_field)
return
# figure out the applicable order_by for the m2o
dest_model = self.pool[order_field_column._obj]
m2o_order = dest_model._order
if not regex_order.match(m2o_order):
# _order is complex, can't use it here, so we default to _rec_name
m2o_order = dest_model._rec_name
else:
# extract the field names, to be able to qualify them and add desc/asc
m2o_order_list = []
for order_part in m2o_order.split(","):
m2o_order_list.append(order_part.strip().split(" ", 1)[0].strip())
m2o_order = m2o_order_list
# Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
# as we don't want to exclude results that have NULL values for the m2o
src_table, src_field = qualified_field.replace('"', '').split('.', 1)
dst_alias, dst_alias_statement = query.add_join((src_table, dest_model._table, src_field, 'id', src_field), implicit=False, outer=True)
qualify = lambda field: '"%s"."%s"' % (dst_alias, field)
return map(qualify, m2o_order) if isinstance(m2o_order, list) else qualify(m2o_order)
def _generate_order_by(self, order_spec, query):
"""
Attempt to construct an appropriate ORDER BY clause based on order_spec, which must be
a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.
:raise: except_orm in case order_spec is malformed
"""
order_by_clause = ''
order_spec = order_spec or self._order
if order_spec:
order_by_elements = []
self._check_qorder(order_spec)
for order_part in order_spec.split(','):
order_split = order_part.strip().split(' ')
order_field = order_split[0].strip()
order_direction = order_split[1].strip() if len(order_split) == 2 else ''
inner_clause = None
if order_field == 'id' or (self._log_access and order_field in LOG_ACCESS_COLUMNS.keys()):
order_by_elements.append('"%s"."%s" %s' % (self._table, order_field, order_direction))
elif order_field in self._columns:
order_column = self._columns[order_field]
if order_column._classic_read:
inner_clause = '"%s"."%s"' % (self._table, order_field)
elif order_column._type == 'many2one':
inner_clause = self._generate_m2o_order_by(order_field, query)
else:
continue # ignore non-readable or "non-joinable" fields
elif order_field in self._inherit_fields:
parent_obj = self.pool[self._inherit_fields[order_field][3]]
order_column = parent_obj._columns[order_field]
if order_column._classic_read:
inner_clause = self._inherits_join_calc(order_field, query)
elif order_column._type == 'many2one':
inner_clause = self._generate_m2o_order_by(order_field, query)
else:
continue # ignore non-readable or "non-joinable" fields
else:
raise ValueError( _("Sorting field %s not found on model %s") %( order_field, self._name))
if inner_clause:
if isinstance(inner_clause, list):
for clause in inner_clause:
order_by_elements.append("%s %s" % (clause, order_direction))
else:
order_by_elements.append("%s %s" % (inner_clause, order_direction))
if order_by_elements:
order_by_clause = ",".join(order_by_elements)
return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
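# Quick illustration, not part of the original module: for a model whose table is
# "res_partner", an order_spec like "name desc, id" should yield something close to
# ' ORDER BY "res_partner"."name" desc,"res_partner"."id"  ' (exact quoting and
# spacing depend on which branch above handles each column).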
def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
"""
Private implementation of search() method, allowing specifying the uid to use for the access right check.
This is useful for example when filling in the selection list for a drop-down and avoiding access rights errors,
by specifying ``access_rights_uid=1`` to bypass access rights check, but not ir.rules!
This is ok at the security level because this method is private and not callable through XML-RPC.
:param access_rights_uid: optional user ID to use when checking access rights
(not for ir.rules, this is only for ir.model.access)
"""
if context is None:
context = {}
self.check_access_rights(cr, access_rights_uid or user, 'read')
# For transient models, restrict access to the current user, except for the super-user
if self.is_transient() and self._log_access and user != SUPERUSER_ID:
args = expression.AND(([('create_uid', '=', user)], args or []))
query = self._where_calc(cr, user, args, context=context)
self._apply_ir_rules(cr, user, query, 'read', context=context)
order_by = self._generate_order_by(order, query)
from_clause, where_clause, where_clause_params = query.get_sql()
limit_str = limit and ' limit %d' % limit or ''
offset_str = offset and ' offset %d' % offset or ''
where_str = where_clause and (" WHERE %s" % where_clause) or ''
query_str = 'SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str
if count:
# /!\ the main query must be executed as a subquery, otherwise
# offset and limit apply to the result of count()!
cr.execute('SELECT count(*) FROM (%s) AS count' % query_str, where_clause_params)
res = cr.fetchone()
return res[0]
cr.execute(query_str, where_clause_params)
res = cr.fetchall()
# TDE note: with auto_join, we could have several lines about the same result
# i.e. a lead with several unread messages; we uniquify the result using
# a fast way to do it while preserving order (http://www.peterbe.com/plog/uniqifiers-benchmark)
def _uniquify_list(seq):
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
return _uniquify_list([x[0] for x in res])
# returns the different values ever entered for one field
# this is used, for example, in the client when the user hits enter on
# a char field
def distinct_field_get(self, cr, uid, field, value, args=None, offset=0, limit=None):
if not args:
args = []
if field in self._inherit_fields:
return self.pool[self._inherit_fields[field][0]].distinct_field_get(cr, uid, field, value, args, offset, limit)
else:
return self._columns[field].search(cr, self, args, field, value, offset, limit, uid)
def copy_data(self, cr, uid, id, default=None, context=None):
"""
Copy given record's data with all its fields values
:param cr: database cursor
:param uid: current user id
:param id: id of the record to copy
:param default: field values to override in the original values of the copied record
:type default: dictionary
:param context: context arguments, like lang, time zone
:type context: dictionary
:return: dictionary containing all the field values
"""
if context is None:
context = {}
# avoid recursion through already copied records in case of circular relationship
seen_map = context.setdefault('__copy_data_seen', {})
if id in seen_map.setdefault(self._name, []):
return
seen_map[self._name].append(id)
if default is None:
default = {}
if 'state' not in default:
if 'state' in self._defaults:
if callable(self._defaults['state']):
default['state'] = self._defaults['state'](self, cr, uid, context)
else:
default['state'] = self._defaults['state']
# build a black list of fields that should not be copied
blacklist = set(MAGIC_COLUMNS + ['parent_left', 'parent_right'])
def blacklist_given_fields(obj):
# blacklist the fields that are given by inheritance
for other, field_to_other in obj._inherits.items():
blacklist.add(field_to_other)
if field_to_other in default:
# all the fields of 'other' are given by the record: default[field_to_other],
# except the ones redefined in self
blacklist.update(set(self.pool[other]._all_columns) - set(self._columns))
else:
blacklist_given_fields(self.pool[other])
# blacklist deprecated fields
for name, field in obj._columns.items():
if field.deprecated:
blacklist.add(name)
blacklist_given_fields(self)
fields_to_copy = dict((f,fi) for f, fi in self._all_columns.iteritems()
if f not in default
if f not in blacklist
if not isinstance(fi.column, fields.function))
data = self.read(cr, uid, [id], fields_to_copy.keys(), context=context)
if data:
data = data[0]
else:
raise IndexError( _("Record #%d of %s not found, cannot copy!") %( id, self._name))
res = dict(default)
for f, colinfo in fields_to_copy.iteritems():
field = colinfo.column
if field._type == 'many2one':
res[f] = data[f] and data[f][0]
elif field._type == 'one2many':
other = self.pool[field._obj]
# duplicate following the order of the ids because we'll rely on
# it later for copying translations in copy_translation()!
lines = [other.copy_data(cr, uid, line_id, context=context) for line_id in sorted(data[f])]
# the lines are duplicated using the wrong (old) parent, but then
# are reassigned to the correct one thanks to the (0, 0, ...)
res[f] = [(0, 0, line) for line in lines if line]
elif field._type == 'many2many':
res[f] = [(6, 0, data[f])]
else:
res[f] = data[f]
return res
def copy_translations(self, cr, uid, old_id, new_id, context=None):
if context is None:
context = {}
# avoid recursion through already copied records in case of circular relationship
seen_map = context.setdefault('__copy_translations_seen',{})
if old_id in seen_map.setdefault(self._name,[]):
return
seen_map[self._name].append(old_id)
trans_obj = self.pool.get('ir.translation')
# TODO it seems fields_get can be replaced by _all_columns (no need for translation)
fields = self.fields_get(cr, uid, context=context)
for field_name, field_def in fields.items():
# removing the lang to compare untranslated values
context_wo_lang = dict(context, lang=None)
old_record, new_record = self.browse(cr, uid, [old_id, new_id], context=context_wo_lang)
# we must recursively copy the translations for o2o and o2m
if field_def['type'] == 'one2many':
target_obj = self.pool[field_def['relation']]
# here we rely on the order of the ids to match the translations
# as foreseen in copy_data()
old_children = sorted(r.id for r in old_record[field_name])
new_children = sorted(r.id for r in new_record[field_name])
for (old_child, new_child) in zip(old_children, new_children):
target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
# and for translatable fields we keep them for copy
elif field_def.get('translate'):
if field_name in self._columns:
trans_name = self._name + "," + field_name
target_id = new_id
source_id = old_id
elif field_name in self._inherit_fields:
trans_name = self._inherit_fields[field_name][0] + "," + field_name
# get the id of the parent record to set the translation
inherit_field_name = self._inherit_fields[field_name][1]
target_id = new_record[inherit_field_name].id
source_id = old_record[inherit_field_name].id
else:
continue
trans_ids = trans_obj.search(cr, uid, [
('name', '=', trans_name),
('res_id', '=', source_id)
])
user_lang = context.get('lang')
for record in trans_obj.read(cr, uid, trans_ids, context=context):
del record['id']
# remove source to avoid triggering _set_src
del record['source']
record.update({'res_id': target_id})
if user_lang and user_lang == record['lang']:
# 'source' to force the call to _set_src
# 'value' needed if value is changed in copy(), want to see the new_value
record['source'] = old_record[field_name]
record['value'] = new_record[field_name]
trans_obj.create(cr, uid, record, context=context)
def copy(self, cr, uid, id, default=None, context=None):
"""
Duplicate record with given id updating it with default values
:param cr: database cursor
:param uid: current user id
:param id: id of the record to copy
:param default: dictionary of field values to override in the original values of the copied record, e.g.: ``{'field_name': overridden_value, ...}``
:type default: dictionary
:param context: context arguments, like lang, time zone
:type context: dictionary
:return: id of the newly created record
"""
if context is None:
context = {}
context = context.copy()
data = self.copy_data(cr, uid, id, default, context)
new_id = self.create(cr, uid, data, context)
self.copy_translations(cr, uid, id, new_id, context)
return new_id
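# Illustrative sketch, not part of the original module: duplicating a record while
# overriding a couple of fields (field names and values are hypothetical).
#
#     new_id = self.copy(cr, uid, record_id,
#                        default={'name': 'Copy of something', 'active': True},
#                        context=context)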
def exists(self, cr, uid, ids, context=None):
"""Checks whether the given id or ids exist in this model,
and return the list of ids that do. This is simple to use for
a truth test on a browse_record::
if record.exists():
pass
:param ids: id or list of ids to check for existence
:type ids: int or [int]
:return: the list of ids that currently exist, out of
the given `ids`
"""
if type(ids) in (int, long):
ids = [ids]
if not ids:
return []
query = 'SELECT id FROM "%s"' % self._table
cr.execute(query + " WHERE ID IN %s", (tuple(ids),))
return [x[0] for x in cr.fetchall()]
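# Illustrative sketch, not part of the original module: filtering out ids that no
# longer exist before processing them.
#
#     existing_ids = self.exists(cr, uid, ids)
#     missing_ids = set(ids) - set(existing_ids)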
def check_recursion(self, cr, uid, ids, context=None, parent=None):
_logger.warning("You are using deprecated %s.check_recursion(). Please use the '_check_recursion()' instead!" % \
self._name)
assert parent is None or parent in self._columns or parent in self._inherit_fields,\
"The 'parent' parameter passed to check_recursion() must be None or a valid field name"
return self._check_recursion(cr, uid, ids, context, parent)
def _check_recursion(self, cr, uid, ids, context=None, parent=None):
"""
Verifies that there is no loop in a hierarchical structure of records,
by following the parent relationship using the **parent** field until a loop
is detected or until a top-level record is found.
:param cr: database cursor
:param uid: current user id
:param ids: list of ids of records to check
:param parent: optional parent field name (default: ``self._parent_name = parent_id``)
:return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
"""
if not parent:
parent = self._parent_name
# must ignore 'active' flag, ir.rules, etc. => direct SQL query
query = 'SELECT "%s" FROM "%s" WHERE id = %%s' % (parent, self._table)
for id in ids:
current_id = id
while current_id is not None:
cr.execute(query, (current_id,))
result = cr.fetchone()
current_id = result[0] if result else None
if current_id == id:
return False
return True
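# Illustrative sketch, not part of the original module: the usual way a model hooks
# _check_recursion into its constraints (message and field name are hypothetical).
#
#     def _check_hierarchy(self, cr, uid, ids, context=None):
#         return self._check_recursion(cr, uid, ids, context=context)
#
#     _constraints = [
#         (_check_hierarchy, 'Error! You cannot create recursive records.', ['parent_id']),
#     ]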
def _check_m2m_recursion(self, cr, uid, ids, field_name):
"""
Verifies that there is no loop in a hierarchical structure of records,
by following the parent relationship using the **parent** field until a loop
is detected or until a top-level record is found.
:param cr: database cursor
:param uid: current user id
:param ids: list of ids of records to check
:param field_name: field to check
:return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
"""
field = self._all_columns.get(field_name)
field = field.column if field else None
if not field or field._type != 'many2many' or field._obj != self._name:
# field must be a many2many on itself
raise ValueError('invalid field_name: %r' % (field_name,))
query = 'SELECT distinct "%s" FROM "%s" WHERE "%s" IN %%s' % (field._id2, field._rel, field._id1)
ids_parent = ids[:]
while ids_parent:
ids_parent2 = []
for i in range(0, len(ids_parent), cr.IN_MAX):
j = i + cr.IN_MAX
sub_ids_parent = ids_parent[i:j]
cr.execute(query, (tuple(sub_ids_parent),))
ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall())))
ids_parent = ids_parent2
for i in ids_parent:
if i in ids:
return False
return True
def _get_external_ids(self, cr, uid, ids, *args, **kwargs):
"""Retrieve the External ID(s) of any database record.
**Synopsis**: ``_get_xml_ids(cr, uid, ids) -> { 'id': ['module.xml_id'] }``
:return: map of ids to the list of their fully qualified External IDs
in the form ``module.key``, or an empty list when there's no External
ID for a record, e.g.::
{ 'id': ['module.ext_id', 'module.ext_id_bis'],
'id2': [] }
"""
ir_model_data = self.pool.get('ir.model.data')
data_ids = ir_model_data.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)])
data_results = ir_model_data.read(cr, uid, data_ids, ['module', 'name', 'res_id'])
result = {}
for id in ids:
# can't use dict.fromkeys() as the list would be shared!
result[id] = []
for record in data_results:
result[record['res_id']].append('%(module)s.%(name)s' % record)
return result
def get_external_id(self, cr, uid, ids, *args, **kwargs):
"""Retrieve the External ID of any database record, if there
is one. This method works as a possible implementation
for a function field, to be able to add it to any
model object easily, referencing it as ``Model.get_external_id``.
When multiple External IDs exist for a record, only one
of them is returned (randomly).
:return: map of ids to their fully qualified XML ID,
defaulting to an empty string when there's none
(to be usable as a function field),
e.g.::
{ 'id': 'module.ext_id',
'id2': '' }
"""
results = self._get_xml_ids(cr, uid, ids)
for k, v in results.iteritems():
if results[k]:
results[k] = v[0]
else:
results[k] = ''
return results
# backwards compatibility
get_xml_id = get_external_id
_get_xml_ids = _get_external_ids
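# Illustrative sketch, not part of the original module: resolving the External ID of
# a record, falling back to '' when it has none.
#
#     ext_ids = self.get_external_id(cr, uid, [rec_id])
#     xml_id = ext_ids[rec_id]    # e.g. 'base.main_company' or ''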
def print_report(self, cr, uid, ids, name, data, context=None):
"""
Render the report `name` for the given IDs. The report must be defined
for this model, not another.
"""
report = self.pool['ir.actions.report.xml']._lookup_report(cr, name)
assert self._name == report.table
return report.create(cr, uid, ids, data, context)
# Transience
def is_transient(self):
""" Return whether the model is transient.
See :class:`TransientModel`.
"""
return self._transient
def _transient_clean_rows_older_than(self, cr, seconds):
assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
# Never delete rows used in last 5 minutes
seconds = max(seconds, 300)
query = ("SELECT id FROM " + self._table + " WHERE"
" COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
" < ((now() at time zone 'UTC') - interval %s)")
cr.execute(query, ("%s seconds" % seconds,))
ids = [x[0] for x in cr.fetchall()]
self.unlink(cr, SUPERUSER_ID, ids)
def _transient_clean_old_rows(self, cr, max_count):
# Check how many rows we have in the table
cr.execute("SELECT count(*) AS row_count FROM " + self._table)
res = cr.fetchall()
if res[0][0] <= max_count:
return # max not reached, nothing to do
self._transient_clean_rows_older_than(cr, 300)
def _transient_vacuum(self, cr, uid, force=False):
"""Clean the transient records.
This unlinks old records from the transient model tables whenever the
"_transient_max_count" or "_max_age" conditions (if any) are reached.
Actual cleaning will happen only once every "_transient_check_time" calls.
This means this method can be called frequently (e.g. whenever
a new record is created).
Example with both max_hours and max_count active:
Suppose max_hours = 0.2 (e.g. 12 minutes), max_count = 20, there are 55 rows in the
table, 10 created/changed in the last 5 minutes, an additional 12 created/changed between
5 and 10 minutes ago, the rest created/changed more than 12 minutes ago.
- age based vacuum will leave the 22 rows created/changed in the last 12 minutes
- count based vacuum will wipe out another 12 rows. Not just 2, otherwise each addition
would immediately cause the maximum to be reached again.
- the 10 rows that have been created/changed the last 5 minutes will NOT be deleted
"""
assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
_transient_check_time = 20 # arbitrary limit on vacuum executions
self._transient_check_count += 1
if not force and (self._transient_check_count < _transient_check_time):
return True # no vacuum cleaning this time
self._transient_check_count = 0
# Age-based expiration
if self._transient_max_hours:
self._transient_clean_rows_older_than(cr, self._transient_max_hours * 60 * 60)
# Count-based expiration
if self._transient_max_count:
self._transient_clean_old_rows(cr, self._transient_max_count)
return True
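# Illustrative sketch, not part of the original module: a transient model opting
# into both expiration policies (class name and limit values are hypothetical).
#
#     class my_wizard(TransientModel):
#         _name = 'my.wizard'
#         _transient_max_count = 200    # count-based vacuum threshold
#         _transient_max_hours = 1.0    # age-based vacuum threshold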
def resolve_2many_commands(self, cr, uid, field_name, commands, fields=None, context=None):
""" Serializes one2many and many2many commands into record dictionaries
(as if all the records came from the database via a read()). This
method is aimed at onchange methods on one2many and many2many fields.
Because commands might be creation commands, not all record dicts
will contain an ``id`` field. Commands matching an existing record
will have an ``id``.
:param field_name: name of the one2many or many2many field matching the commands
:type field_name: str
:param commands: one2many or many2many commands to execute on ``field_name``
:type commands: list((int|False, int|False, dict|False))
:param fields: list of fields to read from the database, when applicable
:type fields: list(str)
:returns: records in a shape similar to that returned by ``read()``
(except records may be missing the ``id`` field if they don't exist in db)
:rtype: list(dict)
"""
result = [] # result (list of dict)
record_ids = [] # ids of records to read
updates = {} # {id: dict} of updates on particular records
for command in commands:
if not isinstance(command, (list, tuple)):
record_ids.append(command)
elif command[0] == 0:
result.append(command[2])
elif command[0] == 1:
record_ids.append(command[1])
updates.setdefault(command[1], {}).update(command[2])
elif command[0] in (2, 3):
record_ids = [id for id in record_ids if id != command[1]]
elif command[0] == 4:
record_ids.append(command[1])
elif command[0] == 5:
result, record_ids = [], []
elif command[0] == 6:
result, record_ids = [], list(command[2])
# read the records and apply the updates
other_model = self.pool[self._all_columns[field_name].column._obj]
for record in other_model.read(cr, uid, record_ids, fields=fields, context=context):
record.update(updates.get(record['id'], {}))
result.append(record)
return result
# for backward compatibility
resolve_o2m_commands_to_record_dicts = resolve_2many_commands
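# Illustrative sketch, not part of the original module: typical use from an onchange
# method (field name and values are hypothetical).
#
#     lines = self.resolve_2many_commands(cr, uid, 'line_ids',
#         [(0, 0, {'qty': 1}), (4, 7), (1, 8, {'qty': 2})], ['qty'], context=context)
#     # -> the (0, 0, ...) dict as-is, plus the read() dicts of records 7 and 8,
#     #    with record 8's values overlaid by {'qty': 2}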
def search_read(self, cr, uid, domain=None, fields=None, offset=0, limit=None, order=None, context=None):
"""
Performs a ``search()`` followed by a ``read()``.
:param cr: database cursor
:param uid: current user id
:param domain: Search domain, see ``args`` parameter in ``search()``. Defaults to an empty domain that will match all records.
:param fields: List of fields to read, see ``fields`` parameter in ``read()``. Defaults to all fields.
:param offset: Number of records to skip, see ``offset`` parameter in ``search()``. Defaults to 0.
:param limit: Maximum number of records to return, see ``limit`` parameter in ``search()``. Defaults to no limit.
:param order: Columns to sort result, see ``order`` parameter in ``search()``. Defaults to no sort.
:param context: context arguments.
:return: List of dictionaries containing the asked fields.
:rtype: List of dictionaries.
"""
record_ids = self.search(cr, uid, domain or [], offset=offset, limit=limit, order=order, context=context)
if not record_ids:
return []
if fields and fields == ['id']:
# shortcut read if we only want the ids
return [{'id': id} for id in record_ids]
# read() ignores active_test, but it would forward it to any downstream search call
# (e.g. for x2m or function fields), and this is not the desired behavior, the flag
# was presumably only meant for the main search().
# TODO: Move this to read() directly?
read_ctx = dict(context or {})
read_ctx.pop('active_test', None)
result = self.read(cr, uid, record_ids, fields, context=read_ctx)
if len(result) <= 1:
return result
# reorder read
index = dict((r['id'], r) for r in result)
return [index[x] for x in record_ids if x in index]
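# Illustrative sketch, not part of the original module: fetching a page of matching
# records in one call (field names are hypothetical).
#
#     rows = self.search_read(cr, uid, [('active', '=', True)],
#                             ['name', 'email'], limit=10, order='name',
#                             context=context)
#     # -> [{'id': 1, 'name': ..., 'email': ...}, ...] in search order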
def _register_hook(self, cr):
""" stuff to do right after the registry is built """
pass
def __getattr__(self, name):
if name.startswith('signal_'):
signal_name = name[len('signal_'):]
assert signal_name
return (lambda *args, **kwargs:
self.signal_workflow(*args, signal=signal_name, **kwargs))
get = getattr(super(BaseModel, self), '__getattr__', None)
if get is not None: return get(name)
raise AttributeError(
"'%s' object has no attribute '%s'" % (type(self).__name__, name))
# keep this import here, at top it will cause dependency cycle errors
import expression
class Model(BaseModel):
"""Main super-class for regular database-persisted OpenERP models.
OpenERP models are created by inheriting from this class::
class user(Model):
...
The system will later instantiate the class once per database (on
which the class' module is installed).
"""
_auto = True
_register = False # not visible in ORM registry, meant to be python-inherited only
_transient = False # True in a TransientModel
class TransientModel(BaseModel):
"""Model super-class for transient records, meant to be temporarily
persisted, and regularly vacuum-cleaned.
A TransientModel has a simplified access rights management,
all users can create new records, and may only access the
records they created. The super-user has unrestricted access
to all TransientModel records.
"""
_auto = True
_register = False # not visible in ORM registry, meant to be python-inherited only
_transient = True
class AbstractModel(BaseModel):
"""Abstract Model super-class for creating an abstract class meant to be
inherited by regular models (Models or TransientModels) but not meant to
be usable on its own, or persisted.
Technical note: we don't want to make AbstractModel the super-class of
Model or BaseModel because it would not make sense to put the main
definition of persistence methods such as create() in it, and still we
should be able to override them within an AbstractModel.
"""
_auto = False # don't create any database backend for AbstractModels
_register = False # not visible in ORM registry, meant to be python-inherited only
_transient = False
def itemgetter_tuple(items):
""" Fixes itemgetter inconsistency (useful in some cases) of not returning
a tuple if len(items) == 1: always returns an n-tuple where n = len(items)
"""
if len(items) == 0:
return lambda a: ()
if len(items) == 1:
return lambda gettable: (gettable[items[0]],)
return operator.itemgetter(*items)
class ImportWarning(Warning):
""" Used to send warnings upwards the stack during the import process
"""
pass
def convert_pgerror_23502(model, fields, info, e):
m = re.match(r'^null value in column "(?P<field>\w+)" violates '
r'not-null constraint\n',
str(e))
field_name = m and m.group('field')
if not m or field_name not in fields:
return {'message': unicode(e)}
message = _(u"Missing required value for the field '%s'.") % field_name
field = fields.get(field_name)
if field:
message = _(u"Missing required value for the field '%s' (%s)") % (field['string'], field_name)
return {
'message': message,
'field': field_name,
}
def convert_pgerror_23505(model, fields, info, e):
m = re.match(r'^duplicate key (?P<field>\w+) violates unique constraint',
str(e))
field_name = m and m.group('field')
if not m or field_name not in fields:
return {'message': unicode(e)}
message = _(u"The value for the field '%s' already exists.") % field_name
field = fields.get(field_name)
if field:
message = _(u"%s This might be '%s' in the current model, or a field "
u"of the same name in an o2m.") % (message, field['string'])
return {
'message': message,
'field': field_name,
}
PGERROR_TO_OE = collections.defaultdict(
# shape of mapped converters
lambda: (lambda model, fvg, info, pgerror: {'message': unicode(pgerror)}), {
# not_null_violation
'23502': convert_pgerror_23502,
# unique constraint error
'23505': convert_pgerror_23505,
})
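# Illustrative sketch, not part of the original module: how such a mapping is
# typically consulted when a database IntegrityError is caught (the .pgcode
# attribute assumes psycopg2's error interface; other names are hypothetical).
#
#     converter = PGERROR_TO_OE[pg_exception.pgcode]
#     info = converter(model, fields_def, extra_info, pg_exception)
#     # info['message'] holds a user-readable message, info.get('field') the culprit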
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
thesuperzapper/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/wishart_test.py | 38 | 14437 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Wishart."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import linalg
from tensorflow.contrib import distributions as distributions_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
distributions = distributions_lib
def make_pd(start, n):
"""Deterministically create a positive definite matrix."""
x = np.tril(linalg.circulant(np.arange(start, start + n)))
return np.dot(x, x.T)
def chol(x):
"""Compute Cholesky factorization."""
return linalg.cholesky(x).T
def wishart_var(df, x):
"""Compute Wishart variance for numpy scale matrix."""
x = np.sqrt(df) * np.asarray(x)
d = np.expand_dims(np.diag(x), -1)
return x**2 + np.dot(d, d.T)
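# Quick illustration, not part of the original test: make_pd(1., 2) takes the
# lower-triangular part of circulant([1, 2]) and multiplies it by its transpose,
# which should give the positive definite matrix [[1., 2.], [2., 5.]].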
class WishartCholeskyTest(test.TestCase):
def testEntropy(self):
with self.test_session():
scale = make_pd(1., 2)
df = 4
w = distributions.WishartCholesky(df, chol(scale))
# sp.stats.wishart(df=4, scale=make_pd(1., 2)).entropy()
self.assertAllClose(6.301387092430769, w.entropy().eval())
w = distributions.WishartCholesky(df=1, scale=[[1.]])
# sp.stats.wishart(df=1,scale=1).entropy()
self.assertAllClose(0.78375711047393404, w.entropy().eval())
def testMeanLogDetAndLogNormalizingConstant(self):
with self.test_session():
def entropy_alt(w):
return (
w.log_normalization()
- 0.5 * (w.df - w.dimension - 1.) * w.mean_log_det()
+ 0.5 * w.df * w.dimension).eval()
w = distributions.WishartCholesky(df=4,
scale=chol(make_pd(1., 2)))
self.assertAllClose(w.entropy().eval(), entropy_alt(w))
w = distributions.WishartCholesky(df=5, scale=[[1.]])
self.assertAllClose(w.entropy().eval(), entropy_alt(w))
def testMean(self):
with self.test_session():
scale = make_pd(1., 2)
df = 4
w = distributions.WishartCholesky(df, chol(scale))
self.assertAllEqual(df * scale, w.mean().eval())
def testMode(self):
with self.test_session():
scale = make_pd(1., 2)
df = 4
w = distributions.WishartCholesky(df, chol(scale))
self.assertAllEqual((df - 2. - 1.) * scale, w.mode().eval())
def testStd(self):
with self.test_session():
scale = make_pd(1., 2)
df = 4
w = distributions.WishartCholesky(df, chol(scale))
self.assertAllEqual(chol(wishart_var(df, scale)), w.stddev().eval())
def testVariance(self):
with self.test_session():
scale = make_pd(1., 2)
df = 4
w = distributions.WishartCholesky(df, chol(scale))
self.assertAllEqual(wishart_var(df, scale), w.variance().eval())
def testSample(self):
with self.test_session():
scale = make_pd(1., 2)
df = 4
chol_w = distributions.WishartCholesky(
df, chol(scale), cholesky_input_output_matrices=False)
x = chol_w.sample(1, seed=42).eval()
chol_x = [chol(x[0])]
full_w = distributions.WishartFull(
df, scale, cholesky_input_output_matrices=False)
self.assertAllClose(x, full_w.sample(1, seed=42).eval())
chol_w_chol = distributions.WishartCholesky(
df, chol(scale), cholesky_input_output_matrices=True)
self.assertAllClose(chol_x, chol_w_chol.sample(1, seed=42).eval())
eigen_values = array_ops.matrix_diag_part(
chol_w_chol.sample(
1000, seed=42))
np.testing.assert_array_less(0., eigen_values.eval())
full_w_chol = distributions.WishartFull(
df, scale, cholesky_input_output_matrices=True)
self.assertAllClose(chol_x, full_w_chol.sample(1, seed=42).eval())
eigen_values = array_ops.matrix_diag_part(
full_w_chol.sample(
1000, seed=42))
np.testing.assert_array_less(0., eigen_values.eval())
# Check first and second moments.
df = 4.
chol_w = distributions.WishartCholesky(
df=df,
scale=chol(make_pd(1., 3)),
cholesky_input_output_matrices=False)
x = chol_w.sample(10000, seed=42)
self.assertAllEqual((10000, 3, 3), x.get_shape())
moment1_estimate = math_ops.reduce_mean(x, reduction_indices=[0]).eval()
self.assertAllClose(chol_w.mean().eval(), moment1_estimate, rtol=0.05)
# The Variance estimate uses the squares rather than outer-products
# because Wishart.Variance is the diagonal of the Wishart covariance
# matrix.
variance_estimate = (math_ops.reduce_mean(
math_ops.square(x), reduction_indices=[0]) -
math_ops.square(moment1_estimate)).eval()
self.assertAllClose(
chol_w.variance().eval(), variance_estimate, rtol=0.05)
# Test that sampling with the same seed twice gives the same results.
def testSampleMultipleTimes(self):
with self.test_session():
df = 4.
n_val = 100
random_seed.set_random_seed(654321)
chol_w1 = distributions.WishartCholesky(
df=df,
scale=chol(make_pd(1., 3)),
cholesky_input_output_matrices=False,
name="wishart1")
samples1 = chol_w1.sample(n_val, seed=123456).eval()
random_seed.set_random_seed(654321)
chol_w2 = distributions.WishartCholesky(
df=df,
scale=chol(make_pd(1., 3)),
cholesky_input_output_matrices=False,
name="wishart2")
samples2 = chol_w2.sample(n_val, seed=123456).eval()
self.assertAllClose(samples1, samples2)
def testProb(self):
with self.test_session():
# Generate some positive definite (pd) matrices and their Cholesky
# factorizations.
x = np.array(
[make_pd(1., 2), make_pd(2., 2), make_pd(3., 2), make_pd(4., 2)])
chol_x = np.array([chol(x[0]), chol(x[1]), chol(x[2]), chol(x[3])])
# Since Wishart wasn't added to SciPy until 0.16, we'll spot check some
# pdfs with hard-coded results from upstream SciPy.
log_prob_df_seq = np.array([
# math.log(stats.wishart.pdf(x[0], df=2+0, scale=x[0]))
-3.5310242469692907,
# math.log(stats.wishart.pdf(x[1], df=2+1, scale=x[1]))
-7.689907330328961,
# math.log(stats.wishart.pdf(x[2], df=2+2, scale=x[2]))
-10.815845159537895,
# math.log(stats.wishart.pdf(x[3], df=2+3, scale=x[3]))
-13.640549882916691,
])
# This test checks that batches don't interfere with correctness.
w = distributions.WishartCholesky(
df=[2, 3, 4, 5],
scale=chol_x,
cholesky_input_output_matrices=True)
self.assertAllClose(log_prob_df_seq, w.log_prob(chol_x).eval())
# Now we test various constructions of Wishart with different sample
# shape.
log_prob = np.array([
# math.log(stats.wishart.pdf(x[0], df=4, scale=x[0]))
-4.224171427529236,
# math.log(stats.wishart.pdf(x[1], df=4, scale=x[0]))
-6.3378770664093453,
# math.log(stats.wishart.pdf(x[2], df=4, scale=x[0]))
-12.026946850193017,
# math.log(stats.wishart.pdf(x[3], df=4, scale=x[0]))
-20.951582705289454,
])
for w in (
distributions.WishartCholesky(
df=4,
scale=chol_x[0],
cholesky_input_output_matrices=False),
distributions.WishartFull(
df=4,
scale=x[0],
cholesky_input_output_matrices=False)):
self.assertAllEqual((2, 2), w.event_shape_tensor().eval())
self.assertEqual(2, w.dimension.eval())
self.assertAllClose(log_prob[0], w.log_prob(x[0]).eval())
self.assertAllClose(log_prob[0:2], w.log_prob(x[0:2]).eval())
self.assertAllClose(
np.reshape(log_prob, (2, 2)),
w.log_prob(np.reshape(x, (2, 2, 2, 2))).eval())
self.assertAllClose(
np.reshape(np.exp(log_prob), (2, 2)),
w.prob(np.reshape(x, (2, 2, 2, 2))).eval())
self.assertAllEqual((2, 2),
w.log_prob(np.reshape(x, (2, 2, 2, 2))).get_shape())
for w in (
distributions.WishartCholesky(
df=4,
scale=chol_x[0],
cholesky_input_output_matrices=True),
distributions.WishartFull(
df=4,
scale=x[0],
cholesky_input_output_matrices=True)):
self.assertAllEqual((2, 2), w.event_shape_tensor().eval())
self.assertEqual(2, w.dimension.eval())
self.assertAllClose(log_prob[0], w.log_prob(chol_x[0]).eval())
self.assertAllClose(log_prob[0:2], w.log_prob(chol_x[0:2]).eval())
self.assertAllClose(
np.reshape(log_prob, (2, 2)),
w.log_prob(np.reshape(chol_x, (2, 2, 2, 2))).eval())
self.assertAllClose(
np.reshape(np.exp(log_prob), (2, 2)),
w.prob(np.reshape(chol_x, (2, 2, 2, 2))).eval())
self.assertAllEqual((2, 2),
w.log_prob(np.reshape(x, (2, 2, 2, 2))).get_shape())
def testBatchShape(self):
with self.test_session() as sess:
scale = make_pd(1., 2)
chol_scale = chol(scale)
w = distributions.WishartCholesky(df=4, scale=chol_scale)
self.assertAllEqual([], w.batch_shape)
self.assertAllEqual([], w.batch_shape_tensor().eval())
w = distributions.WishartCholesky(
df=[4., 4], scale=np.array([chol_scale, chol_scale]))
self.assertAllEqual([2], w.batch_shape)
self.assertAllEqual([2], w.batch_shape_tensor().eval())
scale_deferred = array_ops.placeholder(dtypes.float32)
w = distributions.WishartCholesky(df=4, scale=scale_deferred)
self.assertAllEqual(
[], sess.run(w.batch_shape_tensor(),
feed_dict={scale_deferred: chol_scale}))
self.assertAllEqual(
[2],
sess.run(w.batch_shape_tensor(),
feed_dict={scale_deferred: [chol_scale, chol_scale]}))
def testEventShape(self):
with self.test_session() as sess:
scale = make_pd(1., 2)
chol_scale = chol(scale)
w = distributions.WishartCholesky(df=4, scale=chol_scale)
self.assertAllEqual([2, 2], w.event_shape)
self.assertAllEqual([2, 2], w.event_shape_tensor().eval())
w = distributions.WishartCholesky(
df=[4., 4], scale=np.array([chol_scale, chol_scale]))
self.assertAllEqual([2, 2], w.event_shape)
self.assertAllEqual([2, 2], w.event_shape_tensor().eval())
scale_deferred = array_ops.placeholder(dtypes.float32)
w = distributions.WishartCholesky(df=4, scale=scale_deferred)
self.assertAllEqual(
[2, 2],
sess.run(w.event_shape_tensor(),
feed_dict={scale_deferred: chol_scale}))
self.assertAllEqual(
[2, 2],
sess.run(w.event_shape_tensor(),
feed_dict={scale_deferred: [chol_scale, chol_scale]}))
def testValidateArgs(self):
with self.test_session() as sess:
df_deferred = array_ops.placeholder(dtypes.float32)
chol_scale_deferred = array_ops.placeholder(dtypes.float32)
x = make_pd(1., 3)
chol_scale = chol(x)
# Check expensive, deferred assertions.
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"cannot be less than"):
chol_w = distributions.WishartCholesky(
df=df_deferred,
scale=chol_scale_deferred,
validate_args=True)
sess.run(chol_w.log_prob(np.asarray(
x, dtype=np.float32)),
feed_dict={df_deferred: 2.,
chol_scale_deferred: chol_scale})
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Cholesky decomposition was not successful"):
chol_w = distributions.WishartFull(
df=df_deferred, scale=chol_scale_deferred)
# np.ones((3, 3)) is not positive definite.
sess.run(chol_w.log_prob(np.asarray(
x, dtype=np.float32)),
feed_dict={
df_deferred: 4.,
chol_scale_deferred: np.ones(
(3, 3), dtype=np.float32)
})
# Ensure no assertions.
chol_w = distributions.WishartCholesky(
df=df_deferred,
scale=chol_scale_deferred,
validate_args=False)
sess.run(chol_w.log_prob(np.asarray(
x, dtype=np.float32)),
feed_dict={df_deferred: 4,
chol_scale_deferred: chol_scale})
# Bogus log_prob, but since we have no checks running... c'est la vie.
sess.run(chol_w.log_prob(np.asarray(
x, dtype=np.float32)),
feed_dict={df_deferred: 4,
chol_scale_deferred: np.ones((3, 3))})
# Still has these assertions because they're resolvable at graph
# construction
with self.assertRaisesRegexp(ValueError, "cannot be less than"):
chol_w = distributions.WishartCholesky(
df=2, scale=chol_scale, validate_args=False)
with self.assertRaisesRegexp(TypeError, "not a floating-point type"):
chol_w = distributions.WishartCholesky(
df=4.,
scale=np.asarray(
chol_scale, dtype=np.int32),
validate_args=False)
if __name__ == "__main__":
test.main()
| apache-2.0 |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/django/shortcuts/__init__.py | 254 | 4642 | """
This module collects helper functions and classes that "span" multiple levels
of MVC. In other words, these functions/classes introduce controlled coupling
for convenience's sake.
"""
from django.template import loader, RequestContext
from django.http import HttpResponse, Http404
from django.http import HttpResponseRedirect, HttpResponsePermanentRedirect
from django.db.models.manager import Manager
from django.db.models.query import QuerySet
from django.core import urlresolvers
def render_to_response(*args, **kwargs):
"""
Returns a HttpResponse whose content is filled with the result of calling
django.template.loader.render_to_string() with the passed arguments.
"""
httpresponse_kwargs = {'mimetype': kwargs.pop('mimetype', None)}
return HttpResponse(loader.render_to_string(*args, **kwargs), **httpresponse_kwargs)
def render(request, *args, **kwargs):
"""
Returns a HttpResponse whose content is filled with the result of calling
django.template.loader.render_to_string() with the passed arguments.
Uses a RequestContext by default.
"""
httpresponse_kwargs = {
'content_type': kwargs.pop('content_type', None),
'status': kwargs.pop('status', None),
}
if 'context_instance' in kwargs:
context_instance = kwargs.pop('context_instance')
if kwargs.get('current_app', None):
raise ValueError('If you provide a context_instance you must '
'set its current_app before calling render()')
else:
current_app = kwargs.pop('current_app', None)
context_instance = RequestContext(request, current_app=current_app)
kwargs['context_instance'] = context_instance
return HttpResponse(loader.render_to_string(*args, **kwargs),
**httpresponse_kwargs)
def redirect(to, *args, **kwargs):
"""
Returns an HttpResponseRedirect to the appropriate URL for the arguments
passed.
The arguments could be:
* A model: the model's `get_absolute_url()` function will be called.
* A view name, possibly with arguments: `urlresolvers.reverse()` will
be used to reverse-resolve the name.
* A URL, which will be used as-is for the redirect location.
By default issues a temporary redirect; pass permanent=True to issue a
permanent redirect
"""
if kwargs.pop('permanent', False):
redirect_class = HttpResponsePermanentRedirect
else:
redirect_class = HttpResponseRedirect
# If it's a model, use get_absolute_url()
if hasattr(to, 'get_absolute_url'):
return redirect_class(to.get_absolute_url())
# Next try a reverse URL resolution.
try:
return redirect_class(urlresolvers.reverse(to, args=args, kwargs=kwargs))
except urlresolvers.NoReverseMatch:
# If this is a callable, re-raise.
if callable(to):
raise
# If this doesn't "feel" like a URL, re-raise.
if '/' not in to and '.' not in to:
raise
# Finally, fall back and assume it's a URL
return redirect_class(to)
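# Illustrative sketch, not part of the original module: the three accepted argument
# styles (view name, model instance, raw URL); names are hypothetical.
#
#     return redirect('some-view-name', foo='bar')     # reversed by view name
#     return redirect(some_model_instance)             # uses get_absolute_url()
#     return redirect('/some/path/', permanent=True)   # raw URL, permanent redirect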
def _get_queryset(klass):
"""
Returns a QuerySet from a Model, Manager, or QuerySet. Created to make
get_object_or_404 and get_list_or_404 more DRY.
"""
if isinstance(klass, QuerySet):
return klass
elif isinstance(klass, Manager):
manager = klass
else:
manager = klass._default_manager
return manager.all()
def get_object_or_404(klass, *args, **kwargs):
"""
Uses get() to return an object, or raises a Http404 exception if the object
does not exist.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the get() query.
Note: Like with get(), a MultipleObjectsReturned will be raised if more than one
object is found.
"""
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
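# Illustrative sketch, not part of the original module: typical view usage (the
# model, template, and field names are hypothetical).
#
#     def article_detail(request, pk):
#         article = get_object_or_404(Article, pk=pk)
#         return render(request, 'article_detail.html', {'article': article})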
def get_list_or_404(klass, *args, **kwargs):
"""
Uses filter() to return a list of objects, or raise a Http404 exception if
the list is empty.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the filter() query.
"""
queryset = _get_queryset(klass)
obj_list = list(queryset.filter(*args, **kwargs))
if not obj_list:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
return obj_list
| agpl-3.0 |
ychfan/tensorflow | tensorflow/python/ops/distributions/identity_bijector.py | 73 | 1840 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Identity bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.ops.distributions import bijector
__all__ = [
"Identity",
]
class Identity(bijector.Bijector):
"""Compute Y = g(X) = X.
Example Use:
```python
# Create the Y=g(X)=X transform which is intended for Tensors with 1 batch
# ndim and 1 event ndim (i.e., vector of vectors).
identity = Identity(event_ndims=1)
x = [[1., 2],
[3, 4]]
x == identity.forward(x) == identity.inverse(x)
```
"""
def __init__(self, validate_args=False, event_ndims=0, name="identity"):
super(Identity, self).__init__(
is_constant_jacobian=True,
event_ndims=event_ndims,
validate_args=validate_args,
name=name)
def _forward(self, x):
return x
def _inverse(self, y):
return y
def _inverse_log_det_jacobian(self, y):
return constant_op.constant(0., dtype=y.dtype)
def _forward_log_det_jacobian(self, x):
return constant_op.constant(0., dtype=x.dtype)
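# Illustrative note (an editorial addition, not part of the original file):
# because the transform leaves x unchanged, both Jacobian log-determinants are
# the constant 0, e.g.
#
#     identity = Identity(event_ndims=1)
#     identity.forward_log_det_jacobian([[1., 2.]])   # ==> 0.
#     identity.inverse_log_det_jacobian([[1., 2.]])   # ==> 0.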
| apache-2.0 |
perillamint/Kite2 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
return defaultdict(autodict)
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
# nothing to do, really
pass
def define_symbolic_value(event_name, field_name, value, field_str):
symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
string = ""
if flag_fields[event_name][field_name]:
print_delim = 0
keys = flag_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string += flag_fields[event_name][field_name]['values'][idx]
break
if idx and (value & idx) == idx:
if print_delim and flag_fields[event_name][field_name]['delim']:
string += " " + flag_fields[event_name][field_name]['delim'] + " "
string += flag_fields[event_name][field_name]['values'][idx]
print_delim = 1
value &= ~idx
return string
def symbol_str(event_name, field_name, value):
string = ""
if symbolic_fields[event_name][field_name]:
keys = symbolic_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string = symbolic_fields[event_name][field_name]['values'][idx]
break
if (value == idx):
string = symbolic_fields[event_name][field_name]['values'][idx]
break
return string
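# Illustrative usage (an editorial addition, not part of the original script):
# perf trace scripts register flag values up front and then render raw field
# values with flag_str(). The event and field names below are hypothetical.
#
#     define_flag_field("irq__softirq_entry", "vec", "|")
#     define_flag_value("irq__softirq_entry", "vec", 1, "TIMER")
#     define_flag_value("irq__softirq_entry", "vec", 2, "NET_TX")
#     flag_str("irq__softirq_entry", "vec", 3)   # ==> "TIMER | NET_TX"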
trace_flags = { 0x00: "NONE", \
0x01: "IRQS_OFF", \
0x02: "IRQS_NOSUPPORT", \
0x04: "NEED_RESCHED", \
0x08: "HARDIRQ", \
0x10: "SOFTIRQ" }
def trace_flag_str(value):
string = ""
print_delim = 0
keys = trace_flags.keys()
for idx in keys:
if not value and not idx:
string += "NONE"
break
if idx and (value & idx) == idx:
if print_delim:
string += " | ";
string += trace_flags[idx]
print_delim = 1
value &= ~idx
return string
def taskState(state):
states = {
0 : "R",
1 : "S",
2 : "D",
64: "DEAD"
}
if state not in states:
return "Unknown"
return states[state]
class EventHeaders:
def __init__(self, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
self.cpu = common_cpu
self.secs = common_secs
self.nsecs = common_nsecs
self.pid = common_pid
self.comm = common_comm
def ts(self):
return (self.secs * (10 ** 9)) + self.nsecs
def ts_format(self):
return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
ol-loginov/intellij-community | python/lib/Lib/encodings/ascii.py | 858 | 1248 | """ Python 'ascii' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
# Note: Binding these as C functions will result in the class not
# converting them to methods. This is intended.
encode = codecs.ascii_encode
decode = codecs.ascii_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.ascii_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.ascii_decode(input, self.errors)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
class StreamConverter(StreamWriter,StreamReader):
encode = codecs.ascii_decode
decode = codecs.ascii_encode
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='ascii',
encode=Codec.encode,
decode=Codec.decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
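# Illustrative usage (an editorial addition): this module is normally reached
# through the codec registry by name rather than imported directly, e.g.
#
#     import codecs
#     encoded, consumed = codecs.lookup('ascii').encode(u'abc')
#     # encoded == 'abc', consumed == 3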
| apache-2.0 |
ycaihua/kbengine | kbe/src/lib/python/Lib/test/test_xml_etree_c.py | 80 | 3005 | # xml.etree test for cElementTree
import sys, struct
from test import support
from test.support import import_fresh_module
import types
import unittest
cET = import_fresh_module('xml.etree.ElementTree',
fresh=['_elementtree'])
cET_alias = import_fresh_module('xml.etree.cElementTree',
fresh=['_elementtree', 'xml.etree'])
class MiscTests(unittest.TestCase):
# Issue #8651.
@support.bigmemtest(size=support._2G + 100, memuse=1, dry_run=False)
def test_length_overflow(self, size):
data = b'x' * size
parser = cET.XMLParser()
try:
self.assertRaises(OverflowError, parser.feed, data)
finally:
data = None
@unittest.skipUnless(cET, 'requires _elementtree')
class TestAliasWorking(unittest.TestCase):
# Test that the cET alias module is alive
def test_alias_working(self):
e = cET_alias.Element('foo')
self.assertEqual(e.tag, 'foo')
@unittest.skipUnless(cET, 'requires _elementtree')
@support.cpython_only
class TestAcceleratorImported(unittest.TestCase):
# Test that the C accelerator was imported, as expected
def test_correct_import_cET(self):
# SubElement is a function so it retains _elementtree as its module.
self.assertEqual(cET.SubElement.__module__, '_elementtree')
def test_correct_import_cET_alias(self):
self.assertEqual(cET_alias.SubElement.__module__, '_elementtree')
def test_parser_comes_from_C(self):
# The type of methods defined in Python code is types.FunctionType,
# while the type of methods defined inside _elementtree is
# <class 'wrapper_descriptor'>
self.assertNotIsInstance(cET.Element.__init__, types.FunctionType)
@unittest.skipUnless(cET, 'requires _elementtree')
@support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.elementsize = support.calcobjsize('5P')
# extra
self.extra = struct.calcsize('PiiP4P')
check_sizeof = support.check_sizeof
def test_element(self):
e = cET.Element('a')
self.check_sizeof(e, self.elementsize)
def test_element_with_attrib(self):
e = cET.Element('a', href='about:')
self.check_sizeof(e, self.elementsize + self.extra)
def test_element_with_children(self):
e = cET.Element('a')
for i in range(5):
cET.SubElement(e, 'span')
# should have space for 8 children now
self.check_sizeof(e, self.elementsize + self.extra +
struct.calcsize('8P'))
def test_main():
from test import test_xml_etree, test_xml_etree_c
# Run the tests specific to the C implementation
support.run_unittest(
MiscTests,
TestAliasWorking,
TestAcceleratorImported,
SizeofTest,
)
# Run the same test suite as the Python module
test_xml_etree.test_main(module=cET)
if __name__ == '__main__':
test_main()
| lgpl-3.0 |
vmindru/ansible | test/units/modules/network/enos/enos_module.py | 31 | 3229 | # Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
from units.compat import unittest
from units.compat.mock import patch
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class AnsibleExitJson(Exception):
pass
class AnsibleFailJson(Exception):
pass
class TestEnosModule(unittest.TestCase):
def execute_module(self, failed=False, changed=False, commands=None,
sort=True, defaults=False):
self.load_fixtures(commands)
if failed:
result = self.failed()
self.assertTrue(result['failed'], result)
else:
result = self.changed(changed)
self.assertEqual(result['changed'], changed, result)
if commands is not None:
if sort:
self.assertEqual(sorted(commands), sorted(result['commands']),
result['commands'])
else:
self.assertEqual(commands, result['commands'],
result['commands'])
return result
def failed(self):
def fail_json(*args, **kwargs):
kwargs['failed'] = True
raise AnsibleFailJson(kwargs)
with patch.object(basic.AnsibleModule, 'fail_json', fail_json):
with self.assertRaises(AnsibleFailJson) as exc:
self.module.main()
result = exc.exception.args[0]
self.assertTrue(result['failed'], result)
return result
def changed(self, changed=False):
def exit_json(*args, **kwargs):
if 'changed' not in kwargs:
kwargs['changed'] = False
raise AnsibleExitJson(kwargs)
with patch.object(basic.AnsibleModule, 'exit_json', exit_json):
with self.assertRaises(AnsibleExitJson) as exc:
self.module.main()
result = exc.exception.args[0]
self.assertEqual(result['changed'], changed, result)
return result
def load_fixtures(self, commands=None):
pass
| gpl-3.0 |
wemanuel/smry | smry/server-auth/ls/google-cloud-sdk/lib/requests/packages/urllib3/response.py | 196 | 12240 | import zlib
import io
from socket import timeout as SocketTimeout
from ._collections import HTTPHeaderDict
from .exceptions import ProtocolError, DecodeError, ReadTimeoutError
from .packages.six import string_types as basestring, binary_type, PY3
from .connection import HTTPException, BaseSSLError
from .util.response import is_fp_closed
class DeflateDecoder(object):
def __init__(self):
self._first_try = True
self._data = binary_type()
self._obj = zlib.decompressobj()
def __getattr__(self, name):
return getattr(self._obj, name)
def decompress(self, data):
if not data:
return data
if not self._first_try:
return self._obj.decompress(data)
self._data += data
try:
return self._obj.decompress(data)
except zlib.error:
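            # The first attempt assumed a zlib header; some servers send raw
            # DEFLATE data instead, so fall back to a raw-deflate decompressor
            # (-zlib.MAX_WBITS) and replay the bytes buffered in self._data.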
self._first_try = False
self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
try:
return self.decompress(self._data)
finally:
self._data = None
class GzipDecoder(object):
def __init__(self):
self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
def __getattr__(self, name):
return getattr(self._obj, name)
def decompress(self, data):
if not data:
return data
return self._obj.decompress(data)
def _get_decoder(mode):
if mode == 'gzip':
return GzipDecoder()
return DeflateDecoder()
class HTTPResponse(io.IOBase):
"""
HTTP Response container.
Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
loaded and decoded on-demand when the ``data`` property is accessed. This
class is also compatible with the Python standard library's :mod:`io`
module, and can hence be treated as a readable object in the context of that
framework.
Extra parameters for behaviour not present in httplib.HTTPResponse:
:param preload_content:
If True, the response's body will be preloaded during construction.
:param decode_content:
        If True, will attempt to decode the body based on the
        'content-encoding' header ('gzip' and 'deflate' are supported); if
        False, decoding is skipped and the raw data is used instead.
:param original_response:
When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
object, it's convenient to include the original for debug purposes. It's
otherwise unused.
"""
CONTENT_DECODERS = ['gzip', 'deflate']
REDIRECT_STATUSES = [301, 302, 303, 307, 308]
def __init__(self, body='', headers=None, status=0, version=0, reason=None,
strict=0, preload_content=True, decode_content=True,
original_response=None, pool=None, connection=None):
if isinstance(headers, HTTPHeaderDict):
self.headers = headers
else:
self.headers = HTTPHeaderDict(headers)
self.status = status
self.version = version
self.reason = reason
self.strict = strict
self.decode_content = decode_content
self._decoder = None
self._body = None
self._fp = None
self._original_response = original_response
self._fp_bytes_read = 0
if body and isinstance(body, (basestring, binary_type)):
self._body = body
self._pool = pool
self._connection = connection
if hasattr(body, 'read'):
self._fp = body
if preload_content and not self._body:
self._body = self.read(decode_content=decode_content)
def get_redirect_location(self):
"""
Should we redirect and where to?
:returns: Truthy redirect location string if we got a redirect status
code and valid location. ``None`` if redirect status and no
location. ``False`` if not a redirect status code.
"""
if self.status in self.REDIRECT_STATUSES:
return self.headers.get('location')
return False
def release_conn(self):
if not self._pool or not self._connection:
return
self._pool._put_conn(self._connection)
self._connection = None
@property
def data(self):
        # For backwards-compat with urllib3 0.4 and earlier.
if self._body:
return self._body
if self._fp:
return self.read(cache_content=True)
def tell(self):
"""
Obtain the number of bytes pulled over the wire so far. May differ from
the amount of content returned by :meth:``HTTPResponse.read`` if bytes
        are encoded on the wire (e.g., compressed).
"""
return self._fp_bytes_read
def read(self, amt=None, decode_content=None, cache_content=False):
"""
Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
parameters: ``decode_content`` and ``cache_content``.
:param amt:
How much of the content to read. If specified, caching is skipped
because it doesn't make sense to cache partial content as the full
response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param cache_content:
If True, will save the returned data such that the same result is
returned despite of the state of the underlying file object. This
is useful if you want the ``.data`` property to continue working
after having ``.read()`` the file object. (Overridden if ``amt`` is
set.)
"""
# Note: content-encoding value should be case-insensitive, per RFC 7230
# Section 3.2
content_encoding = self.headers.get('content-encoding', '').lower()
if self._decoder is None:
if content_encoding in self.CONTENT_DECODERS:
self._decoder = _get_decoder(content_encoding)
if decode_content is None:
decode_content = self.decode_content
if self._fp is None:
return
flush_decoder = False
try:
try:
if amt is None:
# cStringIO doesn't like amt=None
data = self._fp.read()
flush_decoder = True
else:
cache_content = False
data = self._fp.read(amt)
if amt != 0 and not data: # Platform-specific: Buggy versions of Python.
# Close the connection when no data is returned
#
# This is redundant to what httplib/http.client _should_
# already do. However, versions of python released before
# December 15, 2012 (http://bugs.python.org/issue16298) do
# not properly close the connection in all cases. There is
# no harm in redundantly calling close.
self._fp.close()
flush_decoder = True
except SocketTimeout:
# FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
# there is yet no clean way to get at it from this context.
raise ReadTimeoutError(self._pool, None, 'Read timed out.')
except BaseSSLError as e:
# FIXME: Is there a better way to differentiate between SSLErrors?
if 'read operation timed out' not in str(e): # Defensive:
# This shouldn't happen but just in case we're missing an edge
# case, let's avoid swallowing SSL errors.
raise
raise ReadTimeoutError(self._pool, None, 'Read timed out.')
except HTTPException as e:
# This includes IncompleteRead.
raise ProtocolError('Connection broken: %r' % e, e)
self._fp_bytes_read += len(data)
try:
if decode_content and self._decoder:
data = self._decoder.decompress(data)
except (IOError, zlib.error) as e:
raise DecodeError(
"Received response with content-encoding: %s, but "
"failed to decode it." % content_encoding, e)
if flush_decoder and decode_content and self._decoder:
buf = self._decoder.decompress(binary_type())
data += buf + self._decoder.flush()
if cache_content:
self._body = data
return data
finally:
if self._original_response and self._original_response.isclosed():
self.release_conn()
def stream(self, amt=2**16, decode_content=None):
"""
A generator wrapper for the read() method. A call will block until
``amt`` bytes have been read from the connection or until the
connection is closed.
:param amt:
How much of the content to read. The generator will return up to
            this much data per iteration, but may return less. This is particularly
likely when using compressed data. However, the empty string will
never be returned.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
while not is_fp_closed(self._fp):
data = self.read(amt=amt, decode_content=decode_content)
if data:
yield data
@classmethod
def from_httplib(ResponseCls, r, **response_kw):
"""
Given an :class:`httplib.HTTPResponse` instance ``r``, return a
corresponding :class:`urllib3.response.HTTPResponse` object.
Remaining parameters are passed to the HTTPResponse constructor, along
with ``original_response=r``.
"""
headers = r.msg
if not isinstance(headers, HTTPHeaderDict):
if PY3: # Python 3
headers = HTTPHeaderDict(headers.items())
else: # Python 2
headers = HTTPHeaderDict.from_httplib(headers)
# HTTPResponse objects in Python 3 don't have a .strict attribute
strict = getattr(r, 'strict', 0)
resp = ResponseCls(body=r,
headers=headers,
status=r.status,
version=r.version,
reason=r.reason,
strict=strict,
original_response=r,
**response_kw)
return resp
# Backwards-compatibility methods for httplib.HTTPResponse
def getheaders(self):
return self.headers
def getheader(self, name, default=None):
return self.headers.get(name, default)
# Overrides from io.IOBase
def close(self):
if not self.closed:
self._fp.close()
@property
def closed(self):
if self._fp is None:
return True
elif hasattr(self._fp, 'closed'):
return self._fp.closed
elif hasattr(self._fp, 'isclosed'): # Python 2
return self._fp.isclosed()
else:
return True
def fileno(self):
if self._fp is None:
raise IOError("HTTPResponse has no file to get a fileno from")
elif hasattr(self._fp, "fileno"):
return self._fp.fileno()
else:
raise IOError("The file-like object this HTTPResponse is wrapped "
"around has no file descriptor")
def flush(self):
if self._fp is not None and hasattr(self._fp, 'flush'):
return self._fp.flush()
def readable(self):
# This method is required for `io` module compatibility.
return True
def readinto(self, b):
# This method is required for `io` module compatibility.
temp = self.read(len(b))
if len(temp) == 0:
return 0
else:
b[:len(temp)] = temp
return len(temp)
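# Illustrative usage (an editorial addition, not part of urllib3): a sketch of
# wrapping an httplib response and streaming it in chunks. ``conn`` is assumed
# to be an httplib.HTTPConnection with a request already issued, and
# ``handle`` is a hypothetical consumer.
#
#     raw = conn.getresponse()
#     response = HTTPResponse.from_httplib(raw, preload_content=False)
#     for chunk in response.stream(8192, decode_content=True):
#         handle(chunk)
#     response.release_conn()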
| apache-2.0 |
rbaindourov/v8-inspector | Source/chrome/tools/metrics/histograms/PRESUBMIT.py | 43 | 1430 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into depot_tools.
"""
def CheckChange(input_api, output_api):
"""Checks that histograms.xml is pretty-printed and well-formatted."""
for f in input_api.AffectedTextFiles():
p = f.AbsoluteLocalPath()
if (input_api.basename(p) == 'histograms.xml'
and input_api.os_path.dirname(p) == input_api.PresubmitLocalPath()):
cwd = input_api.os_path.dirname(p)
exit_code = input_api.subprocess.call(
['python', 'pretty_print.py', '--presubmit'], cwd=cwd)
if exit_code != 0:
return [output_api.PresubmitError(
'histograms.xml is not formatted correctly; run pretty_print.py '
'to fix')]
exit_code = input_api.subprocess.call(
['python', 'validate_format.py'], cwd=cwd)
if exit_code != 0:
return [output_api.PresubmitError(
'histograms.xml is not well formatted; run validate_format.py '
'and fix the reported errors')]
return []
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)
| bsd-3-clause |
pjg101/SickRage | lib/github/tests/Label.py | 8 | 2813 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. #
# http://pygithub.github.io/PyGithub/v1/index.html #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import Framework
class Label(Framework.TestCase):
def setUp(self):
Framework.TestCase.setUp(self)
self.label = self.g.get_user().get_repo("PyGithub").get_label("Bug")
def testAttributes(self):
self.assertEqual(self.label.color, "e10c02")
self.assertEqual(self.label.name, "Bug")
self.assertEqual(self.label.url, "https://api.github.com/repos/jacquev6/PyGithub/labels/Bug")
        # test __repr__() based on these attributes
self.assertEqual(self.label.__repr__(), 'Label(name="Bug")')
def testEdit(self):
self.label.edit("LabelEditedByPyGithub", "0000ff")
self.assertEqual(self.label.color, "0000ff")
self.assertEqual(self.label.name, "LabelEditedByPyGithub")
self.assertEqual(self.label.url, "https://api.github.com/repos/jacquev6/PyGithub/labels/LabelEditedByPyGithub")
def testDelete(self):
self.label.delete()
| gpl-3.0 |
cesarmarinhorj/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/checkout/scm/detection.py | 164 | 3834 | # Copyright (c) 2009, 2010, 2011 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.common.system.filesystem import FileSystem
from webkitpy.common.system.executive import Executive
from .svn import SVN
from .git import Git
_log = logging.getLogger(__name__)
class SCMDetector(object):
def __init__(self, filesystem, executive):
self._filesystem = filesystem
self._executive = executive
def default_scm(self, patch_directories=None):
"""Return the default SCM object as determined by the CWD and running code.
Returns the default SCM object for the current working directory; if the
CWD is not in a checkout, then we attempt to figure out if the SCM module
itself is part of a checkout, and return that one. If neither is part of
a checkout, None is returned.
"""
cwd = self._filesystem.getcwd()
scm_system = self.detect_scm_system(cwd, patch_directories)
if not scm_system:
script_directory = self._filesystem.dirname(self._filesystem.path_to_module(self.__module__))
scm_system = self.detect_scm_system(script_directory, patch_directories)
if scm_system:
_log.info("The current directory (%s) is not a WebKit checkout, using %s" % (cwd, scm_system.checkout_root))
else:
raise Exception("FATAL: Failed to determine the SCM system for either %s or %s" % (cwd, script_directory))
return scm_system
def detect_scm_system(self, path, patch_directories=None):
absolute_path = self._filesystem.abspath(path)
if patch_directories == []:
patch_directories = None
if SVN.in_working_directory(absolute_path, executive=self._executive):
return SVN(cwd=absolute_path, patch_directories=patch_directories, filesystem=self._filesystem, executive=self._executive)
if Git.in_working_directory(absolute_path, executive=self._executive):
return Git(cwd=absolute_path, filesystem=self._filesystem, executive=self._executive)
return None
# FIXME: These free functions are all deprecated:
def detect_scm_system(path, patch_directories=None):
return SCMDetector(FileSystem(), Executive()).detect_scm_system(path, patch_directories)
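# Illustrative usage (an editorial addition, not part of webkitpy): detecting
# the SCM for the current checkout with the detector's real collaborators.
#
#     detector = SCMDetector(FileSystem(), Executive())
#     scm = detector.default_scm()
#     print scm.checkout_root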
| bsd-3-clause |
orchidinfosys/odoo | addons/l10n_be_hr_payroll/l10n_be_hr_payroll.py | 47 | 2143 | #-*- coding:utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
class hr_contract_be(osv.osv):
_inherit = 'hr.contract'
_columns = {
'travel_reimbursement_amount': fields.float('Reimbursement of travel expenses', digits_compute=dp.get_precision('Payroll')),
'car_company_amount': fields.float('Company car employer', digits_compute=dp.get_precision('Payroll')),
'car_employee_deduction': fields.float('Company Car Deduction for Worker', digits_compute=dp.get_precision('Payroll')),
'misc_onss_deduction': fields.float('Miscellaneous exempt ONSS ', digits_compute=dp.get_precision('Payroll')),
'meal_voucher_amount': fields.float('Check Value Meal ', digits_compute=dp.get_precision('Payroll')),
'meal_voucher_employee_deduction': fields.float('Check Value Meal - by worker ', digits_compute=dp.get_precision('Payroll')),
'insurance_employee_deduction': fields.float('Insurance Group - by worker ', digits_compute=dp.get_precision('Payroll')),
'misc_advantage_amount': fields.float('Benefits of various nature ', digits_compute=dp.get_precision('Payroll')),
'additional_net_amount': fields.float('Net supplements', digits_compute=dp.get_precision('Payroll')),
'retained_net_amount': fields.float('Net retained ', digits_compute=dp.get_precision('Payroll')),
}
class hr_employee_be(osv.osv):
_inherit = 'hr.employee'
_columns = {
'spouse_fiscal_status': fields.selection([('without income','Without Income'),('with income','With Income')], 'Tax status for spouse'),
'disabled_spouse_bool': fields.boolean('Disabled Spouse', help="if recipient spouse is declared disabled by law"),
'disabled_children_bool': fields.boolean('Disabled Children', help="if recipient children is/are declared disabled by law"),
'resident_bool': fields.boolean('Nonresident', help="if recipient lives in a foreign country"),
'disabled_children_number': fields.integer('Number of disabled children'),
}
| gpl-3.0 |
WorldMG/production-email | lib/flask/logging.py | 838 | 1398 | # -*- coding: utf-8 -*-
"""
flask.logging
~~~~~~~~~~~~~
Implements the logging support for Flask.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from logging import getLogger, StreamHandler, Formatter, getLoggerClass, DEBUG
def create_logger(app):
"""Creates a logger for the given application. This logger works
    similarly to a regular Python logger but changes the effective logging
level based on the application's debug flag. Furthermore this
function also removes all attached handlers in case there was a
logger with the log name before.
"""
Logger = getLoggerClass()
class DebugLogger(Logger):
def getEffectiveLevel(x):
if x.level == 0 and app.debug:
return DEBUG
return Logger.getEffectiveLevel(x)
class DebugHandler(StreamHandler):
def emit(x, record):
            if app.debug:
                StreamHandler.emit(x, record)
handler = DebugHandler()
handler.setLevel(DEBUG)
handler.setFormatter(Formatter(app.debug_log_format))
logger = getLogger(app.logger_name)
# just in case that was not a new logger, get rid of all the handlers
# already attached to it.
del logger.handlers[:]
logger.__class__ = DebugLogger
logger.addHandler(handler)
return logger
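# Illustrative usage (an editorial addition, not part of Flask): Flask calls
# create_logger() itself when ``app.logger`` is first accessed, but it can be
# exercised directly as well.
#
#     from flask import Flask
#     app = Flask(__name__)
#     logger = create_logger(app)
#     logger.debug("only emitted when app.debug is True")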
| apache-2.0 |
awkspace/ansible | lib/ansible/modules/network/f5/bigip_monitor_external.py | 14 | 23275 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_monitor_external
short_description: Manages external LTM monitors on a BIG-IP
description:
- Manages external LTM monitors on a BIG-IP.
version_added: 2.6
options:
name:
description:
- Specifies the name of the monitor.
required: True
description:
description:
- The description of the monitor.
version_added: 2.7
parent:
description:
- The parent template of this monitor template. Once this value has
        been set, it cannot be changed. By default, this value is the C(external)
        parent on the C(Common) partition.
default: /Common/external
arguments:
description:
- Specifies any command-line arguments that the script requires.
ip:
description:
- IP address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, then the default value will be
'*'.
port:
description:
- Port address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, then the default value will be
'*'. Note that if specifying an IP address, a value between 1 and 65535
must be specified.
external_program:
description:
- Specifies the name of the file for the monitor to use. In order to reference
a file, you must first import it using options on the System > File Management > External
Monitor Program File List > Import screen. The BIG-IP system automatically
places the file in the proper location on the file system.
interval:
description:
- The interval specifying how frequently the monitor instance of this
template will run. If this parameter is not provided when creating
a new monitor, then the default value will be 5. This value B(must)
be less than the C(timeout) value.
timeout:
description:
- The number of seconds in which the node or service must respond to
the monitor request.
- If the target responds within the set time period, it is considered up.
- If the target does not respond within the set time period, it is considered
down.
      - You can change this number to any number you want; however, it should be
        3 times the interval number of seconds plus 1 second.
- If this parameter is not provided when creating a new monitor, then the
default value will be C(16).
variables:
description:
- Specifies any variables that the script requires.
- Note that double quotes in values will be suppressed.
partition:
description:
- Device partition to manage resources on.
default: Common
state:
description:
- When C(present), ensures that the monitor exists.
- When C(absent), ensures the monitor is removed.
default: present
choices:
- present
- absent
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create an external monitor
bigip_monitor_external:
name: foo
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Create an external monitor with variables
bigip_monitor_external:
name: foo
timeout: 10
variables:
var1: foo
var2: bar
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Add a variable to an existing set
bigip_monitor_external:
name: foo
timeout: 10
variables:
var1: foo
var2: bar
cat: dog
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
'''
RETURN = r'''
parent:
description: New parent template of the monitor.
returned: changed
type: str
sample: external
description:
description: The description of the monitor.
returned: changed
type: str
sample: Important Monitor
ip:
description: The new IP of IP/port definition.
returned: changed
type: str
sample: 10.12.13.14
interval:
description: The new interval in which to run the monitor check.
returned: changed
type: int
sample: 2
timeout:
description: The new timeout in which the remote system must respond to the monitor.
returned: changed
type: int
sample: 10
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.six import iteritems
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.common import compare_dictionary
from library.module_utils.network.f5.ipaddress import is_valid_ip
from library.module_utils.network.f5.compare import cmp_str_with_none
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import compare_dictionary
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.ipaddress import is_valid_ip
from ansible.module_utils.network.f5.compare import cmp_str_with_none
class Parameters(AnsibleF5Parameters):
api_map = {
'defaultsFrom': 'parent',
'apiRawValues': 'variables',
'run': 'external_program',
'args': 'arguments',
}
api_attributes = [
'defaultsFrom',
'interval',
'timeout',
'destination',
'run',
'args',
'description',
]
returnables = [
'parent',
'ip',
'port',
'interval',
'timeout',
'variables',
'external_program',
'arguments',
'description',
]
updatables = [
'destination',
'interval',
'timeout',
'variables',
'external_program',
'arguments',
'description',
]
@property
def destination(self):
if self.ip is None and self.port is None:
return None
destination = '{0}:{1}'.format(self.ip, self.port)
return destination
@destination.setter
def destination(self, value):
ip, port = value.split(':')
self._values['ip'] = ip
self._values['port'] = port
@property
def interval(self):
if self._values['interval'] is None:
return None
# Per BZ617284, the BIG-IP UI does not raise a warning about this.
# So I do
        if int(self._values['interval']) < 1 or int(self._values['interval']) > 86400:
raise F5ModuleError(
"Interval value must be between 1 and 86400"
)
return int(self._values['interval'])
@property
def timeout(self):
if self._values['timeout'] is None:
return None
return int(self._values['timeout'])
@property
def ip(self):
if self._values['ip'] is None:
return None
if self._values['ip'] in ['*', '0.0.0.0']:
return '*'
elif is_valid_ip(self._values['ip']):
return self._values['ip']
else:
raise F5ModuleError(
"The provided 'ip' parameter is not an IP address."
)
@property
def port(self):
if self._values['port'] is None:
return None
elif self._values['port'] == '*':
return '*'
return int(self._values['port'])
@property
def parent(self):
if self._values['parent'] is None:
return None
result = fq_name(self.partition, self._values['parent'])
return result
@property
def type(self):
return 'external'
class ApiParameters(Parameters):
@property
def description(self):
if self._values['description'] in [None, 'none']:
return None
return self._values['description']
@property
def variables(self):
if self._values['variables'] is None:
return None
pattern = r'^userDefined\s(?P<key>.*)'
result = {}
for k, v in iteritems(self._values['variables']):
matches = re.match(pattern, k)
if not matches:
raise F5ModuleError(
"Unable to find the variable 'key' in the API payload."
)
key = matches.group('key')
result[key] = v
return result
class ModuleParameters(Parameters):
@property
def description(self):
if self._values['description'] is None:
return None
elif self._values['description'] in ['none', '']:
return ''
return self._values['description']
@property
def variables(self):
if self._values['variables'] is None:
return None
result = {}
for k, v in iteritems(self._values['variables']):
result[k] = str(v).replace('"', '')
return result
@property
def external_program(self):
if self._values['external_program'] is None:
return None
return fq_name(self.partition, self._values['external_program'])
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def parent(self):
if self.want.parent != self.have.parent:
raise F5ModuleError(
"The parent monitor cannot be changed"
)
@property
def destination(self):
if self.want.ip is None and self.want.port is None:
return None
if self.want.port is None:
self.want.update({'port': self.have.port})
if self.want.ip is None:
self.want.update({'ip': self.have.ip})
if self.want.port in [None, '*'] and self.want.ip != '*':
raise F5ModuleError(
"Specifying an IP address requires that a port number be specified"
)
if self.want.destination != self.have.destination:
return self.want.destination
@property
def interval(self):
if self.want.timeout is not None and self.want.interval is not None:
if self.want.interval >= self.want.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
elif self.want.timeout is not None:
if self.have.interval >= self.want.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
elif self.want.interval is not None:
if self.want.interval >= self.have.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
if self.want.interval != self.have.interval:
return self.want.interval
@property
def variables(self):
if self.want.variables is None:
return None
if self.have.variables is None:
return dict(
variables=self.want.variables
)
result = dict()
different = compare_dictionary(self.want.variables, self.have.variables)
if not different:
return None
for k, v in iteritems(self.want.variables):
if k in self.have.variables and v != self.have.variables[k]:
result[k] = v
elif k not in self.have.variables:
result[k] = v
for k, v in iteritems(self.have.variables):
if k not in self.want.variables:
result[k] = "none"
if result:
result = dict(
variables=result
)
return result
@property
def description(self):
return cmp_str_with_none(self.want.description, self.have.description)
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/external/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_changed_options()
self._set_default_creation_values()
if self.module.check_mode:
return True
self.create_on_device()
return True
def _set_default_creation_values(self):
if self.want.timeout is None:
self.want.update({'timeout': 16})
if self.want.interval is None:
self.want.update({'interval': 5})
if self.want.ip is None:
self.want.update({'ip': '*'})
if self.want.port is None:
self.want.update({'port': '*'})
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/external/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
if self.want.variables:
self.set_variable_on_device(self.want.variables)
def set_variable_on_device(self, commands):
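        # Build one tmsh command that applies every user-defined variable and
        # run it through the iControl REST util/bash endpoint; each value is
        # wrapped in escaped quotes so multi-word strings survive the shell.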
command = ' '.join(['user-defined {0} \\\"{1}\\\"'.format(k, v) for k, v in iteritems(commands)])
command = 'tmsh modify ltm monitor external {0} {1}'.format(self.want.name, command)
uri = "https://{0}:{1}/mgmt/tm/util/bash".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
args = dict(
command='run',
utilCmdArgs='-c "{0}"'.format(command)
)
resp = self.client.api.post(uri, json=args)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def update_on_device(self):
params = self.changes.api_params()
if params:
uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/external/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
if self.changes.variables:
self.set_variable_on_device(self.changes.variables)
def absent(self):
if self.exists():
return self.remove()
return False
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/external/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.delete(uri)
if resp.status == 200:
return True
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/external/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
parent=dict(default='/Common/external'),
description=dict(),
arguments=dict(),
ip=dict(),
port=dict(),
external_program=dict(),
interval=dict(type='int'),
timeout=dict(type='int'),
state=dict(
default='present',
choices=['present', 'absent']
),
variables=dict(type='dict'),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
client = F5RestClient(**module.params)
try:
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
exit_json(module, results, client)
except F5ModuleError as ex:
cleanup_tokens(client)
fail_json(module, ex, client)
if __name__ == '__main__':
main()
| gpl-3.0 |
imsparsh/python-for-android | python3-alpha/python3-src/Lib/test/test_msilib.py | 172 | 1468 | """ Test suite for the code in msilib """
import unittest
import os
from test.support import run_unittest, import_module
msilib = import_module('msilib')
class Test_make_id(unittest.TestCase):
#http://msdn.microsoft.com/en-us/library/aa369212(v=vs.85).aspx
"""The Identifier data type is a text string. Identifiers may contain the
ASCII characters A-Z (a-z), digits, underscores (_), or periods (.).
However, every identifier must begin with either a letter or an
underscore.
"""
def test_is_no_change_required(self):
self.assertEqual(
msilib.make_id("short"), "short")
self.assertEqual(
msilib.make_id("nochangerequired"), "nochangerequired")
self.assertEqual(
msilib.make_id("one.dot"), "one.dot")
self.assertEqual(
msilib.make_id("_"), "_")
self.assertEqual(
msilib.make_id("a"), "a")
#self.assertEqual(
# msilib.make_id(""), "")
def test_invalid_first_char(self):
self.assertEqual(
msilib.make_id("9.short"), "_9.short")
self.assertEqual(
msilib.make_id(".short"), "_.short")
def test_invalid_any_char(self):
self.assertEqual(
msilib.make_id(".s\x82ort"), "_.s_ort")
self.assertEqual (
msilib.make_id(".s\x82o?*+rt"), "_.s_o___rt")
def test_main():
run_unittest(__name__)
if __name__ == '__main__':
test_main()
| apache-2.0 |
P2poolBrasil/p2pool-dilmacoin | nattraverso/ipdiscover.py | 288 | 4180 | """
Generic methods to retrieve the IP address of the local machine.
TODO: Example
@author: Raphael Slinckx
@copyright: Copyright 2005
@license: LGPL
@contact: U{raphael@slinckx.net<mailto:raphael@slinckx.net>}
@version: 0.1.0
"""
__revision__ = "$id"
import random, socket, logging, itertools
from twisted.internet import defer, reactor
from twisted.internet.protocol import DatagramProtocol
from twisted.internet.error import CannotListenError
from nattraverso.utils import is_rfc1918_ip, is_bogus_ip
@defer.inlineCallbacks
def get_local_ip():
"""
Returns a deferred which will be called with a
    2-tuple (lan_flag, ip_address):
- lan_flag:
- True if it's a local network (RFC1918)
- False if it's a WAN address
- ip_address is the actual ip address
@return: A deferred called with the above defined tuple
@rtype: L{twisted.internet.defer.Deferred}
"""
# first we try a connected udp socket, then via multicast
logging.debug("Resolving dns to get udp ip")
try:
ipaddr = yield reactor.resolve('A.ROOT-SERVERS.NET')
except:
pass
else:
udpprot = DatagramProtocol()
port = reactor.listenUDP(0, udpprot)
udpprot.transport.connect(ipaddr, 7)
localip = udpprot.transport.getHost().host
port.stopListening()
if is_bogus_ip(localip):
raise RuntimeError, "Invalid IP address returned"
else:
defer.returnValue((is_rfc1918_ip(localip), localip))
logging.debug("Multicast ping to retrieve local IP")
ipaddr = yield _discover_multicast()
defer.returnValue((is_rfc1918_ip(ipaddr), ipaddr))
@defer.inlineCallbacks
def get_external_ip():
"""
Returns a deferred which will be called with a
    2-tuple (wan_flag, ip_address):
- wan_flag:
- True if it's a WAN address
- False if it's a LAN address
- None if it's a localhost (127.0.0.1) address
- ip_address: the most accessible ip address of this machine
@return: A deferred called with the above defined tuple
@rtype: L{twisted.internet.defer.Deferred}
"""
try:
local, ipaddr = yield get_local_ip()
except:
defer.returnValue((None, "127.0.0.1"))
if not local:
defer.returnValue((True, ipaddr))
logging.debug("Got local ip, trying to use upnp to get WAN ip")
import nattraverso.pynupnp
try:
ipaddr2 = yield nattraverso.pynupnp.get_external_ip()
except:
defer.returnValue((False, ipaddr))
else:
defer.returnValue((True, ipaddr2))
class _LocalNetworkMulticast(DatagramProtocol):
def __init__(self, nonce):
from p2pool.util import variable
self.nonce = nonce
self.address_received = variable.Event()
def datagramReceived(self, dgram, addr):
"""Datagram received, we callback the IP address."""
logging.debug("Received multicast pong: %s; addr:%r", dgram, addr)
if dgram != self.nonce:
return
self.address_received.happened(addr[0])
@defer.inlineCallbacks
def _discover_multicast():
"""
Local IP discovery protocol via multicast:
    - Broadcast 3 multicast ping packets containing a random nonce
- Wait for an answer
- Retrieve the ip address from the returning packet, which is ours
"""
nonce = str(random.randrange(2**64))
p = _LocalNetworkMulticast(nonce)
for attempt in itertools.count():
port = 11000 + random.randint(0, 5000)
try:
mcast = reactor.listenMulticast(port, p)
except CannotListenError:
if attempt >= 10:
raise
continue
else:
break
try:
yield mcast.joinGroup('239.255.255.250', socket.INADDR_ANY)
logging.debug("Sending multicast ping")
for i in xrange(3):
p.transport.write(nonce, ('239.255.255.250', port))
address, = yield p.address_received.get_deferred(5)
finally:
mcast.stopListening()
defer.returnValue(address)
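# Illustrative usage (an editorial addition, not part of nattraverso): both
# public helpers return deferreds, so results are consumed from a callback.
#
#     from twisted.internet import reactor
#
#     def _report((wan, ip)):
#         print "WAN reachable:", wan, "address:", ip
#
#     get_external_ip().addCallback(_report)
#     reactor.run()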
| gpl-3.0 |
chacoroot/planetary | addons/project/tests/test_project_flow.py | 116 | 8790 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.project.tests.test_project_base import TestProjectBase
from openerp.exceptions import AccessError
from openerp.tools import mute_logger
EMAIL_TPL = """Return-Path: <whatever-2a840@postmaster.twitter.com>
X-Original-To: {email_to}
Delivered-To: {email_to}
To: {email_to}
Received: by mail1.openerp.com (Postfix, from userid 10002)
id 5DF9ABFB2A; Fri, 10 Aug 2012 16:16:39 +0200 (CEST)
Message-ID: {msg_id}
Date: Tue, 29 Nov 2011 12:43:21 +0530
From: {email_from}
MIME-Version: 1.0
Subject: {subject}
Content-Type: text/plain; charset=ISO-8859-1; format=flowed
Hello,
This email should create a new entry in your module. Please check that it
effectively works.
Thanks,
--
Raoul Boitempoils
Integrator at Agrolait"""
class TestProjectFlow(TestProjectBase):
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
def test_00_project_process(self):
""" Testing project management """
cr, uid, user_projectuser_id, user_projectmanager_id, project_pigs_id = self.cr, self.uid, self.user_projectuser_id, self.user_projectmanager_id, self.project_pigs_id
# ProjectUser: set project as template -> raise
self.assertRaises(AccessError, self.project_project.set_template, cr, user_projectuser_id, [project_pigs_id])
# Other tests are done using a ProjectManager
project = self.project_project.browse(cr, user_projectmanager_id, project_pigs_id)
self.assertNotEqual(project.state, 'template', 'project: incorrect state, should not be a template')
# Set test project as template
self.project_project.set_template(cr, user_projectmanager_id, [project_pigs_id])
project.refresh()
self.assertEqual(project.state, 'template', 'project: set_template: project state should be template')
self.assertEqual(len(project.tasks), 0, 'project: set_template: project tasks should have been set inactive')
# Duplicate template
new_template_act = self.project_project.duplicate_template(cr, user_projectmanager_id, [project_pigs_id])
new_project = self.project_project.browse(cr, user_projectmanager_id, new_template_act['res_id'])
self.assertEqual(new_project.state, 'open', 'project: incorrect duplicate_template')
self.assertEqual(len(new_project.tasks), 2, 'project: duplicating a project template should duplicate its tasks')
# Convert into real project
self.project_project.reset_project(cr, user_projectmanager_id, [project_pigs_id])
project.refresh()
self.assertEqual(project.state, 'open', 'project: resetted project should be in open state')
self.assertEqual(len(project.tasks), 2, 'project: reset_project: project tasks should have been set active')
# Put as pending
self.project_project.set_pending(cr, user_projectmanager_id, [project_pigs_id])
project.refresh()
self.assertEqual(project.state, 'pending', 'project: should be in pending state')
# Re-open
self.project_project.set_open(cr, user_projectmanager_id, [project_pigs_id])
project.refresh()
self.assertEqual(project.state, 'open', 'project: reopened project should be in open state')
# Close project
self.project_project.set_done(cr, user_projectmanager_id, [project_pigs_id])
project.refresh()
self.assertEqual(project.state, 'close', 'project: closed project should be in close state')
# Re-open
self.project_project.set_open(cr, user_projectmanager_id, [project_pigs_id])
project.refresh()
# Re-convert into a template and schedule tasks
self.project_project.set_template(cr, user_projectmanager_id, [project_pigs_id])
self.project_project.schedule_tasks(cr, user_projectmanager_id, [project_pigs_id])
# Copy the project
new_project_id = self.project_project.copy(cr, user_projectmanager_id, project_pigs_id)
new_project = self.project_project.browse(cr, user_projectmanager_id, new_project_id)
self.assertEqual(len(new_project.tasks), 2, 'project: copied project should have copied task')
# Cancel the project
self.project_project.set_cancel(cr, user_projectmanager_id, [project_pigs_id])
self.assertEqual(project.state, 'cancelled', 'project: cancelled project should be in cancel state')
def test_10_task_process(self):
""" Testing task creation and management """
cr, uid, user_projectuser_id, user_projectmanager_id, project_pigs_id = self.cr, self.uid, self.user_projectuser_id, self.user_projectmanager_id, self.project_pigs_id
def format_and_process(template, email_to='project+pigs@mydomain.com, other@gmail.com', subject='Frogs',
email_from='Patrick Ratatouille <patrick.ratatouille@agrolait.com>',
msg_id='<1198923581.41972151344608186760.JavaMail@agrolait.com>'):
self.assertEqual(self.project_task.search(cr, uid, [('name', '=', subject)]), [])
mail = template.format(email_to=email_to, subject=subject, email_from=email_from, msg_id=msg_id)
self.mail_thread.message_process(cr, uid, None, mail)
return self.project_task.search(cr, uid, [('name', '=', subject)])
# Do: incoming mail from an unknown partner on an alias creates a new task 'Frogs'
frogs = format_and_process(EMAIL_TPL)
# Test: one task created by mailgateway administrator
self.assertEqual(len(frogs), 1, 'project: message_process: a new project.task should have been created')
task = self.project_task.browse(cr, user_projectuser_id, frogs[0])
res = self.project_task.get_metadata(cr, uid, [task.id])[0].get('create_uid') or [None]
self.assertEqual(res[0], uid,
'project: message_process: task should have been created by uid as alias_user_id is False on the alias')
# Test: messages
self.assertEqual(len(task.message_ids), 3,
                         'project: message_process: newly created task should have 3 messages: creation, assignment and email')
self.assertEqual(task.message_ids[2].subtype_id.name, 'Task Created',
'project: message_process: first message of new task should have Task Created subtype')
self.assertEqual(task.message_ids[1].subtype_id.name, 'Task Assigned',
                         'project: message_process: second message of new task should have Task Assigned subtype')
self.assertEqual(task.message_ids[0].author_id.id, self.email_partner_id,
                         'project: message_process: last message should be the one from Agrolait (partner failed)')
self.assertEqual(task.message_ids[0].subject, 'Frogs',
                         'project: message_process: last message should be the one from Agrolait (subject failed)')
# Test: task content
self.assertEqual(task.name, 'Frogs', 'project_task: name should be the email subject')
self.assertEqual(task.project_id.id, self.project_pigs_id, 'project_task: incorrect project')
self.assertEqual(task.stage_id.sequence, 1, 'project_task: should have a stage with sequence=1')
# Open the delegation wizard
delegate_id = self.project_task_delegate.create(cr, user_projectuser_id, {
'user_id': user_projectuser_id,
'planned_hours': 12.0,
'planned_hours_me': 2.0,
}, {'active_id': task.id})
self.project_task_delegate.delegate(cr, user_projectuser_id, [delegate_id], {'active_id': task.id})
# Check delegation details
task.refresh()
self.assertEqual(task.planned_hours, 2, 'project_task_delegate: planned hours is not correct after delegation')
| agpl-3.0 |
wq/wq.io | itertable/loaders.py | 1 | 4908 | from __future__ import print_function
import requests
try:
# Python 2 (uses str)
from StringIO import StringIO
except ImportError:
    # Python 3 (io.StringIO holds text, i.e. what Python 2 calls unicode)
from io import StringIO
from io import BytesIO
from .version import VERSION
from .exceptions import LoadFailed
from zipfile import ZipFile
class BaseLoader(object):
no_pickle_loader = ['file']
empty_file = None
def load(self):
raise NotImplementedError
class FileLoader(BaseLoader):
filename = None
@property
def read_mode(self):
return 'rb' if self.binary else 'r'
@property
def write_mode(self):
return 'wb+' if self.binary else 'w+'
def load(self):
try:
self.file = open(self.filename, self.read_mode)
self.empty_file = False
except IOError:
if self.binary:
self.file = BytesIO()
else:
self.file = StringIO()
self.empty_file = True
def save(self):
file = open(self.filename, self.write_mode)
self.dump(file)
file.close()
class Zipper(object):
inner_filename = None
inner_binary = False
def unzip_file(self):
zipfile = ZipFile(self.file)
inner_file = zipfile.read(
self.get_inner_filename(zipfile)
)
if self.inner_binary:
self.file = BytesIO(inner_file)
else:
self.file = StringIO(inner_file.decode('utf-8'))
zipfile.fp.close()
zipfile.close()
def get_inner_filename(self, zipfile):
if self.inner_filename:
return self.inner_filename
names = zipfile.namelist()
if len(names) == 1:
return names[0]
zipfile.fp.close()
zipfile.close()
raise LoadFailed("Multiple Inner Files!")
class ZipFileLoader(Zipper, FileLoader):
binary = True
def load(self):
super(ZipFileLoader, self).load()
self.unzip_file()
class StringLoader(BaseLoader):
string = ""
@property
def _io_class(self):
return BytesIO if self.binary else StringIO
def load(self):
if self.binary and not self.string:
self.string = b''
self.file = self._io_class(self.string)
def save(self):
file = self._io_class()
self.dump(file)
self.string = file.getvalue()
file.close()
class NetLoader(StringLoader):
"NetLoader: opens HTTP/REST resources for use in IterTable"
username = None
password = None
debug = False
url = None
client = requests
@property
def user_agent(self):
return "IterTable/%s (%s)" % (
VERSION,
requests.utils.default_user_agent()
)
@property
def headers(self):
return {
'User-Agent': self.user_agent,
}
def load(self, **kwargs):
result = self.GET()
self.file = self._io_class(result)
def req(self, url=None, method=None, params=None, body=None, headers={}):
if url is None:
url = self.url
if url is None:
raise LoadFailed("No URL provided")
if params is None:
params = getattr(self, 'params', None)
if isinstance(params, str):
url += '?' + params
params = None
if self.debug:
if params:
from requests.compat import urlencode
debug_url = url + '?' + urlencode(params, doseq=True)
else:
debug_url = url
self.debug_string = "%s: %s" % (method, debug_url)
print(self.debug_string)
if self.username is not None and self.password is not None:
auth = (self.username, self.password)
else:
auth = None
all_headers = self.headers.copy()
all_headers.update(headers)
resp = self.client.request(
method, url,
params=params,
headers=all_headers,
auth=auth,
data=body,
)
resp.connection.close()
if resp.status_code < 200 or resp.status_code > 299:
raise LoadFailed(
resp.text,
path=url,
code=resp.status_code,
)
if self.binary:
return resp.content
else:
return resp.text
def GET(self, **kwargs):
return self.req(method='GET', **kwargs)
def POST(self, **kwargs):
return self.req(method='POST', **kwargs)
def PUT(self, **kwargs):
return self.req(method='PUT', **kwargs)
def DELETE(self, **kwargs):
return self.req(method='DELETE', **kwargs)
class ZipNetLoader(Zipper, NetLoader):
binary = True
def load(self):
super(ZipNetLoader, self).load()
self.unzip_file()
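# Illustrative usage sketch (added for clarity, not part of the original module).
# In practice these loader mixins are combined with IterTable parser/mapper
# classes; a minimal stand-alone use of NetLoader might look like this, where
# the class name and URL are hypothetical:
#
#   class CsvNetLoader(NetLoader):
#       binary = False
#       url = "https://example.com/data.csv"
#
#   loader = CsvNetLoader()
#   loader.load()              # performs a GET with the IterTable User-Agent
#   text = loader.file.read()  # response body wrapped in StringIO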
| mit |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.1/Lib/lib-old/cmp.py | 14 | 1975 | """Efficiently compare files, boolean outcome only (equal / not equal).
Tricks (used in this order):
- Files with identical type, size & mtime are assumed to be clones
- Files with different type or size cannot be identical
- We keep a cache of outcomes of earlier comparisons
- We don't fork a process to run 'cmp' but read the files ourselves
"""
import os
cache = {}
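# Illustrative usage sketch (added for clarity, not part of the original module);
# the file names below are hypothetical and must exist on disk:
#
#   import cmp
#   if cmp.cmp('/etc/hosts', '/tmp/hosts.bak'):
#       print 'files are identical'
#   else:
#       print 'files differ'
#
# Repeating the call with the same arguments is answered from the in-memory
# cache as long as neither file's stat signature (type, size, mtime) changes.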
def cmp(f1, f2, shallow=1):
"""Compare two files, use the cache if possible.
Return 1 for identical files, 0 for different.
Raise exceptions if either file could not be statted, read, etc."""
s1, s2 = sig(os.stat(f1)), sig(os.stat(f2))
if s1[0] != 8 or s2[0] != 8:
        # Either is not a plain file -- always report as different
return 0
if shallow and s1 == s2:
# type, size & mtime match -- report same
return 1
if s1[:2] != s2[:2]: # Types or sizes differ, don't bother
# types or sizes differ -- report different
return 0
# same type and size -- look in the cache
key = (f1, f2)
try:
cs1, cs2, outcome = cache[key]
# cache hit
if s1 == cs1 and s2 == cs2:
# cached signatures match
return outcome
# stale cached signature(s)
except KeyError:
# cache miss
pass
# really compare
outcome = do_cmp(f1, f2)
cache[key] = s1, s2, outcome
return outcome
def sig(st):
"""Return signature (i.e., type, size, mtime) from raw stat data
0-5: st_mode, st_ino, st_dev, st_nlink, st_uid, st_gid
6-9: st_size, st_atime, st_mtime, st_ctime"""
type = st[0] / 4096
size = st[6]
mtime = st[8]
return type, size, mtime
def do_cmp(f1, f2):
"""Compare two files, really."""
bufsize = 8*1024 # Could be tuned
fp1 = open(f1, 'rb')
fp2 = open(f2, 'rb')
while 1:
b1 = fp1.read(bufsize)
b2 = fp2.read(bufsize)
if b1 != b2: return 0
if not b1: return 1
| mit |
holdenk/spark | python/pyspark/sql/tests/test_datasources.py | 22 | 7577 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import shutil
import tempfile
from pyspark.sql import Row
from pyspark.sql.types import IntegerType, StructField, StructType, LongType, StringType
from pyspark.testing.sqlutils import ReusedSQLTestCase
class DataSourcesTests(ReusedSQLTestCase):
def test_linesep_text(self):
df = self.spark.read.text("python/test_support/sql/ages_newlines.csv", lineSep=",")
expected = [Row(value=u'Joe'), Row(value=u'20'), Row(value=u'"Hi'),
Row(value=u'\nI am Jeo"\nTom'), Row(value=u'30'),
Row(value=u'"My name is Tom"\nHyukjin'), Row(value=u'25'),
Row(value=u'"I am Hyukjin\n\nI love Spark!"\n')]
self.assertEqual(df.collect(), expected)
tpath = tempfile.mkdtemp()
shutil.rmtree(tpath)
try:
df.write.text(tpath, lineSep="!")
expected = [Row(value=u'Joe!20!"Hi!'), Row(value=u'I am Jeo"'),
Row(value=u'Tom!30!"My name is Tom"'),
Row(value=u'Hyukjin!25!"I am Hyukjin'),
Row(value=u''), Row(value=u'I love Spark!"'),
Row(value=u'!')]
readback = self.spark.read.text(tpath)
self.assertEqual(readback.collect(), expected)
finally:
shutil.rmtree(tpath)
def test_multiline_json(self):
people1 = self.spark.read.json("python/test_support/sql/people.json")
people_array = self.spark.read.json("python/test_support/sql/people_array.json",
multiLine=True)
self.assertEqual(people1.collect(), people_array.collect())
def test_encoding_json(self):
people_array = self.spark.read\
.json("python/test_support/sql/people_array_utf16le.json",
multiLine=True, encoding="UTF-16LE")
expected = [Row(age=30, name=u'Andy'), Row(age=19, name=u'Justin')]
self.assertEqual(people_array.collect(), expected)
def test_linesep_json(self):
df = self.spark.read.json("python/test_support/sql/people.json", lineSep=",")
expected = [Row(_corrupt_record=None, name=u'Michael'),
Row(_corrupt_record=u' "age":30}\n{"name":"Justin"', name=None),
Row(_corrupt_record=u' "age":19}\n', name=None)]
self.assertEqual(df.collect(), expected)
tpath = tempfile.mkdtemp()
shutil.rmtree(tpath)
try:
df = self.spark.read.json("python/test_support/sql/people.json")
df.write.json(tpath, lineSep="!!")
readback = self.spark.read.json(tpath, lineSep="!!")
self.assertEqual(readback.collect(), df.collect())
finally:
shutil.rmtree(tpath)
def test_multiline_csv(self):
ages_newlines = self.spark.read.csv(
"python/test_support/sql/ages_newlines.csv", multiLine=True)
expected = [Row(_c0=u'Joe', _c1=u'20', _c2=u'Hi,\nI am Jeo'),
Row(_c0=u'Tom', _c1=u'30', _c2=u'My name is Tom'),
Row(_c0=u'Hyukjin', _c1=u'25', _c2=u'I am Hyukjin\n\nI love Spark!')]
self.assertEqual(ages_newlines.collect(), expected)
def test_ignorewhitespace_csv(self):
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.spark.createDataFrame([[" a", "b ", " c "]]).write.csv(
tmpPath,
ignoreLeadingWhiteSpace=False,
ignoreTrailingWhiteSpace=False)
expected = [Row(value=u' a,b , c ')]
readback = self.spark.read.text(tmpPath)
self.assertEqual(readback.collect(), expected)
shutil.rmtree(tmpPath)
def test_read_multiple_orc_file(self):
df = self.spark.read.orc(["python/test_support/sql/orc_partitioned/b=0/c=0",
"python/test_support/sql/orc_partitioned/b=1/c=1"])
self.assertEqual(2, df.count())
def test_read_text_file_list(self):
df = self.spark.read.text(['python/test_support/sql/text-test.txt',
'python/test_support/sql/text-test.txt'])
count = df.count()
self.assertEqual(count, 4)
def test_json_sampling_ratio(self):
rdd = self.spark.sparkContext.range(0, 100, 1, 1) \
.map(lambda x: '{"a":0.1}' if x == 1 else '{"a":%s}' % str(x))
schema = self.spark.read.option('inferSchema', True) \
.option('samplingRatio', 0.5) \
.json(rdd).schema
self.assertEqual(schema, StructType([StructField("a", LongType(), True)]))
def test_csv_sampling_ratio(self):
rdd = self.spark.sparkContext.range(0, 100, 1, 1) \
.map(lambda x: '0.1' if x == 1 else str(x))
schema = self.spark.read.option('inferSchema', True)\
.csv(rdd, samplingRatio=0.5).schema
self.assertEqual(schema, StructType([StructField("_c0", IntegerType(), True)]))
def test_checking_csv_header(self):
path = tempfile.mkdtemp()
shutil.rmtree(path)
try:
self.spark.createDataFrame([[1, 1000], [2000, 2]])\
.toDF('f1', 'f2').write.option("header", "true").csv(path)
schema = StructType([
StructField('f2', IntegerType(), nullable=True),
StructField('f1', IntegerType(), nullable=True)])
df = self.spark.read.option('header', 'true').schema(schema)\
.csv(path, enforceSchema=False)
self.assertRaisesRegex(
Exception,
"CSV header does not conform to the schema",
lambda: df.collect())
finally:
shutil.rmtree(path)
def test_ignore_column_of_all_nulls(self):
path = tempfile.mkdtemp()
shutil.rmtree(path)
try:
df = self.spark.createDataFrame([["""{"a":null, "b":1, "c":3.0}"""],
["""{"a":null, "b":null, "c":"string"}"""],
["""{"a":null, "b":null, "c":null}"""]])
df.write.text(path)
schema = StructType([
StructField('b', LongType(), nullable=True),
StructField('c', StringType(), nullable=True)])
readback = self.spark.read.json(path, dropFieldIfAllNull=True)
self.assertEqual(readback.schema, schema)
finally:
shutil.rmtree(path)
if __name__ == "__main__":
import unittest
from pyspark.sql.tests.test_datasources import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
AttakornP/request_manager | request_manager/request_manager/settings/local.py | 1 | 1799 | """Development settings and globals."""
from os.path import join, normpath
from base import *
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION
########## EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
########## END EMAIL CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'rq_mng_db',
'USER': 'www-data',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
########## END DATABASE CONFIGURATION
########## CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
########## END CACHE CONFIGURATION
########## TOOLBAR CONFIGURATION
# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation
INSTALLED_APPS += (
#'debug_toolbar',
)
# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation
INTERNAL_IPS = ('127.0.0.1',)
# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation
MIDDLEWARE_CLASSES += (
#'debug_toolbar.middleware.DebugToolbarMiddleware',
)
# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
'SHOW_TEMPLATE_CONTEXT': True,
}
########## END TOOLBAR CONFIGURATION
| mit |
Teagan42/home-assistant | tests/components/nuheat/test_init.py | 5 | 1461 | """NuHeat component tests."""
import unittest
from unittest.mock import patch
from homeassistant.components import nuheat
from tests.common import MockDependency, get_test_home_assistant
VALID_CONFIG = {
"nuheat": {"username": "warm", "password": "feet", "devices": "thermostat123"}
}
class TestNuHeat(unittest.TestCase):
"""Test the NuHeat component."""
def setUp(self): # pylint: disable=invalid-name
"""Initialize the values for this test class."""
self.hass = get_test_home_assistant()
self.config = VALID_CONFIG
def tearDown(self): # pylint: disable=invalid-name
"""Teardown this test class. Stop hass."""
self.hass.stop()
@MockDependency("nuheat")
@patch("homeassistant.helpers.discovery.load_platform")
def test_setup(self, mocked_nuheat, mocked_load):
"""Test setting up the NuHeat component."""
with patch.object(nuheat, "nuheat", mocked_nuheat):
nuheat.setup(self.hass, self.config)
mocked_nuheat.NuHeat.assert_called_with("warm", "feet")
assert nuheat.DOMAIN in self.hass.data
assert len(self.hass.data[nuheat.DOMAIN]) == 2
assert isinstance(
self.hass.data[nuheat.DOMAIN][0], type(mocked_nuheat.NuHeat())
)
assert self.hass.data[nuheat.DOMAIN][1] == "thermostat123"
mocked_load.assert_called_with(
self.hass, "climate", nuheat.DOMAIN, {}, self.config
)
| apache-2.0 |
iRGBit/QGIS | python/plugins/processing/algs/gdal/ogrinfo.py | 8 | 2423 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ogrinfo.py
---------------------
Date : November 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'November 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from processing.core.parameters import ParameterVector
from processing.core.outputs import OutputHTML
from processing.algs.gdal.GdalUtils import GdalUtils
from processing.algs.gdal.OgrAlgorithm import OgrAlgorithm
class OgrInfo(OgrAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Information')
self.group, self.i18n_group = self.trAlgorithm('[OGR] Miscellaneous')
self.addParameter(ParameterVector(self.INPUT, self.tr('Input layer'),
[ParameterVector.VECTOR_TYPE_ANY], False))
self.addOutput(OutputHTML(self.OUTPUT, self.tr('Layer information')))
def getConsoleCommands(self):
arguments = ["ogrinfo"]
arguments.append('-al')
arguments.append('-so')
layer = self.getParameterValue(self.INPUT)
conn = self.ogrConnectionString(layer)
arguments.append(conn)
return arguments
def processAlgorithm(self, progress):
GdalUtils.runGdal(self.getConsoleCommands(), progress)
output = self.getOutputValue(self.OUTPUT)
f = open(output, 'w')
f.write('<pre>')
for s in GdalUtils.getConsoleOutput()[1:]:
f.write(unicode(s))
f.write('</pre>')
f.close()
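# Illustrative note (added for clarity, not part of the original algorithm):
# for a shapefile input, the command assembled by getConsoleCommands() is
# typically equivalent to running
#
#   ogrinfo -al -so /path/to/layer.shp
#
# (path hypothetical); processAlgorithm() then wraps the console output in a
# <pre> block inside the HTML report written to the OUTPUT file.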
| gpl-2.0 |
NathanW2/QGIS | python/plugins/db_manager/db_plugins/vlayers/connector.py | 9 | 13705 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : Virtual layers plugin for DB Manager
Date : December 2015
copyright : (C) 2015 by Hugo Mercier
email : hugo dot mercier at oslandia dot com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from qgis.PyQt.QtCore import QUrl, QTemporaryFile
from ..connector import DBConnector
from ..plugin import Table
from qgis.core import QgsDataSourceUri, QgsVirtualLayerDefinition, QgsProject, QgsMapLayer, QgsVectorLayer, QgsCoordinateReferenceSystem, QgsWkbTypes
import sqlite3
class sqlite3_connection(object):
def __init__(self, sqlite_file):
self.conn = sqlite3.connect(sqlite_file)
def __enter__(self):
return self.conn
def __exit__(self, type, value, traceback):
self.conn.close()
def getQueryGeometryName(sqlite_file):
# introspect the file
with sqlite3_connection(sqlite_file) as conn:
c = conn.cursor()
for r in c.execute("SELECT url FROM _meta"):
d = QgsVirtualLayerDefinition.fromUrl(QUrl(r[0]))
if d.hasDefinedGeometry():
return d.geometryField()
return None
def classFactory():
return VLayerConnector
# Tables in DB Manager are identified by their display names
# This global registry maps a display name with a layer id
# It is filled when getVectorTables is called
class VLayerRegistry(object):
_instance = None
@classmethod
def instance(cls):
if cls._instance is None:
cls._instance = VLayerRegistry()
return cls._instance
def __init__(self):
self.layers = {}
def reset(self):
self.layers = {}
def has(self, k):
return k in self.layers
def get(self, k):
return self.layers.get(k)
def __getitem__(self, k):
return self.get(k)
def set(self, k, l):
self.layers[k] = l
def __setitem__(self, k, l):
self.set(k, l)
def items(self):
return list(self.layers.items())
def getLayer(self, l):
lid = self.layers.get(l)
if lid is None:
return lid
if lid not in QgsProject.instance().mapLayers().keys():
self.layers.pop(l)
return None
return QgsProject.instance().mapLayer(lid)
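# Illustrative sketch (added for clarity, not part of the original module):
# getVectorTables() fills the registry with display-name -> layer-id entries,
# and later calls resolve a table name back to the live layer. With a
# hypothetical loaded layer `layer` registered under the name "my_points":
#
#   reg = VLayerRegistry.instance()
#   reg.set("my_points", layer.id())
#   same_layer = reg.getLayer("my_points")   # QgsVectorLayer, or None if gone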
class VLayerConnector(DBConnector):
def __init__(self, uri):
pass
def _execute(self, cursor, sql):
# This is only used to get list of fields
class DummyCursor(object):
def __init__(self, sql):
self.sql = sql
def close(self):
pass
return DummyCursor(sql)
def _get_cursor(self, name=None):
# fix_print_with_import
print(("_get_cursor_", name))
def _get_cursor_columns(self, c):
tf = QTemporaryFile()
tf.open()
tmp = tf.fileName()
tf.close()
df = QgsVirtualLayerDefinition()
df.setFilePath(tmp)
df.setQuery(c.sql)
p = QgsVectorLayer(df.toString(), "vv", "virtual")
if not p.isValid():
return []
f = [f.name() for f in p.fields()]
if p.geometryType() != QgsWkbTypes.NullGeometry:
gn = getQueryGeometryName(tmp)
if gn:
f += [gn]
return f
def uri(self):
return QgsDataSourceUri("qgis")
def getInfo(self):
return "info"
def getSpatialInfo(self):
return None
def hasSpatialSupport(self):
return True
def hasRasterSupport(self):
return False
def hasCustomQuerySupport(self):
return True
def hasTableColumnEditingSupport(self):
return False
def fieldTypes(self):
return [
"integer", "bigint", "smallint", # integers
"real", "double", "float", "numeric", # floats
"varchar", "varchar(255)", "character(20)", "text", # strings
"date", "datetime" # date/time
]
def getSchemas(self):
return None
def getTables(self, schema=None, add_sys_tables=False):
""" get list of tables """
return self.getVectorTables()
def getVectorTables(self, schema=None):
""" get list of table with a geometry column
it returns:
name (table name)
is_system_table
type = 'view' (is a view?)
geometry_column:
f_table_name (the table name in geometry_columns may be in a wrong case, use this to load the layer)
f_geometry_column
type
coord_dimension
srid
"""
reg = VLayerRegistry.instance()
VLayerRegistry.instance().reset()
lst = []
for _, l in list(QgsProject.instance().mapLayers().items()):
if l.type() == QgsMapLayer.VectorLayer:
lname = l.name()
# if there is already a layer with this name, use the layer id
# as name
if reg.has(lname):
lname = l.id()
VLayerRegistry.instance().set(lname, l.id())
geomType = None
dim = None
g = l.dataProvider().wkbType()
if g == QgsWkbTypes.Point:
geomType = 'POINT'
dim = 'XY'
elif g == QgsWkbTypes.LineString:
geomType = 'LINESTRING'
dim = 'XY'
elif g == QgsWkbTypes.Polygon:
geomType = 'POLYGON'
dim = 'XY'
elif g == QgsWkbTypes.MultiPoint:
geomType = 'MULTIPOINT'
dim = 'XY'
elif g == QgsWkbTypes.MultiLineString:
geomType = 'MULTILINESTRING'
dim = 'XY'
elif g == QgsWkbTypes.MultiPolygon:
geomType = 'MULTIPOLYGON'
dim = 'XY'
elif g == QgsWkbTypes.Point25D:
geomType = 'POINT'
dim = 'XYZ'
elif g == QgsWkbTypes.LineString25D:
geomType = 'LINESTRING'
dim = 'XYZ'
elif g == QgsWkbTypes.Polygon25D:
geomType = 'POLYGON'
dim = 'XYZ'
elif g == QgsWkbTypes.MultiPoint25D:
geomType = 'MULTIPOINT'
dim = 'XYZ'
elif g == QgsWkbTypes.MultiLineString25D:
geomType = 'MULTILINESTRING'
dim = 'XYZ'
elif g == QgsWkbTypes.MultiPolygon25D:
geomType = 'MULTIPOLYGON'
dim = 'XYZ'
lst.append(
(Table.VectorType, lname, False, False, l.id(), 'geometry', geomType, dim, l.crs().postgisSrid()))
return lst
def getRasterTables(self, schema=None):
return []
def getTableRowCount(self, table):
t = table[1]
l = VLayerRegistry.instance().getLayer(t)
if not l or not l.isValid():
return None
return l.featureCount()
def getTableFields(self, table):
""" return list of columns in table """
t = table[1]
l = VLayerRegistry.instance().getLayer(t)
if not l or not l.isValid():
return []
# id, name, type, nonnull, default, pk
n = l.dataProvider().fields().size()
f = [(i, f.name(), f.typeName(), False, None, False)
for i, f in enumerate(l.dataProvider().fields())]
f += [(n, "geometry", "geometry", False, None, False)]
return f
def getTableIndexes(self, table):
return []
def getTableConstraints(self, table):
return None
def getTableTriggers(self, table):
return []
def deleteTableTrigger(self, trigger, table=None):
return
def getTableExtent(self, table, geom):
is_id, t = table
if is_id:
l = QgsProject.instance().mapLayer(t)
else:
l = VLayerRegistry.instance().getLayer(t)
if not l or not l.isValid():
return None
e = l.extent()
r = (e.xMinimum(), e.yMinimum(), e.xMaximum(), e.yMaximum())
return r
def getViewDefinition(self, view):
print("**unimplemented** getViewDefinition")
def getSpatialRefInfo(self, srid):
crs = QgsCoordinateReferenceSystem(srid)
return crs.description()
def isVectorTable(self, table):
return True
def isRasterTable(self, table):
return False
def createTable(self, table, field_defs, pkey):
print("**unimplemented** createTable")
return False
def deleteTable(self, table):
print("**unimplemented** deleteTable")
return False
def emptyTable(self, table):
print("**unimplemented** emptyTable")
return False
def renameTable(self, table, new_table):
print("**unimplemented** renameTable")
return False
def moveTable(self, table, new_table, new_schema=None):
print("**unimplemented** moveTable")
return False
def createView(self, view, query):
print("**unimplemented** createView")
return False
def deleteView(self, view):
print("**unimplemented** deleteView")
return False
def renameView(self, view, new_name):
print("**unimplemented** renameView")
return False
def runVacuum(self):
print("**unimplemented** runVacuum")
return False
def addTableColumn(self, table, field_def):
print("**unimplemented** addTableColumn")
return False
def deleteTableColumn(self, table, column):
print("**unimplemented** deleteTableColumn")
def updateTableColumn(self, table, column, new_name, new_data_type=None, new_not_null=None, new_default=None):
print("**unimplemented** updateTableColumn")
def renameTableColumn(self, table, column, new_name):
print("**unimplemented** renameTableColumn")
return False
def setColumnType(self, table, column, data_type):
print("**unimplemented** setColumnType")
return False
def setColumnDefault(self, table, column, default):
print("**unimplemented** setColumnDefault")
return False
def setColumnNull(self, table, column, is_null):
print("**unimplemented** setColumnNull")
return False
def isGeometryColumn(self, table, column):
print("**unimplemented** isGeometryColumn")
return False
def addGeometryColumn(self, table, geom_column='geometry', geom_type='POINT', srid=-1, dim=2):
print("**unimplemented** addGeometryColumn")
return False
def deleteGeometryColumn(self, table, geom_column):
print("**unimplemented** deleteGeometryColumn")
return False
def addTableUniqueConstraint(self, table, column):
print("**unimplemented** addTableUniqueConstraint")
return False
def deleteTableConstraint(self, table, constraint):
print("**unimplemented** deleteTableConstraint")
return False
def addTablePrimaryKey(self, table, column):
print("**unimplemented** addTablePrimaryKey")
return False
def createTableIndex(self, table, name, column, unique=False):
print("**unimplemented** createTableIndex")
return False
def deleteTableIndex(self, table, name):
print("**unimplemented** deleteTableIndex")
return False
def createSpatialIndex(self, table, geom_column='geometry'):
print("**unimplemented** createSpatialIndex")
return False
def deleteSpatialIndex(self, table, geom_column='geometry'):
print("**unimplemented** deleteSpatialIndex")
return False
def hasSpatialIndex(self, table, geom_column='geometry'):
print("**unimplemented** hasSpatialIndex")
return False
def execution_error_types(self):
print("**unimplemented** execution_error_types")
return False
def connection_error_types(self):
print("**unimplemented** connection_error_types")
return False
def getSqlDictionary(self):
from .sql_dictionary import getSqlDictionary
sql_dict = getSqlDictionary()
items = []
for tbl in self.getTables():
items.append(tbl[1]) # table name
for fld in self.getTableFields((None, tbl[1])):
items.append(fld[1]) # field name
sql_dict["identifier"] = items
return sql_dict
def getQueryBuilderDictionary(self):
from .sql_dictionary import getQueryBuilderDictionary
return getQueryBuilderDictionary()
| gpl-2.0 |
tornadozou/tensorflow | tensorflow/contrib/bayesflow/python/kernel_tests/stochastic_graph_test.py | 76 | 10239 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for stochastic graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import distributions as distributions_lib
from tensorflow.contrib.bayesflow.python.ops import stochastic_graph_impl
from tensorflow.contrib.bayesflow.python.ops import stochastic_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
st = stochastic_tensor
sg = stochastic_graph_impl
distributions = distributions_lib
class NormalNotParam(distributions.Normal):
@property
def reparameterization_type(self):
return distributions.NOT_REPARAMETERIZED
class TestSurrogateLosses(test.TestCase):
def testPathwiseDerivativeDoesNotAddSurrogateLosses(self):
with self.test_session():
mu = [0.0, 0.1, 0.2]
sigma = constant_op.constant([1.1, 1.2, 1.3])
with st.value_type(st.SampleValue()):
prior = st.StochasticTensor(distributions.Normal(loc=mu, scale=sigma))
likelihood = st.StochasticTensor(
distributions.Normal(
loc=prior, scale=sigma))
self.assertEqual(
prior.distribution.reparameterization_type,
distributions.FULLY_REPARAMETERIZED)
self.assertEqual(
likelihood.distribution.reparameterization_type,
distributions.FULLY_REPARAMETERIZED)
loss = math_ops.square(array_ops.identity(likelihood) - [0.0, 0.1, 0.2])
sum_loss = math_ops.reduce_sum(loss)
surrogate_loss = sg.surrogate_loss([loss])
with self.assertRaisesRegexp(ValueError, "dimensionality 1 or greater"):
_ = sg.surrogate_loss([sum_loss])
surrogate_from_both = sg.surrogate_loss(
[loss, sum_loss * array_ops.ones_like(loss)])
# Pathwise derivative terms do not require add'l surrogate loss terms.
with self.test_session() as sess:
self.assertAllClose(*sess.run([loss, surrogate_loss]))
self.assertAllClose(*sess.run([(loss + sum_loss), surrogate_from_both]))
def _testSurrogateLoss(self, session, losses, expected_addl_terms, xs):
surrogate_loss = sg.surrogate_loss(losses)
expected_surrogate_loss = math_ops.add_n(losses + expected_addl_terms)
self.assertAllClose(*session.run([surrogate_loss, expected_surrogate_loss]))
# Test backprop
expected_grads = gradients_impl.gradients(ys=expected_surrogate_loss, xs=xs)
surrogate_grads = gradients_impl.gradients(ys=surrogate_loss, xs=xs)
self.assertEqual(len(expected_grads), len(surrogate_grads))
grad_values = session.run(expected_grads + surrogate_grads)
n_grad = len(expected_grads)
self.assertAllClose(grad_values[:n_grad], grad_values[n_grad:])
def testSurrogateLoss(self):
with self.test_session() as sess:
mu = constant_op.constant([0.0, 0.1, 0.2])
sigma = constant_op.constant([1.1, 1.2, 1.3])
with st.value_type(st.SampleValue()):
prior = st.StochasticTensor(NormalNotParam(loc=mu, scale=sigma))
likelihood = st.StochasticTensor(NormalNotParam(loc=prior, scale=sigma))
prior_2 = st.StochasticTensor(NormalNotParam(loc=mu, scale=sigma))
loss = math_ops.square(array_ops.identity(likelihood) - mu)
part_loss = math_ops.square(array_ops.identity(prior) - mu)
sum_loss = math_ops.reduce_sum(loss)
loss_nodeps = math_ops.square(array_ops.identity(prior_2) - mu)
# For ground truth, use the stop-gradient versions of the losses
loss_nograd = array_ops.stop_gradient(loss)
loss_nodeps_nograd = array_ops.stop_gradient(loss_nodeps)
sum_loss_nograd = array_ops.stop_gradient(sum_loss)
# These score functions should ignore prior_2
self._testSurrogateLoss(
session=sess,
losses=[loss],
expected_addl_terms=[
likelihood.distribution.log_prob(
likelihood.value()) * loss_nograd,
prior.distribution.log_prob(prior.value()) * loss_nograd
],
xs=[mu, sigma])
self._testSurrogateLoss(
session=sess,
losses=[loss, part_loss],
expected_addl_terms=[
likelihood.distribution.log_prob(
likelihood.value()) * loss_nograd,
(prior.distribution.log_prob(prior.value()) *
array_ops.stop_gradient(part_loss + loss))
],
xs=[mu, sigma])
self._testSurrogateLoss(
session=sess,
losses=[sum_loss * array_ops.ones_like(loss)],
expected_addl_terms=[(
likelihood.distribution.log_prob(likelihood.value()) *
sum_loss_nograd), prior.distribution.log_prob(prior.value()) *
sum_loss_nograd],
xs=[mu, sigma])
self._testSurrogateLoss(
session=sess,
losses=[loss, sum_loss * array_ops.ones_like(loss)],
expected_addl_terms=[(
likelihood.distribution.log_prob(likelihood.value()) *
array_ops.stop_gradient(loss + sum_loss)),
(prior.distribution.log_prob(prior.value()) *
array_ops.stop_gradient(loss + sum_loss))],
xs=[mu, sigma])
# These score functions should ignore prior and likelihood
self._testSurrogateLoss(
session=sess,
losses=[loss_nodeps],
expected_addl_terms=[(prior_2.distribution.log_prob(prior_2.value()) *
loss_nodeps_nograd)],
xs=[mu, sigma])
# These score functions should include all terms selectively
self._testSurrogateLoss(
session=sess,
losses=[loss, loss_nodeps],
# We can't guarantee ordering of output losses in this case.
expected_addl_terms=[(
likelihood.distribution.log_prob(likelihood.value()) *
loss_nograd), prior.distribution.log_prob(prior.value()) *
loss_nograd,
(prior_2.distribution.log_prob(prior_2.value()) *
loss_nodeps_nograd)],
xs=[mu, sigma])
def testNoSurrogateLoss(self):
with self.test_session():
mu = constant_op.constant([0.0, 0.1, 0.2])
sigma = constant_op.constant([1.1, 1.2, 1.3])
with st.value_type(st.SampleValue()):
dt = st.StochasticTensor(
NormalNotParam(
loc=mu, scale=sigma), loss_fn=None)
self.assertEqual(None, dt.loss(constant_op.constant([2.0])))
def testExplicitStochasticTensors(self):
with self.test_session() as sess:
mu = constant_op.constant([0.0, 0.1, 0.2])
sigma = constant_op.constant([1.1, 1.2, 1.3])
with st.value_type(st.SampleValue()):
dt1 = st.StochasticTensor(NormalNotParam(loc=mu, scale=sigma))
dt2 = st.StochasticTensor(NormalNotParam(loc=mu, scale=sigma))
loss = math_ops.square(array_ops.identity(dt1)) + 10. + dt2
sl_all = sg.surrogate_loss([loss])
sl_dt1 = sg.surrogate_loss([loss], stochastic_tensors=[dt1])
sl_dt2 = sg.surrogate_loss([loss], stochastic_tensors=[dt2])
dt1_term = dt1.distribution.log_prob(dt1) * loss
dt2_term = dt2.distribution.log_prob(dt2) * loss
self.assertAllClose(*sess.run(
[sl_all, sum([loss, dt1_term, dt2_term])]))
self.assertAllClose(*sess.run([sl_dt1, sum([loss, dt1_term])]))
self.assertAllClose(*sess.run([sl_dt2, sum([loss, dt2_term])]))
class StochasticDependenciesMapTest(test.TestCase):
def testBuildsMapOfUpstreamNodes(self):
dt1 = st.StochasticTensor(distributions.Normal(loc=0., scale=1.))
dt2 = st.StochasticTensor(distributions.Normal(loc=0., scale=1.))
out1 = dt1.value() + 1.
out2 = dt2.value() + 2.
x = out1 + out2
y = out2 * 3.
dep_map = sg._stochastic_dependencies_map([x, y])
self.assertEqual(dep_map[dt1], set([x]))
self.assertEqual(dep_map[dt2], set([x, y]))
def testHandlesStackedStochasticNodes(self):
dt1 = st.StochasticTensor(distributions.Normal(loc=0., scale=1.))
out1 = dt1.value() + 1.
dt2 = st.StochasticTensor(distributions.Normal(loc=out1, scale=1.))
x = dt2.value() + 2.
dt3 = st.StochasticTensor(distributions.Normal(loc=0., scale=1.))
y = dt3.value() * 3.
dep_map = sg._stochastic_dependencies_map([x, y])
self.assertEqual(dep_map[dt1], set([x]))
self.assertEqual(dep_map[dt2], set([x]))
self.assertEqual(dep_map[dt3], set([y]))
def testTraversesControlInputs(self):
dt1 = st.StochasticTensor(distributions.Normal(loc=0., scale=1.))
logits = dt1.value() * 3.
dt2 = st.StochasticTensor(distributions.Bernoulli(logits=logits))
dt3 = st.StochasticTensor(distributions.Normal(loc=0., scale=1.))
x = dt3.value()
y = array_ops.ones((2, 2)) * 4.
z = array_ops.ones((2, 2)) * 3.
out = control_flow_ops.cond(
math_ops.cast(dt2, dtypes.bool), lambda: math_ops.add(x, y),
lambda: math_ops.square(z))
out += 5.
dep_map = sg._stochastic_dependencies_map([out])
self.assertEqual(dep_map[dt1], set([out]))
self.assertEqual(dep_map[dt2], set([out]))
self.assertEqual(dep_map[dt3], set([out]))
if __name__ == "__main__":
test.main()
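# Illustrative summary (added for clarity, not part of the original test): for a
# non-reparameterized StochasticTensor `dt` upstream of a loss L, surrogate_loss
# adds a score-function term of the form
#
#   dt.distribution.log_prob(dt.value()) * stop_gradient(L)
#
# to L, which is exactly what the expected_addl_terms built by hand in
# _testSurrogateLoss reconstruct; fully reparameterized tensors need no such
# term, as testPathwiseDerivativeDoesNotAddSurrogateLosses checks.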
| apache-2.0 |
elegion/djangodash2012 | fortuitus/settings_gondor.py | 1 | 1571 | import os
import urlparse
from .settings import * # NOQA
DEBUG = False
TEMPLATE_DEBUG = DEBUG
if 'GONDOR_DATABASE_URL' in os.environ:
urlparse.uses_netloc.append('postgres')
url = urlparse.urlparse(os.environ['GONDOR_DATABASE_URL'])
DATABASES = {
'default': {
'ENGINE': {
'postgres': 'django.db.backends.postgresql_psycopg2'
}[url.scheme],
'NAME': url.path[1:],
'USER': url.username,
'PASSWORD': url.password,
'HOST': url.hostname,
'PORT': url.port
}
}
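# Illustrative note (added for clarity, not part of the original settings): with
# a hypothetical GONDOR_DATABASE_URL such as
# postgres://user:secret@db.example.com:5432/app, the block above produces
#
#   DATABASES['default'] == {
#       'ENGINE': 'django.db.backends.postgresql_psycopg2',
#       'NAME': 'app', 'USER': 'user', 'PASSWORD': 'secret',
#       'HOST': 'db.example.com', 'PORT': 5432,
#   }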
SITE_ID = 1
if 'GONDOR_DATA_DIR' in os.environ:
MEDIA_ROOT = os.path.join(os.environ['GONDOR_DATA_DIR'],
'site_media', 'media')
STATIC_ROOT = os.path.join(os.environ['GONDOR_DATA_DIR'],
'site_media', 'static')
MEDIA_URL = '/site_media/media/'
STATIC_URL = '/site_media/static/'
ADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/'
FILE_UPLOAD_PERMISSIONS = 0640
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
}
},
'loggers': {
'': {
'handlers': ['console'],
'level': 'INFO',
},
'django.request': {
'propagate': True,
},
}
}
COMPRESS_ENABLED = True
| mit |
saneyuki/servo | tests/wpt/web-platform-tests/mathml/tools/radicals.py | 101 | 3622 | #!/usr/bin/python
from utils import mathfont
import fontforge
def createStretchyRadical(aFont):
radicalCodePoint = 0x221a
mathfont.createSquareGlyph(aFont, radicalCodePoint)
g = aFont.createChar(-1, "size1")
mathfont.drawRectangleGlyph(g, mathfont.em, 2 * mathfont.em, 0)
g = aFont.createChar(-1, "size2")
mathfont.drawRectangleGlyph(g, mathfont.em, 3 * mathfont.em, 0)
g = aFont.createChar(-1, "size3")
mathfont.drawRectangleGlyph(g, mathfont.em, 4 * mathfont.em, 0)
overlap = mathfont.em / 2
aFont[radicalCodePoint].verticalVariants = "radical size1 size2 size3"
aFont[radicalCodePoint].verticalComponents = \
(("size2", False, 0, mathfont.em, 3 * mathfont.em), \
("size1", True, mathfont.em, mathfont.em, 2 * mathfont.em))
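# Note (added for clarity, not in the original script): createStretchyRadical()
# gives U+221A three vertical size variants ("size1".."size3", roughly 2em-4em
# tall) plus a two-piece vertical assembly in which "size1" is flagged as the
# repeatable extender, so the radical can stretch to any height. Each font
# generated below then varies exactly one OpenType MATH radical constant while
# reusing these glyphs, and encodes the tested values in the font name.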
v1 = 25
v2 = 1 * mathfont.em
f = mathfont.create("radical-degreebottomraisepercent%d-rulethickness%d" % (v1, v2))
createStretchyRadical(f)
f.math.RadicalDegreeBottomRaisePercent = v1
f.math.RadicalDisplayStyleVerticalGap = 0
f.math.RadicalExtraAscender = 0
f.math.RadicalKernAfterDegree = 0
f.math.RadicalKernBeforeDegree = 0
f.math.RadicalRuleThickness = v2
f.math.RadicalVerticalGap = 0
mathfont.save(f)
v1 = 7 * mathfont.em
v2 = 1 * mathfont.em
f = mathfont.create("radical-displaystyleverticalgap%d-rulethickness%d" % (v1, v2))
createStretchyRadical(f)
f.math.RadicalDegreeBottomRaisePercent = 0
f.math.RadicalDisplayStyleVerticalGap = v1
f.math.RadicalExtraAscender = 0
f.math.RadicalKernAfterDegree = 0
f.math.RadicalKernBeforeDegree = 0
f.math.RadicalRuleThickness = v2
f.math.RadicalVerticalGap = 0
mathfont.save(f)
v1 = 3 * mathfont.em
v2 = 1 * mathfont.em
f = mathfont.create("radical-extraascender%d-rulethickness%d" % (v1, v2))
createStretchyRadical(f)
f.math.RadicalDegreeBottomRaisePercent = 0
f.math.RadicalDisplayStyleVerticalGap = 0
f.math.RadicalExtraAscender = v1
f.math.RadicalKernAfterDegree = 0
f.math.RadicalKernBeforeDegree = 0
f.math.RadicalRuleThickness = v2
f.math.RadicalVerticalGap = 0
mathfont.save(f)
v1 = 5 * mathfont.em
v2 = 1 * mathfont.em
f = mathfont.create("radical-kernafterdegreeminus%d-rulethickness%d" % (v1, v2))
createStretchyRadical(f)
f.math.RadicalDegreeBottomRaisePercent = 0
f.math.RadicalDisplayStyleVerticalGap = 0
f.math.RadicalExtraAscender = 0
f.math.RadicalKernAfterDegree = -v1
f.math.RadicalKernBeforeDegree = 0
f.math.RadicalRuleThickness = v2
f.math.RadicalVerticalGap = 0
mathfont.save(f)
v1 = 4 * mathfont.em
v2 = 1 * mathfont.em
f = mathfont.create("radical-kernbeforedegree%d-rulethickness%d" % (v1, v2))
createStretchyRadical(f)
f.math.RadicalDegreeBottomRaisePercent = 0
f.math.RadicalDisplayStyleVerticalGap = 0
f.math.RadicalExtraAscender = 0
f.math.RadicalKernAfterDegree = 0
f.math.RadicalKernBeforeDegree = v1
f.math.RadicalRuleThickness = v2
f.math.RadicalVerticalGap = 0
mathfont.save(f)
v = 8 * mathfont.em
f = mathfont.create("radical-rulethickness%d" % v)
createStretchyRadical(f)
f.math.RadicalDegreeBottomRaisePercent = 0
f.math.RadicalDisplayStyleVerticalGap = 0
f.math.RadicalExtraAscender = 0
f.math.RadicalKernAfterDegree = 0
f.math.RadicalKernBeforeDegree = 0
f.math.RadicalRuleThickness = v
f.math.RadicalVerticalGap = 0
mathfont.save(f)
v1 = 6 * mathfont.em
v2 = 1 * mathfont.em
f = mathfont.create("radical-verticalgap%d-rulethickness%d" % (v1, v2))
createStretchyRadical(f)
f.math.RadicalDegreeBottomRaisePercent = 0
f.math.RadicalDisplayStyleVerticalGap = 0
f.math.RadicalExtraAscender = 0
f.math.RadicalKernAfterDegree = 0
f.math.RadicalKernBeforeDegree = 0
f.math.RadicalRuleThickness = v2
f.math.RadicalVerticalGap = v1
mathfont.save(f)
| mpl-2.0 |
schets/LILAC | src/integrator/.ycm_extra_conf.py | 21 | 6201 | # This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Wno-long-long',
'-fexceptions',
'-I../utils',
'-I../',
# You 100% do NOT need -DUSE_CLANG_COMPLETER in your flags; only the YCM
# source code needs it.
'-DUSE_CLANG_COMPLETER',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++11',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
'-isystem',
'../BoostParts',
'-isystem',
# This path will only work on OS X, but extra paths that don't exist are not
# harmful
'/System/Library/Frameworks/Python.framework/Headers',
'-isystem',
'../llvm/include',
'-isystem',
'../llvm/tools/clang/include',
'-I',
'.',
'-I',
'./ClangCompleter',
'-isystem',
'./tests/gmock/gtest',
'-isystem',
'./tests/gmock/gtest/include',
'-isystem',
'./tests/gmock',
'-isystem',
'./tests/gmock/include'
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.hpp', 'h', '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
try:
final_flags.remove( '-stdlib=libc++' )
except ValueError:
pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
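# Illustrative note (added for clarity, not part of the original config): with no
# compilation database configured, a completion request for a hypothetical file
# falls back to the static flags above, with relative include paths rewritten
# against this script's directory:
#
#   FlagsForFile('/home/user/LILAC/src/integrator/foo.cpp')
#   # -> {'flags': ['-Wall', ..., '-I' + DirectoryOfThisScript() + '/../utils', ...],
#   #     'do_cache': True}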
| bsd-3-clause |
suiyuan2009/tensorflow | tensorflow/tools/docs/doc_generator_visitor.py | 68 | 8418 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A `traverse` visitor for processing documentation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.util import tf_inspect
class DocGeneratorVisitor(object):
"""A visitor that generates docs for a python object when __call__ed."""
def __init__(self, root_name=''):
"""Make a visitor.
As this visitor is starting its traversal at a module or class, it will not
be told the name of that object during traversal. `root_name` is the name it
should use for that object, effectively prefixing all names with
"root_name.".
Args:
root_name: The name of the root module/class.
"""
self.set_root_name(root_name)
self._index = {}
self._tree = {}
self._reverse_index = None
self._duplicates = None
self._duplicate_of = None
def set_root_name(self, root_name):
"""Sets the root name for subsequent __call__s."""
self._root_name = root_name or ''
self._prefix = (root_name + '.') if root_name else ''
@property
def index(self):
"""A map from fully qualified names to objects to be documented.
The index is filled when the visitor is passed to `traverse`.
Returns:
The index filled by traversal.
"""
return self._index
@property
def tree(self):
"""A map from fully qualified names to all its child names for traversal.
The full name to member names map is filled when the visitor is passed to
`traverse`.
Returns:
The full name to member name map filled by traversal.
"""
return self._tree
@property
def reverse_index(self):
"""A map from `id(object)` to the preferred fully qualified name.
This map only contains non-primitive objects (no numbers or strings) present
in `index` (for primitive objects, `id()` doesn't quite do the right thing).
It is computed when it, `duplicate_of`, or `duplicates` are first accessed.
Returns:
The `id(object)` to full name map.
"""
self._maybe_find_duplicates()
return self._reverse_index
@property
def duplicate_of(self):
"""A map from duplicate full names to a preferred fully qualified name.
This map only contains names that are not themself a preferred name.
It is computed when it, `reverse_index`, or `duplicates` are first accessed.
Returns:
The map from duplicate name to preferred name.
"""
self._maybe_find_duplicates()
return self._duplicate_of
@property
def duplicates(self):
"""A map from preferred full names to a list of all names for this symbol.
This function returns a map from preferred (master) name for a symbol to a
lexicographically sorted list of all aliases for that name (incl. the master
name). Symbols without duplicate names do not appear in this map.
It is computed when it, `reverse_index`, or `duplicate_of` are first
accessed.
Returns:
The map from master name to list of all duplicate names.
"""
self._maybe_find_duplicates()
return self._duplicates
def _add_prefix(self, name):
"""Adds the root name to a name."""
return self._prefix + name if name else self._root_name
def __call__(self, parent_name, parent, children):
"""Visitor interface, see `tensorflow/tools/common:traverse` for details.
This method is called for each symbol found in a traversal using
`tensorflow/tools/common:traverse`. It should not be called directly in
user code.
Args:
parent_name: The fully qualified name of a symbol found during traversal.
parent: The Python object referenced by `parent_name`.
children: A list of `(name, py_object)` pairs enumerating, in alphabetical
order, the children (as determined by `tf_inspect.getmembers`) of
`parent`. `name` is the local name of `py_object` in `parent`.
Raises:
RuntimeError: If this visitor is called with a `parent` that is not a
class or module.
"""
parent_name = self._add_prefix(parent_name)
self._index[parent_name] = parent
self._tree[parent_name] = []
if not (tf_inspect.ismodule(parent) or tf_inspect.isclass(parent)):
raise RuntimeError('Unexpected type in visitor -- %s: %r' % (parent_name,
parent))
for i, (name, child) in enumerate(list(children)):
# Don't document __metaclass__
if name in ['__metaclass__']:
del children[i]
continue
full_name = '.'.join([parent_name, name]) if parent_name else name
self._index[full_name] = child
self._tree[parent_name].append(name)
def _maybe_find_duplicates(self):
"""Compute data structures containing information about duplicates.
Find duplicates in `index` and decide on one to be the "master" name.
Computes a reverse_index mapping each object id to its master name.
Also computes a map `duplicate_of` from aliases to their master name (the
master name itself has no entry in this map), and a map `duplicates` from
master names to a lexicographically sorted list of all aliases for that name
(incl. the master name).
All these are computed and set as fields if they haven't already.
"""
if self._reverse_index is not None:
return
# Maps the id of a symbol to its fully qualified name. For symbols that have
# several aliases, this map contains the first one found.
# We use id(py_object) to get a hashable value for py_object. Note all
# objects in _index are in memory at the same time so this is safe.
reverse_index = {}
# Make a preliminary duplicates map. For all sets of duplicate names, it
# maps the first name found to a list of all duplicate names.
raw_duplicates = {}
for full_name, py_object in six.iteritems(self._index):
# We cannot use the duplicate mechanism for some constants, since e.g.,
# id(c1) == id(c2) with c1=1, c2=1. This is unproblematic since constants
# have no usable docstring and won't be documented automatically.
if (py_object is not None and
not isinstance(py_object, six.integer_types + six.string_types +
(six.binary_type, six.text_type, float, complex, bool))
and py_object is not ()):
object_id = id(py_object)
if object_id in reverse_index:
master_name = reverse_index[object_id]
if master_name in raw_duplicates:
raw_duplicates[master_name].append(full_name)
else:
raw_duplicates[master_name] = [master_name, full_name]
else:
reverse_index[object_id] = full_name
# Decide on master names, rewire duplicates and make a duplicate_of map
# mapping all non-master duplicates to the master name. The master symbol
# does not have an entry in this map.
duplicate_of = {}
# Duplicates maps the main symbols to the set of all duplicates of that
# symbol (incl. itself).
duplicates = {}
for names in raw_duplicates.values():
names = sorted(names)
# Choose the lexicographically first name with the minimum number of
# submodules. This will prefer highest level namespace for any symbol.
master_name = min(names, key=lambda name: name.count('.'))
duplicates[master_name] = names
for name in names:
if name != master_name:
duplicate_of[name] = master_name
# Set the reverse index to the canonical name.
reverse_index[id(self._index[master_name])] = master_name
self._duplicate_of = duplicate_of
self._duplicates = duplicates
self._reverse_index = reverse_index
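# --- Illustrative sketch (not part of the original visitor module) ---
# How the master-name rule above behaves: the shallowest alias (fewest dots),
# ties broken lexicographically, wins. The alias list below is hypothetical.
if __name__ == '__main__':
    names = sorted(['tf.compat.v1.foo.Bar', 'tf.foo.Bar', 'tf.Bar'])
    master = min(names, key=lambda name: name.count('.'))
    print(master)  # -> 'tf.Bar'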
| apache-2.0 |
dmccue/ansible | v1/ansible/runner/lookup_plugins/flattened.py | 122 | 2428 | # (c) 2013, Serge van Ginderachter <serge@vanginderachter.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import ansible.utils as utils
import ansible.errors as errors
def check_list_of_one_list(term):
# make sure term is not a list of one (list of one..) item
# return the final non list item if so
if isinstance(term,list) and len(term) == 1:
term = term[0]
if isinstance(term,list):
term = check_list_of_one_list(term)
return term
class LookupModule(object):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
def flatten(self, terms, inject):
ret = []
for term in terms:
term = check_list_of_one_list(term)
if term == 'None' or term == 'null':
# ignore undefined items
break
if isinstance(term, basestring):
# convert a variable to a list
term2 = utils.listify_lookup_plugin_terms(term, self.basedir, inject)
# but avoid converting a plain string to a list of one string
if term2 != [ term ]:
term = term2
if isinstance(term, list):
# if it's a list, check recursively for items that are a list
term = self.flatten(term, inject)
ret.extend(term)
else:
ret.append(term)
return ret
def run(self, terms, inject=None, **kwargs):
# see if the string represents a list and convert to list if so
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
if not isinstance(terms, list):
raise errors.AnsibleError("with_flattened expects a list")
ret = self.flatten(terms, inject)
return ret
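# --- Illustrative sketch (not part of the original plugin) ---
# check_list_of_one_list() only unwraps single-element nesting; everything
# else is left for flatten() to walk recursively:
if __name__ == '__main__':
    print(check_list_of_one_list([['x']]))     # -> 'x'
    print(check_list_of_one_list(['x', 'y']))  # -> ['x', 'y'] (unchanged)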
| gpl-3.0 |
peterbe/bramble | vendor-local/lib/python/werkzeug/templates.py | 95 | 13713 | # -*- coding: utf-8 -*-
r"""
werkzeug.templates
~~~~~~~~~~~~~~~~~~
A minimal template engine.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD License.
"""
import sys
import re
import __builtin__ as builtins
from compiler import ast, parse
from compiler.pycodegen import ModuleCodeGenerator
from tokenize import PseudoToken
from werkzeug import urls, utils
from werkzeug._internal import _decode_unicode
from werkzeug.datastructures import MultiDict
from warnings import warn
warn(DeprecationWarning('werkzeug.templates is deprecated and '
'will be removed in Werkzeug 1.0'))
# Copyright notice: The `parse_data` method uses the string interpolation
# algorithm by Ka-Ping Yee which originally was part of `Itpl20.py`_.
#
# .. _Itpl20.py: http://lfw.org/python/Itpl20.py
token_re = re.compile('%s|%s(?s)' % (
r'[uU]?[rR]?("""|\'\'\')((?<!\\)\\\1|.)*?\1',
PseudoToken
))
directive_re = re.compile(r'(?<!\\)<%(?:(#)|(py(?:thon)?\b)|'
r'(?:\s*(\w+))\s*)(.*?)\s*%>\n?(?s)')
escape_re = re.compile(r'\\\n|\\(\\|<%)')
namestart_chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_'
undefined = type('UndefinedType', (object,), {
'__iter__': lambda x: iter(()),
'__repr__': lambda x: 'Undefined',
'__str__': lambda x: ''
})()
runtime_vars = frozenset(['Undefined', '__to_unicode', '__context',
'__write', '__write_many'])
def call_stmt(func, args, lineno):
return ast.CallFunc(ast.Name(func, lineno=lineno),
args, lineno=lineno)
def tokenize(source, filename):
escape = escape_re.sub
escape_repl = lambda m: m.group(1) or ''
lineno = 1
pos = 0
for match in directive_re.finditer(source):
start, end = match.span()
if start > pos:
data = source[pos:start]
yield lineno, 'data', escape(escape_repl, data)
lineno += data.count('\n')
is_comment, is_code, cmd, args = match.groups()
if is_code:
yield lineno, 'code', args
elif not is_comment:
yield lineno, 'cmd', (cmd, args)
lineno += source[start:end].count('\n')
pos = end
if pos < len(source):
yield lineno, 'data', escape(escape_repl, source[pos:])
def transform(node, filename):
root = ast.Module(None, node, lineno=1)
nodes = [root]
while nodes:
node = nodes.pop()
node.filename = filename
if node.__class__ in (ast.Printnl, ast.Print):
node.dest = ast.Name('__context')
elif node.__class__ is ast.Const and isinstance(node.value, str):
try:
node.value.decode('ascii')
except UnicodeError:
node.value = node.value.decode('utf-8')
nodes.extend(node.getChildNodes())
return root
class TemplateSyntaxError(SyntaxError):
def __init__(self, msg, filename, lineno):
from linecache import getline
l = getline(filename, lineno)
SyntaxError.__init__(self, msg, (filename, lineno, len(l) or 1, l))
class Parser(object):
def __init__(self, gen, filename):
self.gen = gen
self.filename = filename
self.lineno = 1
def fail(self, msg):
raise TemplateSyntaxError(msg, self.filename, self.lineno)
def parse_python(self, expr, type='exec'):
if isinstance(expr, unicode):
expr = '\xef\xbb\xbf' + expr.encode('utf-8')
try:
node = parse(expr, type)
except SyntaxError, e:
raise TemplateSyntaxError(str(e), self.filename,
self.lineno + e.lineno - 1)
nodes = [node]
while nodes:
n = nodes.pop()
if hasattr(n, 'lineno'):
n.lineno = (n.lineno or 1) + self.lineno - 1
nodes.extend(n.getChildNodes())
return node.node
def parse(self, needle=()):
start_lineno = self.lineno
result = []
add = result.append
for self.lineno, token, value in self.gen:
if token == 'data':
add(self.parse_data(value))
elif token == 'code':
add(self.parse_code(value.splitlines()))
elif token == 'cmd':
name, args = value
if name in needle:
return name, args, ast.Stmt(result, lineno=start_lineno)
if name in ('for', 'while'):
add(self.parse_loop(args, name))
elif name == 'if':
add(self.parse_if(args))
else:
self.fail('unknown directive %s' % name)
if needle:
self.fail('unexpected end of template')
return ast.Stmt(result, lineno=start_lineno)
def parse_loop(self, args, type):
rv = self.parse_python('%s %s: pass' % (type, args), 'exec').nodes[0]
tag, value, rv.body = self.parse(('end' + type, 'else'))
if value:
self.fail('unexpected data after ' + tag)
if tag == 'else':
tag, value, rv.else_ = self.parse(('end' + type,))
if value:
self.fail('unexpected data after else')
return rv
def parse_if(self, args):
cond = self.parse_python('if %s: pass' % args).nodes[0]
tag, value, body = self.parse(('else', 'elif', 'endif'))
cond.tests[0] = (cond.tests[0][0], body)
while 1:
if tag == 'else':
if value:
self.fail('unexpected data after else')
tag, value, cond.else_ = self.parse(('endif',))
elif tag == 'elif':
expr = self.parse_python(value, 'eval')
tag, value, body = self.parse(('else', 'elif', 'endif'))
cond.tests.append((expr, body))
continue
break
if value:
self.fail('unexpected data after endif')
return cond
def parse_code(self, lines):
margin = sys.maxint
for line in lines[1:]:
content = len(line.lstrip())
if content:
indent = len(line) - content
margin = min(margin, indent)
if lines:
lines[0] = lines[0].lstrip()
if margin < sys.maxint:
for i in xrange(1, len(lines)):
lines[i] = lines[i][margin:]
while lines and not lines[-1]:
lines.pop()
while lines and not lines[0]:
lines.pop(0)
return self.parse_python('\n'.join(lines))
def parse_data(self, text):
start_lineno = lineno = self.lineno
pos = 0
end = len(text)
nodes = []
def match_or_fail(pos):
match = token_re.match(text, pos)
if match is None:
self.fail('invalid syntax')
return match.group().strip(), match.end()
def write_expr(code):
node = self.parse_python(code, 'eval')
nodes.append(call_stmt('__to_unicode', [node], lineno))
return code.count('\n')
def write_data(value):
if value:
nodes.append(ast.Const(value, lineno=lineno))
return value.count('\n')
return 0
while 1:
offset = text.find('$', pos)
if offset < 0:
break
next = text[offset + 1]
if next == '{':
lineno += write_data(text[pos:offset])
pos = offset + 2
level = 1
while level:
token, pos = match_or_fail(pos)
if token in ('{', '}'):
level += token == '{' and 1 or -1
lineno += write_expr(text[offset + 2:pos - 1])
elif next in namestart_chars:
lineno += write_data(text[pos:offset])
token, pos = match_or_fail(offset + 1)
while pos < end:
if text[pos] == '.' and pos + 1 < end and \
text[pos + 1] in namestart_chars:
token, pos = match_or_fail(pos + 1)
elif text[pos] in '([':
pos += 1
level = 1
while level:
token, pos = match_or_fail(pos)
if token in ('(', ')', '[', ']'):
level += token in '([' and 1 or -1
else:
break
lineno += write_expr(text[offset + 1:pos])
else:
lineno += write_data(text[pos:offset + 1])
pos = offset + 1 + (next == '$')
write_data(text[pos:])
return ast.Discard(call_stmt(len(nodes) == 1 and '__write' or
'__write_many', nodes, start_lineno),
lineno=start_lineno)
class Context(object):
def __init__(self, namespace, charset, errors):
self.charset = charset
self.errors = errors
self._namespace = namespace
self._buffer = []
self._write = self._buffer.append
_extend = self._buffer.extend
self.runtime = dict(
Undefined=undefined,
__to_unicode=self.to_unicode,
__context=self,
__write=self._write,
__write_many=lambda *a: _extend(a)
)
def write(self, value):
self._write(self.to_unicode(value))
def to_unicode(self, value):
if isinstance(value, str):
return _decode_unicode(value, self.charset, self.errors)
return unicode(value)
def get_value(self, as_unicode=True):
rv = u''.join(self._buffer)
if not as_unicode:
return rv.encode(self.charset, self.errors)
return rv
def __getitem__(self, key, default=undefined):
try:
return self._namespace[key]
except KeyError:
return getattr(builtins, key, default)
def get(self, key, default=None):
return self.__getitem__(key, default)
def __setitem__(self, key, value):
self._namespace[key] = value
def __delitem__(self, key):
del self._namespace[key]
class TemplateCodeGenerator(ModuleCodeGenerator):
def __init__(self, node, filename):
ModuleCodeGenerator.__init__(self, transform(node, filename))
def _nameOp(self, prefix, name):
if name in runtime_vars:
return self.emit(prefix + '_GLOBAL', name)
return ModuleCodeGenerator._nameOp(self, prefix, name)
class Template(object):
"""Represents a simple text based template. It's a good idea to load such
templates from files on the file system to get better debug output.
"""
default_context = {
'escape': utils.escape,
'url_quote': urls.url_quote,
'url_quote_plus': urls.url_quote_plus,
'url_encode': urls.url_encode
}
def __init__(self, source, filename='<template>', charset='utf-8',
errors='strict', unicode_mode=True):
if isinstance(source, str):
source = _decode_unicode(source, charset, errors)
if isinstance(filename, unicode):
filename = filename.encode('utf-8')
node = Parser(tokenize(u'\n'.join(source.splitlines()),
filename), filename).parse()
self.code = TemplateCodeGenerator(node, filename).getCode()
self.filename = filename
self.charset = charset
self.errors = errors
self.unicode_mode = unicode_mode
@classmethod
def from_file(cls, file, charset='utf-8', errors='strict',
unicode_mode=True):
"""Load a template from a file.
.. versionchanged:: 0.5
The encoding parameter was renamed to charset.
:param file: a filename or file object to load the template from.
:param charset: the charset of the template to load.
:param errors: the error behavior of the charset decoding.
:param unicode_mode: set to `False` to disable unicode mode.
:return: a template
"""
close = False
f = file
if isinstance(file, basestring):
f = open(file, 'r')
close = True
try:
data = _decode_unicode(f.read(), charset, errors)
finally:
if close:
f.close()
return cls(data, getattr(f, 'name', '<template>'), charset,
errors, unicode_mode)
def render(self, *args, **kwargs):
"""This function accepts either a dict or some keyword arguments which
will then be the context the template is evaluated in. The return
value will be the rendered template.
:param context: the function accepts the same arguments as the
:class:`dict` constructor.
:return: the rendered template as string
"""
ns = self.default_context.copy()
if len(args) == 1 and isinstance(args[0], MultiDict):
ns.update(args[0].to_dict(flat=True))
else:
ns.update(dict(*args))
if kwargs:
ns.update(kwargs)
context = Context(ns, self.charset, self.errors)
exec self.code in context.runtime, context
return context.get_value(self.unicode_mode)
def substitute(self, *args, **kwargs):
"""For API compatibility with `string.Template`."""
return self.render(*args, **kwargs)
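# --- Illustrative sketch (not part of the original module) ---
# Minimal use of the deprecated engine, assuming a Python 2 environment with
# Werkzeug < 1.0 installed; '$name' and '${expr}' are interpolations:
if __name__ == '__main__':
    t = Template(u'Hello ${name.upper()}, you have $count new messages.')
    print t.render(name='world', count=3)  # Hello WORLD, you have 3 new messages.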
| mpl-2.0 |
shsingh/ansible | lib/ansible/modules/cloud/vmware/_vmware_host_feature_facts.py | 21 | 4460 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_feature_facts
deprecated:
removed_in: '2.13'
why: Deprecated in favour of C(_info) module.
alternative: Use M(vmware_host_feature_info) instead.
short_description: Gathers facts about an ESXi host's feature capability information
description:
- This module can be used to gather facts about an ESXi host's feature capability information when ESXi hostname or Cluster name is given.
version_added: 2.8
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of the cluster from all host systems to be used for facts gathering.
- If C(esxi_hostname) is not given, this parameter is required.
type: str
esxi_hostname:
description:
- ESXi hostname to gather facts from.
- If C(cluster_name) is not given, this parameter is required.
type: str
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Gather feature capability facts about all ESXi Hosts in given Cluster
vmware_host_feature_facts:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: cluster_name
delegate_to: localhost
register: all_cluster_hosts_facts
- name: Check if ESXi is vulnerable for Speculative Store Bypass Disable (SSBD) vulnerability
vmware_host_feature_facts:
hostname: "{{ vcenter_server }}"
username: "{{ vcenter_user }}"
password: "{{ vcenter_pass }}"
validate_certs: no
esxi_hostname: "{{ esxi_hostname }}"
register: features_set
- set_fact:
ssbd : "{{ item.value }}"
loop: "{{ features_set.host_feature_facts[esxi_hostname] |json_query(name) }}"
vars:
name: "[?key=='cpuid.SSBD']"
- assert:
that:
- ssbd|int == 1
when: ssbd is defined
'''
RETURN = r'''
hosts_feature_facts:
description: metadata about host's feature capability information
returned: always
type: dict
sample: {
"10.76.33.226": [
{
"feature_name": "cpuid.3DNOW",
"key": "cpuid.3DNOW",
"value": "0"
},
{
"feature_name": "cpuid.3DNOWPLUS",
"key": "cpuid.3DNOWPLUS",
"value": "0"
},
]
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
class FeatureCapabilityFactsManager(PyVmomi):
def __init__(self, module):
super(FeatureCapabilityFactsManager, self).__init__(module)
cluster_name = self.params.get('cluster_name', None)
esxi_host_name = self.params.get('esxi_hostname', None)
self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
def gather_host_feature_facts(self):
host_feature_facts = dict()
for host in self.hosts:
host_feature_capabilities = host.config.featureCapability
capability = []
for fc in host_feature_capabilities:
temp_dict = {
'key': fc.key,
'feature_name': fc.featureName,
'value': fc.value,
}
capability.append(temp_dict)
host_feature_facts[host.name] = capability
return host_feature_facts
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
cluster_name=dict(type='str', required=False),
esxi_hostname=dict(type='str', required=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
required_one_of=[
['cluster_name', 'esxi_hostname'],
],
supports_check_mode=True,
)
host_capability_manager = FeatureCapabilityFactsManager(module)
module.exit_json(changed=False,
hosts_feature_facts=host_capability_manager.gather_host_feature_facts())
if __name__ == "__main__":
main()
| gpl-3.0 |
defionscode/ansible | test/units/modules/network/nxos/test_nxos_evpn_vni.py | 50 | 2906 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.nxos import nxos_evpn_vni
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosEvpnVniModule(TestNxosModule):
module = nxos_evpn_vni
def setUp(self):
super(TestNxosEvpnVniModule, self).setUp()
self.mock_run_commands = patch('ansible.modules.network.nxos.nxos_evpn_vni.run_commands')
self.run_commands = self.mock_run_commands.start()
self.mock_load_config = patch('ansible.modules.network.nxos.nxos_evpn_vni.load_config')
self.load_config = self.mock_load_config.start()
self.mock_get_config = patch('ansible.modules.network.nxos.nxos_evpn_vni.get_config')
self.get_config = self.mock_get_config.start()
def tearDown(self):
super(TestNxosEvpnVniModule, self).tearDown()
self.mock_run_commands.stop()
self.mock_load_config.stop()
self.mock_get_config.stop()
def load_fixtures(self, commands=None, device=''):
self.get_config.return_value = load_fixture('', 'nxos_evpn_vni_config.cfg')
self.load_config.return_value = None
def test_nxos_evpn_vni_present(self):
set_module_args(dict(vni='6000',
route_target_import='5000:10',
state='present'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['evpn',
'vni 6000 l2',
'route-target import 5000:10',
'no route-target import auto'])
def test_nxos_evpn_vni_absent_not_existing(self):
set_module_args(dict(vni='12000', state='absent'))
result = self.execute_module(changed=False)
self.assertEqual(result['commands'], [])
def test_nxos_evpn_vni_absent_existing(self):
set_module_args(dict(vni='6000', state='absent'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['evpn', 'no vni 6000 l2'])
| gpl-3.0 |
tequa/ammisoft | ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/fractions.py | 252 | 22390 | # Originally contributed by Sjoerd Mullender.
# Significantly modified by Jeffrey Yasskin <jyasskin at gmail.com>.
"""Rational, infinite-precision, real numbers."""
from __future__ import division
from decimal import Decimal
import math
import numbers
import operator
import re
__all__ = ['Fraction', 'gcd']
Rational = numbers.Rational
def gcd(a, b):
"""Calculate the Greatest Common Divisor of a and b.
Unless b==0, the result will have the same sign as b (so that when
b is divided by it, the result comes out positive).
"""
while b:
a, b = b, a%b
return a
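# Illustrative note (not part of the stdlib module): the sign of the result
# follows b, e.g. gcd(4, -6) == -2 while gcd(-4, 6) == 2.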
_RATIONAL_FORMAT = re.compile(r"""
\A\s* # optional whitespace at the start, then
(?P<sign>[-+]?) # an optional sign, then
(?=\d|\.\d) # lookahead for digit or .digit
(?P<num>\d*) # numerator (possibly empty)
(?: # followed by
(?:/(?P<denom>\d+))? # an optional denominator
| # or
(?:\.(?P<decimal>\d*))? # an optional fractional part
(?:E(?P<exp>[-+]?\d+))? # and optional exponent
)
\s*\Z # and optional whitespace to finish
""", re.VERBOSE | re.IGNORECASE)
class Fraction(Rational):
"""This class implements rational numbers.
In the two-argument form of the constructor, Fraction(8, 6) will
produce a rational number equivalent to 4/3. Both arguments must
be Rational. The numerator defaults to 0 and the denominator
defaults to 1 so that Fraction(3) == 3 and Fraction() == 0.
Fractions can also be constructed from:
- numeric strings similar to those accepted by the
float constructor (for example, '-2.3' or '1e10')
- strings of the form '123/456'
- float and Decimal instances
- other Rational instances (including integers)
"""
__slots__ = ('_numerator', '_denominator')
# We're immutable, so use __new__ not __init__
def __new__(cls, numerator=0, denominator=None):
"""Constructs a Fraction.
Takes a string like '3/2' or '1.5', another Rational instance, a
numerator/denominator pair, or a float.
Examples
--------
>>> Fraction(10, -8)
Fraction(-5, 4)
>>> Fraction(Fraction(1, 7), 5)
Fraction(1, 35)
>>> Fraction(Fraction(1, 7), Fraction(2, 3))
Fraction(3, 14)
>>> Fraction('314')
Fraction(314, 1)
>>> Fraction('-35/4')
Fraction(-35, 4)
>>> Fraction('3.1415') # conversion from numeric string
Fraction(6283, 2000)
>>> Fraction('-47e-2') # string may include a decimal exponent
Fraction(-47, 100)
>>> Fraction(1.47) # direct construction from float (exact conversion)
Fraction(6620291452234629, 4503599627370496)
>>> Fraction(2.25)
Fraction(9, 4)
>>> Fraction(Decimal('1.47'))
Fraction(147, 100)
"""
self = super(Fraction, cls).__new__(cls)
if denominator is None:
if isinstance(numerator, Rational):
self._numerator = numerator.numerator
self._denominator = numerator.denominator
return self
elif isinstance(numerator, float):
# Exact conversion from float
value = Fraction.from_float(numerator)
self._numerator = value._numerator
self._denominator = value._denominator
return self
elif isinstance(numerator, Decimal):
value = Fraction.from_decimal(numerator)
self._numerator = value._numerator
self._denominator = value._denominator
return self
elif isinstance(numerator, basestring):
# Handle construction from strings.
m = _RATIONAL_FORMAT.match(numerator)
if m is None:
raise ValueError('Invalid literal for Fraction: %r' %
numerator)
numerator = int(m.group('num') or '0')
denom = m.group('denom')
if denom:
denominator = int(denom)
else:
denominator = 1
decimal = m.group('decimal')
if decimal:
scale = 10**len(decimal)
numerator = numerator * scale + int(decimal)
denominator *= scale
exp = m.group('exp')
if exp:
exp = int(exp)
if exp >= 0:
numerator *= 10**exp
else:
denominator *= 10**-exp
if m.group('sign') == '-':
numerator = -numerator
else:
raise TypeError("argument should be a string "
"or a Rational instance")
elif (isinstance(numerator, Rational) and
isinstance(denominator, Rational)):
numerator, denominator = (
numerator.numerator * denominator.denominator,
denominator.numerator * numerator.denominator
)
else:
raise TypeError("both arguments should be "
"Rational instances")
if denominator == 0:
raise ZeroDivisionError('Fraction(%s, 0)' % numerator)
g = gcd(numerator, denominator)
self._numerator = numerator // g
self._denominator = denominator // g
return self
@classmethod
def from_float(cls, f):
"""Converts a finite float to a rational number, exactly.
Beware that Fraction.from_float(0.3) != Fraction(3, 10).
"""
if isinstance(f, numbers.Integral):
return cls(f)
elif not isinstance(f, float):
raise TypeError("%s.from_float() only takes floats, not %r (%s)" %
(cls.__name__, f, type(f).__name__))
if math.isnan(f) or math.isinf(f):
raise TypeError("Cannot convert %r to %s." % (f, cls.__name__))
return cls(*f.as_integer_ratio())
@classmethod
def from_decimal(cls, dec):
"""Converts a finite Decimal instance to a rational number, exactly."""
from decimal import Decimal
if isinstance(dec, numbers.Integral):
dec = Decimal(int(dec))
elif not isinstance(dec, Decimal):
raise TypeError(
"%s.from_decimal() only takes Decimals, not %r (%s)" %
(cls.__name__, dec, type(dec).__name__))
if not dec.is_finite():
# Catches infinities and nans.
raise TypeError("Cannot convert %s to %s." % (dec, cls.__name__))
sign, digits, exp = dec.as_tuple()
digits = int(''.join(map(str, digits)))
if sign:
digits = -digits
if exp >= 0:
return cls(digits * 10 ** exp)
else:
return cls(digits, 10 ** -exp)
def limit_denominator(self, max_denominator=1000000):
"""Closest Fraction to self with denominator at most max_denominator.
>>> Fraction('3.141592653589793').limit_denominator(10)
Fraction(22, 7)
>>> Fraction('3.141592653589793').limit_denominator(100)
Fraction(311, 99)
>>> Fraction(4321, 8765).limit_denominator(10000)
Fraction(4321, 8765)
"""
# Algorithm notes: For any real number x, define a *best upper
# approximation* to x to be a rational number p/q such that:
#
# (1) p/q >= x, and
# (2) if p/q > r/s >= x then s > q, for any rational r/s.
#
# Define *best lower approximation* similarly. Then it can be
# proved that a rational number is a best upper or lower
# approximation to x if, and only if, it is a convergent or
# semiconvergent of the (unique shortest) continued fraction
# associated to x.
#
# To find a best rational approximation with denominator <= M,
# we find the best upper and lower approximations with
# denominator <= M and take whichever of these is closer to x.
# In the event of a tie, the bound with smaller denominator is
# chosen. If both denominators are equal (which can happen
# only when max_denominator == 1 and self is midway between
# two integers) the lower bound---i.e., the floor of self, is
# taken.
if max_denominator < 1:
raise ValueError("max_denominator should be at least 1")
if self._denominator <= max_denominator:
return Fraction(self)
p0, q0, p1, q1 = 0, 1, 1, 0
n, d = self._numerator, self._denominator
while True:
a = n//d
q2 = q0+a*q1
if q2 > max_denominator:
break
p0, q0, p1, q1 = p1, q1, p0+a*p1, q2
n, d = d, n-a*d
k = (max_denominator-q0)//q1
bound1 = Fraction(p0+k*p1, q0+k*q1)
bound2 = Fraction(p1, q1)
if abs(bound2 - self) <= abs(bound1-self):
return bound2
else:
return bound1
@property
def numerator(a):
return a._numerator
@property
def denominator(a):
return a._denominator
def __repr__(self):
"""repr(self)"""
return ('Fraction(%s, %s)' % (self._numerator, self._denominator))
def __str__(self):
"""str(self)"""
if self._denominator == 1:
return str(self._numerator)
else:
return '%s/%s' % (self._numerator, self._denominator)
def _operator_fallbacks(monomorphic_operator, fallback_operator):
"""Generates forward and reverse operators given a purely-rational
operator and a function from the operator module.
Use this like:
__op__, __rop__ = _operator_fallbacks(just_rational_op, operator.op)
In general, we want to implement the arithmetic operations so
that mixed-mode operations either call an implementation whose
author knew about the types of both arguments, or convert both
to the nearest built in type and do the operation there. In
Fraction, that means that we define __add__ and __radd__ as:
def __add__(self, other):
# Both types have numerators/denominator attributes,
# so do the operation directly
if isinstance(other, (int, long, Fraction)):
return Fraction(self.numerator * other.denominator +
other.numerator * self.denominator,
self.denominator * other.denominator)
# float and complex don't have those operations, but we
# know about those types, so special case them.
elif isinstance(other, float):
return float(self) + other
elif isinstance(other, complex):
return complex(self) + other
# Let the other type take over.
return NotImplemented
def __radd__(self, other):
# radd handles more types than add because there's
# nothing left to fall back to.
if isinstance(other, Rational):
return Fraction(self.numerator * other.denominator +
other.numerator * self.denominator,
self.denominator * other.denominator)
elif isinstance(other, Real):
return float(other) + float(self)
elif isinstance(other, Complex):
return complex(other) + complex(self)
return NotImplemented
There are 5 different cases for a mixed-type addition on
Fraction. I'll refer to all of the above code that doesn't
refer to Fraction, float, or complex as "boilerplate". 'r'
will be an instance of Fraction, which is a subtype of
Rational (r : Fraction <: Rational), and b : B <:
Complex. The first three involve 'r + b':
1. If B <: Fraction, int, float, or complex, we handle
that specially, and all is well.
2. If Fraction falls back to the boilerplate code, and it
were to return a value from __add__, we'd miss the
possibility that B defines a more intelligent __radd__,
so the boilerplate should return NotImplemented from
__add__. In particular, we don't handle Rational
here, even though we could get an exact answer, in case
the other type wants to do something special.
3. If B <: Fraction, Python tries B.__radd__ before
Fraction.__add__. This is ok, because it was
implemented with knowledge of Fraction, so it can
handle those instances before delegating to Real or
Complex.
The next two situations describe 'b + r'. We assume that b
didn't know about Fraction in its implementation, and that it
uses similar boilerplate code:
4. If B <: Rational, then __radd_ converts both to the
builtin rational type (hey look, that's us) and
proceeds.
5. Otherwise, __radd__ tries to find the nearest common
base ABC, and fall back to its builtin type. Since this
class doesn't subclass a concrete type, there's no
implementation to fall back to, so we need to try as
hard as possible to return an actual value, or the user
will get a TypeError.
"""
def forward(a, b):
if isinstance(b, (int, long, Fraction)):
return monomorphic_operator(a, b)
elif isinstance(b, float):
return fallback_operator(float(a), b)
elif isinstance(b, complex):
return fallback_operator(complex(a), b)
else:
return NotImplemented
forward.__name__ = '__' + fallback_operator.__name__ + '__'
forward.__doc__ = monomorphic_operator.__doc__
def reverse(b, a):
if isinstance(a, Rational):
# Includes ints.
return monomorphic_operator(a, b)
elif isinstance(a, numbers.Real):
return fallback_operator(float(a), float(b))
elif isinstance(a, numbers.Complex):
return fallback_operator(complex(a), complex(b))
else:
return NotImplemented
reverse.__name__ = '__r' + fallback_operator.__name__ + '__'
reverse.__doc__ = monomorphic_operator.__doc__
return forward, reverse
def _add(a, b):
"""a + b"""
return Fraction(a.numerator * b.denominator +
b.numerator * a.denominator,
a.denominator * b.denominator)
__add__, __radd__ = _operator_fallbacks(_add, operator.add)
def _sub(a, b):
"""a - b"""
return Fraction(a.numerator * b.denominator -
b.numerator * a.denominator,
a.denominator * b.denominator)
__sub__, __rsub__ = _operator_fallbacks(_sub, operator.sub)
def _mul(a, b):
"""a * b"""
return Fraction(a.numerator * b.numerator, a.denominator * b.denominator)
__mul__, __rmul__ = _operator_fallbacks(_mul, operator.mul)
def _div(a, b):
"""a / b"""
return Fraction(a.numerator * b.denominator,
a.denominator * b.numerator)
__truediv__, __rtruediv__ = _operator_fallbacks(_div, operator.truediv)
__div__, __rdiv__ = _operator_fallbacks(_div, operator.div)
def __floordiv__(a, b):
"""a // b"""
# Will be math.floor(a / b) in 3.0.
div = a / b
if isinstance(div, Rational):
# trunc(math.floor(div)) doesn't work if the rational is
# more precise than a float because the intermediate
# rounding may cross an integer boundary.
return div.numerator // div.denominator
else:
return math.floor(div)
def __rfloordiv__(b, a):
"""a // b"""
# Will be math.floor(a / b) in 3.0.
div = a / b
if isinstance(div, Rational):
# trunc(math.floor(div)) doesn't work if the rational is
# more precise than a float because the intermediate
# rounding may cross an integer boundary.
return div.numerator // div.denominator
else:
return math.floor(div)
def __mod__(a, b):
"""a % b"""
div = a // b
return a - b * div
def __rmod__(b, a):
"""a % b"""
div = a // b
return a - b * div
def __pow__(a, b):
"""a ** b
If b is not an integer, the result will be a float or complex
since roots are generally irrational. If b is an integer, the
result will be rational.
"""
if isinstance(b, Rational):
if b.denominator == 1:
power = b.numerator
if power >= 0:
return Fraction(a._numerator ** power,
a._denominator ** power)
else:
return Fraction(a._denominator ** -power,
a._numerator ** -power)
else:
# A fractional power will generally produce an
# irrational number.
return float(a) ** float(b)
else:
return float(a) ** b
def __rpow__(b, a):
"""a ** b"""
if b._denominator == 1 and b._numerator >= 0:
# If a is an int, keep it that way if possible.
return a ** b._numerator
if isinstance(a, Rational):
return Fraction(a.numerator, a.denominator) ** b
if b._denominator == 1:
return a ** b._numerator
return a ** float(b)
def __pos__(a):
"""+a: Coerces a subclass instance to Fraction"""
return Fraction(a._numerator, a._denominator)
def __neg__(a):
"""-a"""
return Fraction(-a._numerator, a._denominator)
def __abs__(a):
"""abs(a)"""
return Fraction(abs(a._numerator), a._denominator)
def __trunc__(a):
"""trunc(a)"""
if a._numerator < 0:
return -(-a._numerator // a._denominator)
else:
return a._numerator // a._denominator
def __hash__(self):
"""hash(self)
Tricky because values that are exactly representable as a
float must have the same hash as that float.
"""
# XXX since this method is expensive, consider caching the result
if self._denominator == 1:
# Get integers right.
return hash(self._numerator)
# Expensive check, but definitely correct.
if self == float(self):
return hash(float(self))
else:
# Use tuple's hash to avoid a high collision rate on
# simple fractions.
return hash((self._numerator, self._denominator))
def __eq__(a, b):
"""a == b"""
if isinstance(b, Rational):
return (a._numerator == b.numerator and
a._denominator == b.denominator)
if isinstance(b, numbers.Complex) and b.imag == 0:
b = b.real
if isinstance(b, float):
if math.isnan(b) or math.isinf(b):
# comparisons with an infinity or nan should behave in
# the same way for any finite a, so treat a as zero.
return 0.0 == b
else:
return a == a.from_float(b)
else:
# Since a doesn't know how to compare with b, let's give b
# a chance to compare itself with a.
return NotImplemented
def _richcmp(self, other, op):
"""Helper for comparison operators, for internal use only.
Implement comparison between a Rational instance `self`, and
either another Rational instance or a float `other`. If
`other` is not a Rational instance or a float, return
NotImplemented. `op` should be one of the six standard
comparison operators.
"""
# convert other to a Rational instance where reasonable.
if isinstance(other, Rational):
return op(self._numerator * other.denominator,
self._denominator * other.numerator)
# comparisons with complex should raise a TypeError, for consistency
# with int<->complex, float<->complex, and complex<->complex comparisons.
if isinstance(other, complex):
raise TypeError("no ordering relation is defined for complex numbers")
if isinstance(other, float):
if math.isnan(other) or math.isinf(other):
return op(0.0, other)
else:
return op(self, self.from_float(other))
else:
return NotImplemented
def __lt__(a, b):
"""a < b"""
return a._richcmp(b, operator.lt)
def __gt__(a, b):
"""a > b"""
return a._richcmp(b, operator.gt)
def __le__(a, b):
"""a <= b"""
return a._richcmp(b, operator.le)
def __ge__(a, b):
"""a >= b"""
return a._richcmp(b, operator.ge)
def __nonzero__(a):
"""a != 0"""
return a._numerator != 0
# support for pickling, copy, and deepcopy
def __reduce__(self):
return (self.__class__, (str(self),))
def __copy__(self):
if type(self) == Fraction:
return self # I'm immutable; therefore I am my own clone
return self.__class__(self._numerator, self._denominator)
def __deepcopy__(self, memo):
if type(self) == Fraction:
return self # My components are also immutable
return self.__class__(self._numerator, self._denominator)
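# --- Illustrative sketch (not part of the stdlib module) ---
# limit_denominator() returns the best bounded approximation described in the
# algorithm notes above; from_float() is exact, so 0.3 only becomes 3/10 after
# limiting the denominator:
if __name__ == '__main__':
    print(Fraction('3.141592653589793').limit_denominator(10))  # 22/7
    print(Fraction.from_float(0.3) == Fraction(3, 10))          # False (exact binary value)
    print(Fraction(0.3).limit_denominator(10))                  # 3/10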
| bsd-3-clause |
cliffe/SecGen | modules/utilities/unix/labtainers/files/Labtainers-master/distrib/publish_grader.py | 2 | 2715 | #!/usr/bin/env python
import os
import sys
import argparse
sys.path.append('../scripts/labtainer-student/bin')
import labutils
import ParseLabtainerConfig
import LabtainerLogging
import VersionInfo
def relabel(image, version, base_image, base_id, registry):
with open('./dfile', 'w') as fh:
fh.write('FROM %s\n' % image)
fh.write('ARG version\n')
fh.write('LABEL version=%s\n' % version)
fh.write('LABEL base=%s.%s' % (base_image, base_id))
cmd = 'docker build -f dfile -t %s.tmp .' % image
os.system(cmd)
cmd = 'docker tag %s.tmp %s/%s' % (image, registry, image)
print cmd
os.system(cmd)
cmd = 'docker push %s/%s' % (registry, image)
print cmd
os.system(cmd)
cmd = 'docker tag %s.tmp %s/%s:base_image%s' % (image, registry, image, base_id)
print cmd
os.system(cmd)
cmd = 'docker push %s/%s:base_image%s' % (registry, image, base_id)
print cmd
os.system(cmd)
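# For example (hypothetical arguments), relabel('labtainer.grader', '1.2', 'base', 'abcd', 'myregistry')
# tags and pushes both 'myregistry/labtainer.grader' and 'myregistry/labtainer.grader:base_imageabcd'.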
def main():
parser = argparse.ArgumentParser(description='Build and publish the grader')
parser.add_argument('-t', '--test_registry', action='store_true', help='Use image from test registry')
args = parser.parse_args()
if args.test_registry:
if os.getenv('TEST_REGISTRY') is None:
print('use putenv to set it')
os.putenv("TEST_REGISTRY", "TRUE")
''' why does putenv not set the value? '''
os.environ['TEST_REGISTRY'] = 'TRUE'
else:
print('exists, set it true')
os.environ['TEST_REGISTRY'] = 'TRUE'
print('set TEST REG to %s' % os.getenv('TEST_REGISTRY'))
here = os.getcwd()
os.chdir('../scripts/designer/bin')
test_registry = ''
if args.test_registry:
test_registry = '-t'
cmd = './create_image.sh grader %s' % test_registry
os.system(cmd)
os.chdir(here)
src_path = '../'
labtainer_config_file = os.path.join(src_path, 'config', 'labtainer.config')
logger = LabtainerLogging.LabtainerLogging("publish_grader.log", 'publish', labtainer_config_file)
labutils.logger = logger
labtainer_config = ParseLabtainerConfig.ParseLabtainerConfig(labtainer_config_file, logger)
if args.test_registry:
registry = labtainer_config.test_registry
else:
registry = labtainer_config.default_registry
dfile_path = '../scripts/designer/base_dockerfiles/Dockerfile.labtainer.grader'
image_base = VersionInfo.getFrom(dfile_path, registry)
base_id = VersionInfo.getImageId(image_base, True)
framework_version = labutils.framework_version
relabel('labtainer.grader', framework_version, image_base, base_id, registry)
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 |
srottem/indy-sdk | wrappers/python/tests/pairwise/test_set_pairwise_metadata.py | 2 | 2428 | from indy import IndyError
from indy import pairwise
import pytest
import json
from indy.error import ErrorCode
@pytest.mark.asyncio
async def test_set_pairwise_metadata_works(wallet_handle, identity_my2, identity_trustee1, metadata):
(my_did, _) = identity_my2
(their_did, _) = identity_trustee1
await pairwise.create_pairwise(wallet_handle, their_did, my_did, None)
pairwise_without_metadata = await pairwise.get_pairwise(wallet_handle, their_did)
await pairwise.set_pairwise_metadata(wallet_handle, their_did, metadata)
pairwise_with_metadata = await pairwise.get_pairwise(wallet_handle, their_did)
assert pairwise_without_metadata != pairwise_with_metadata
assert {'my_did': my_did, 'metadata': metadata} == json.loads(pairwise_with_metadata)
@pytest.mark.asyncio
async def test_set_pairwise_metadata_works_for_reset(wallet_handle, identity_my2, identity_trustee1, metadata):
(my_did, _) = identity_my2
(their_did, _) = identity_trustee1
await pairwise.create_pairwise(wallet_handle, their_did, my_did, metadata)
pairwise_with_metadata = await pairwise.get_pairwise(wallet_handle, their_did)
assert {'my_did': my_did, 'metadata': metadata} == json.loads(pairwise_with_metadata)
await pairwise.set_pairwise_metadata(wallet_handle, their_did, None)
pairwise_without_metadata = await pairwise.get_pairwise(wallet_handle, their_did)
assert {'my_did': my_did} == json.loads(pairwise_without_metadata)
assert pairwise_without_metadata != pairwise_with_metadata
@pytest.mark.asyncio
async def test_set_pairwise_metadata_works_for_not_created_pairwise(wallet_handle, identity_trustee1, metadata):
(their_did, _) = identity_trustee1
with pytest.raises(IndyError) as e:
await pairwise.set_pairwise_metadata(wallet_handle, their_did, metadata)
assert ErrorCode.WalletItemNotFound == e.value.error_code
@pytest.mark.asyncio
async def test_set_pairwise_metadata_works_for_invalid_handle(wallet_handle, identity_my2, identity_trustee1, metadata):
(my_did, _) = identity_my2
(their_did, _) = identity_trustee1
await pairwise.create_pairwise(wallet_handle, their_did, my_did, None)
with pytest.raises(IndyError) as e:
invalid_wallet_handle = wallet_handle + 1
await pairwise.set_pairwise_metadata(invalid_wallet_handle, their_did, metadata)
assert ErrorCode.WalletInvalidHandle == e.value.error_code
| apache-2.0 |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/gtts/tts.py | 4 | 6388 | # -*- coding: utf-8 -*-
import re, requests, warnings
from six.moves import urllib
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from gtts_token.gtts_token import Token
class gTTS:
""" gTTS (Google Text to Speech): an interface to Google's Text to Speech API """
# Google TTS API supports two read speeds
# (speed <= 0.3: slow; speed > 0.3: normal; default: 1)
class Speed:
SLOW = 0.3
NORMAL = 1
GOOGLE_TTS_URL = 'https://translate.google.com/translate_tts'
MAX_CHARS = 100 # Max characters the Google TTS API takes at a time
LANGUAGES = {
'af' : 'Afrikaans',
'sq' : 'Albanian',
'ar' : 'Arabic',
'hy' : 'Armenian',
'bn' : 'Bengali',
'ca' : 'Catalan',
'zh' : 'Chinese',
'zh-cn' : 'Chinese (Mandarin/China)',
'zh-tw' : 'Chinese (Mandarin/Taiwan)',
'zh-yue' : 'Chinese (Cantonese)',
'hr' : 'Croatian',
'cs' : 'Czech',
'da' : 'Danish',
'nl' : 'Dutch',
'en' : 'English',
'en-au' : 'English (Australia)',
'en-uk' : 'English (United Kingdom)',
'en-us' : 'English (United States)',
'eo' : 'Esperanto',
'fi' : 'Finnish',
'fr' : 'French',
'de' : 'German',
'el' : 'Greek',
'hi' : 'Hindi',
'hu' : 'Hungarian',
'is' : 'Icelandic',
'id' : 'Indonesian',
'it' : 'Italian',
'ja' : 'Japanese',
'km' : 'Khmer (Cambodian)',
'ko' : 'Korean',
'la' : 'Latin',
'lv' : 'Latvian',
'mk' : 'Macedonian',
'no' : 'Norwegian',
'pl' : 'Polish',
'pt' : 'Portuguese',
'ro' : 'Romanian',
'ru' : 'Russian',
'sr' : 'Serbian',
'si' : 'Sinhala',
'sk' : 'Slovak',
'es' : 'Spanish',
'es-es' : 'Spanish (Spain)',
'es-us' : 'Spanish (United States)',
'sw' : 'Swahili',
'sv' : 'Swedish',
'ta' : 'Tamil',
'th' : 'Thai',
'tr' : 'Turkish',
'uk' : 'Ukrainian',
'vi' : 'Vietnamese',
'cy' : 'Welsh'
}
def __init__(self, text, lang = 'en', slow = False, debug = False):
self.debug = debug
if lang.lower() not in self.LANGUAGES:
raise Exception('Language not supported: %s' % lang)
else:
self.lang = lang.lower()
if not text:
raise Exception('No text to speak')
else:
self.text = text
# Read speed
if slow:
self.speed = self.Speed().SLOW
else:
self.speed = self.Speed().NORMAL
# Split text in parts
if self._len(text) <= self.MAX_CHARS:
text_parts = [text]
else:
text_parts = self._tokenize(text, self.MAX_CHARS)
# Clean
def strip(x): return x.replace('\n', '').strip()
text_parts = [strip(x) for x in text_parts]
text_parts = [x for x in text_parts if len(x) > 0]
self.text_parts = text_parts
# Google Translate token
self.token = Token()
def save(self, savefile):
""" Do the Web request and save to `savefile` """
with open(savefile, 'wb') as f:
self.write_to_fp(f)
def write_to_fp(self, fp):
""" Do the Web request and save to a file-like object """
for idx, part in enumerate(self.text_parts):
payload = { 'ie' : 'UTF-8',
'q' : part,
'tl' : self.lang,
'ttsspeed' : self.speed,
'total' : len(self.text_parts),
'idx' : idx,
'client' : 'tw-ob',
'textlen' : self._len(part),
'tk' : self.token.calculate_token(part)}
headers = {
"Referer" : "http://translate.google.com/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36"
}
if self.debug: print(payload)
try:
# Disable requests' ssl verify to accommodate certain proxies and firewalls
# Filter out urllib3's insecure warnings. We can live without ssl verify here
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=InsecureRequestWarning)
r = requests.get(self.GOOGLE_TTS_URL,
params=payload,
headers=headers,
proxies=urllib.request.getproxies(),
verify=False)
if self.debug:
print("Headers: {}".format(r.request.headers))
print("Request url: {}".format(r.request.url))
print("Response: {}, Redirects: {}".format(r.status_code, r.history))
r.raise_for_status()
for chunk in r.iter_content(chunk_size=1024):
fp.write(chunk)
except Exception as e:
raise
def _len(self, text):
""" Get char len of `text`, after decoding if Python 2 """
try:
# Python 2
return len(text.decode('utf8'))
except AttributeError:
# Python 3
return len(text)
def _tokenize(self, text, max_size):
""" Tokenizer on basic roman punctuation """
punc = "¡!()[]¿?.,;:—«»\n"
punc_list = [re.escape(c) for c in punc]
pattern = '|'.join(punc_list)
parts = re.split(pattern, text)
min_parts = []
for p in parts:
min_parts += self._minimize(p, " ", max_size)
return min_parts
def _minimize(self, thestring, delim, max_size):
""" Recursive function that splits `thestring` in chunks
of maximum `max_size` chars delimited by `delim`. Returns list. """
if self._len(thestring) > max_size:
idx = thestring.rfind(delim, 0, max_size)
return [thestring[:idx]] + self._minimize(thestring[idx:], delim, max_size)
else:
return [thestring]
if __name__ == "__main__":
pass
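# --- Illustrative sketch (not part of the original module) ---
# Text longer than MAX_CHARS is split into parts no longer than 100 characters;
# this assumes Token() does not contact Google until a request token is needed:
#
#   demo = gTTS("One sentence. " * 20, lang='en')
#   assert max(len(p) for p in demo.text_parts) <= gTTS.MAX_CHARS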
| gpl-3.0 |
ulikoehler/UliEngineering | UliEngineering/SignalProcessing/Resampling.py | 1 | 9018 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Utilities for selecting and finding specific attributes in datasets
"""
import math
import functools
import numpy as np
import bisect
import concurrent.futures
import scipy.interpolate
from UliEngineering.Utils.Concurrency import QueuedThreadExecutor
from .Utils import LinRange
__all__ = ["resample_discard", "resampled_timespace",
"parallel_resample", "signal_samplerate",
"serial_resample"]
def signal_samplerate(t, ignore_percentile=10, mean_method=np.mean):
"""
Compute the samplerate of a signal
using a quantile-based method to exclude
outliers (in the time delta domain) and
computes the samplerate as 1 / mean(sample period).
Using a low ignore_percentile value is only
desirable if the dataset is small and therefore
does not average properly due to lack of samples.
In most cases, using a high ignore percentile
like 10 is recommended.
Returns a float (samplerate) [1/s].
If t is a LinRange() object, returns t.samplerate()
Parameters
----------
t : numpy array of datetime64 type (or LinRange)
Timestamps associated with the signal
ignore_percentile : number
This percentile of outliers is ignored
for the mean calculation at both the top
and the bottom end.
"5" means considering the 5th...95th percentile
for averaging.
mean_method : unary function
Used to compute the mean after excluding outliers.
Except for special usecases, arithmetic mean (np.mean)
is recommended.
"""
# Special rule for LinRange objects that have a defined samplerate
if isinstance(t, LinRange):
return t.samplerate()
tdelta = np.diff(t)
above = np.percentile(tdelta, ignore_percentile)
below = np.percentile(tdelta, 100 - ignore_percentile)
filtered = tdelta[np.logical_and(tdelta >= above, tdelta <= below)]
# Filtered is too small if the sample periods are too uniform in the array
if len(filtered) < 0.1 * len(tdelta):
filtered = tdelta
mean_sample_period = mean_method(filtered)
mean_sample_period = mean_sample_period.astype("timedelta64[ns]").astype(np.int64)
return 1e9 / mean_sample_period # 1e9 : nanoseconds
def resample_discard(arr, divisor, ofs=0):
"""
Resample with an integral divisor, discarding all other samples.
Returns a view of the data.
Very fast as this doesn't need to read the data.
"""
return arr[ofs::divisor]
def resampled_timespace(t, new_samplerate, assume_sorted=True, time_factor=1e6):
"""
Compute the new timespace after resampling a input timestamp array
(not neccessarily lazy)
Parameters
----------
t : numpy array-like
The source timestamps.
If these are numbers, you must supply time_factor to
specify the resolution of the number.
If they are of datetime64 dtype, time_factor is ignored.
new_samplerate : float
The new datarate in Hz
assume_sorted : bool
If this is True, the code assumes the source
timestamp array is monotonically increasing, i.e.
the lowest timestamp comes first and the highest last.
If this is False, the code determines
the min/max value by reading the entire array.
time_factor : float
Ignored if t is of dtype datetime64
Defines what timestamps in the source (and result)
array means. This is required to interpret new_samplerate.
If time_factor=1e6, it means that a difference of 1.0
in two timestamps means a difference of 1/1e6 seconds.
Returns
-------
A LinSpace() (acts like a numpy array but doesn't consume any memory)
that represents the new timespace
"""
if len(t) == 0:
raise ValueError("Empty time array given - can not perform any resampling")
if len(t) == 1:
raise ValueError("Time array has only one value - can not perform any resampling")
# Handle numpy datetime64 input
if "datetime64" in t.dtype.name:
t = t.astype('datetime64[ns]').astype(np.int64)
time_factor = 1e9
# Compute time endpoints
dst_tdelta = time_factor / new_samplerate
startt, endt = (t[0], t[-1]) if assume_sorted else (np.min(t), np.max(t))
src_tdelta = endt - startt
if src_tdelta < dst_tdelta:
raise ValueError("The time delta is smaller than a single sample - can not perform resampling")
# Use a lazy linrange to represent time interval
return LinRange.range(startt, endt, dst_tdelta)
def __parallel_resample_worker(torig, tnew, y, out, i, chunksize, ovp_size, prefilter, fitkind):
# Find the time range in the target time
t_target = tnew[i:i + chunksize]
# Find the time range in the source time
srcstart = bisect.bisect_left(torig, t_target[0])
srcend = bisect.bisect_right(torig, t_target[-1])
# Compute start and end index with overprovisioning
# These might be out of range of the src array, but slicing clamps such indices
srcstart_ovp = max(0, srcstart - ovp_size) # Must not get negative indices
srcend_ovp = srcend + ovp_size
# Compute source slices
tsrc_chunk = torig[srcstart_ovp:srcend_ovp]
ysrc_chunk = y[srcstart_ovp:srcend_ovp]
# Perform prefilter
if prefilter is not None:
tsrc_chunk, ysrc_chunk = prefilter(tsrc_chunk, ysrc_chunk)
# Compute interpolating spline (might also be piecewise linear)...
fit = scipy.interpolate.interp1d(tsrc_chunk, ysrc_chunk, kind=fitkind)
# ... and evaluate
out[i:i + chunksize] = fit(t_target)
def serial_resample(t, y, new_samplerate, out=None, prefilter=None,
time_factor=1e6,
fitkind='linear', chunksize=10000,
overprovisioning_factor=0.01):
"""
A resampler that uses scipy.interpolate.interp1d but splits the
input into chunks that can be processed.
The chunksize is applied to the output timebase.
The input x array is assumed to be sorted, facilitating binary search.
If the output array is not given, it is automatically allocated with the correct size.
The chunk workers are executed in parallel in a concurrent.futures thread pool.
In order to account for vector end effects, an overprovisioning factor
can be provided so that a fraction of the chunksize is added at both ends of
the source chunk.
An overprovisioning factor of 0.01 means that 1% of the chunksize is added on the left
and 1% is added on the right. This does not affect leftmost and rightmost
border of the input array.
Returns the output array.
Applies an optional prefilter to the input data while resampling. If the timebase of
the input data is off significantly, this might produce unexpected results.
The prefilter must be a reentrant functor that takes (t, x) data and returns
a (t, x) tuple. The returned tuple can be of arbitrary size (assuming t and x
have the same length) but its t range must include the t range that is being interpolated.
Note that the prefilter is performed after overprovisioning, so setting a higher
overprovisioning factor (see below) might help dealing with prefilters that
return too small arrays, however at the start and the end of the input array,
no overprovisioning values can be added.
"""
new_t = resampled_timespace(t, new_samplerate, time_factor=time_factor)
# Lazily compute the new timespan
if out is None:
out = np.zeros(len(new_t))
ovp_size = int(math.floor(overprovisioning_factor * chunksize))
# How many chunks do we have to process?
for i in range(len(new_t) // chunksize):
__parallel_resample_worker(i=i, torig=t, tnew=new_t,
y=y, out=out, chunksize=chunksize,
ovp_size=ovp_size, prefilter=prefilter,
fitkind=fitkind)
return out
def parallel_resample(t, y, new_samplerate, out=None, prefilter=None,
executor=None, time_factor=1e6,
fitkind='linear', chunksize=10000,
overprovisioning_factor=0.01):
"""
Parallel variant of serial_resample
"""
new_t = resampled_timespace(t, new_samplerate, time_factor=time_factor)
# Lazily compute the new timespan
if out is None:
out = np.zeros(len(new_t))
if executor is None:
executor = QueuedThreadExecutor()
ovp_size = int(math.floor(overprovisioning_factor * chunksize))
# How many chunks do we have to process?
numchunks = len(new_t) // chunksize
# Bind constant arguments
f = functools.partial(__parallel_resample_worker, torig=t, tnew=new_t,
y=y, out=out, chunksize=chunksize,
ovp_size=ovp_size, prefilter=prefilter,
fitkind=fitkind)
futures = [executor.submit(f, i=i) for i in range(numchunks)]
# Wait for futures to finish
concurrent.futures.wait(futures)
return out
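# --- Illustrative sketch (not part of the original module) ---
# Computing the lazily evaluated output timebase; the numbers are hypothetical
# (timestamps in microseconds, hence time_factor=1e6):
if __name__ == '__main__':
    t = np.linspace(0.0, 2e6, 2001)                    # 2 s of data at ~1 kHz
    new_t = resampled_timespace(t, 100.0, time_factor=1e6)
    print(len(new_t))                                  # roughly 200 samples at the new 100 Hz rate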
| apache-2.0 |
DavidNorman/tensorflow | tensorflow/python/training/adagrad_da.py | 15 | 7491 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adagrad Dual Averaging for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["train.AdagradDAOptimizer"])
class AdagradDAOptimizer(optimizer.Optimizer):
"""Adagrad Dual Averaging algorithm for sparse linear models.
See this [paper](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf).
This optimizer takes care of regularization of unseen features in a mini batch
by updating them when they are seen with a closed form update rule that is
equivalent to having updated them on every mini-batch.
AdagradDA is typically used when there is a need for large sparsity in the
trained model. This optimizer only guarantees sparsity for linear models. Be
careful when using AdagradDA for deep networks as it will require careful
initialization of the gradient accumulators for it to train.
"""
def __init__(self,
learning_rate,
global_step,
initial_gradient_squared_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0,
use_locking=False,
name="AdagradDA"):
"""Construct a new AdagradDA optimizer.
Args:
learning_rate: A `Tensor` or a floating point value. The learning rate.
global_step: A `Tensor` containing the current training step number.
initial_gradient_squared_accumulator_value: A floating point value.
Starting value for the accumulators, must be positive.
l1_regularization_strength: A float value, must be greater than or
equal to zero.
l2_regularization_strength: A float value, must be greater than or
equal to zero.
use_locking: If `True` use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "AdagradDA".
Raises:
ValueError: If the `initial_gradient_squared_accumulator_value` is
invalid.
"""
if initial_gradient_squared_accumulator_value <= 0.0:
raise ValueError("initial_gradient_squared_accumulator_value must be "
"positive: %s" %
initial_gradient_squared_accumulator_value)
super(AdagradDAOptimizer, self).__init__(use_locking, name)
self._learning_rate = learning_rate
self._initial_gradient_squared_accumulator_value = (
initial_gradient_squared_accumulator_value)
# Created in Initialize.
self._learning_rate_tensor = None
self._l1_regularization_strength = l1_regularization_strength
self._l2_regularization_strength = l2_regularization_strength
self._global_step = global_step
self._global_step_on_worker = None
def _create_slots(self, var_list):
for v in var_list:
with ops.colocate_with(v):
g_val = constant_op.constant(
0.0, shape=v.get_shape(), dtype=v.dtype.base_dtype)
gg_val = constant_op.constant(
self._initial_gradient_squared_accumulator_value,
shape=v.get_shape(),
dtype=v.dtype.base_dtype)
self._get_or_make_slot(v, g_val, "gradient_accumulator", self._name)
self._get_or_make_slot(v, gg_val, "gradient_squared_accumulator",
self._name)
def _prepare(self):
self._learning_rate_tensor = ops.convert_to_tensor(
self._learning_rate, name="learning_rate")
    # Performance optimization so that the worker creates a copy of the global step
    # to avoid overloading the parameter server holding the global step.
with ops.colocate_with(self._learning_rate_tensor):
self._global_step_on_worker = array_ops.identity(self._global_step) + 1
def _apply_dense(self, grad, var):
g_acc = self.get_slot(var, "gradient_accumulator")
gg_acc = self.get_slot(var, "gradient_squared_accumulator")
with ops.device(var.device):
global_step = array_ops.identity(self._global_step_on_worker)
return training_ops.apply_adagrad_da(
var,
g_acc,
gg_acc,
grad,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength, var.dtype.base_dtype),
math_ops.cast(self._l2_regularization_strength, var.dtype.base_dtype),
global_step,
use_locking=self._use_locking)
def _resource_apply_dense(self, grad, var):
g_acc = self.get_slot(var, "gradient_accumulator")
gg_acc = self.get_slot(var, "gradient_squared_accumulator")
with ops.device(var.device):
global_step = array_ops.identity(self._global_step_on_worker)
return training_ops.resource_apply_adagrad_da(
var.handle,
g_acc.handle,
gg_acc.handle,
grad,
math_ops.cast(self._learning_rate_tensor, grad.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength, grad.dtype.base_dtype),
math_ops.cast(self._l2_regularization_strength, grad.dtype.base_dtype),
global_step,
use_locking=self._use_locking)
def _apply_sparse(self, grad, var):
g_acc = self.get_slot(var, "gradient_accumulator")
gg_acc = self.get_slot(var, "gradient_squared_accumulator")
with ops.device(var.device):
global_step = array_ops.identity(self._global_step_on_worker)
return training_ops.sparse_apply_adagrad_da(
var,
g_acc,
gg_acc,
grad.values,
grad.indices,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength, var.dtype.base_dtype),
math_ops.cast(self._l2_regularization_strength, var.dtype.base_dtype),
global_step,
use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices):
g_acc = self.get_slot(var, "gradient_accumulator")
gg_acc = self.get_slot(var, "gradient_squared_accumulator")
with ops.device(var.device):
global_step = array_ops.identity(self._global_step_on_worker)
return training_ops.resource_sparse_apply_adagrad_da(
var.handle,
g_acc.handle,
gg_acc.handle,
grad,
indices,
math_ops.cast(self._learning_rate_tensor, grad.dtype),
math_ops.cast(self._l1_regularization_strength, grad.dtype),
math_ops.cast(self._l2_regularization_strength, grad.dtype),
global_step,
use_locking=self._use_locking)
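# Usage sketch (illustrative only, TF1-style graph mode; assumes tensorflow is
# imported as tf and that `loss` is defined by the surrounding training code):
#
#   global_step = tf.compat.v1.train.get_or_create_global_step()
#   opt = AdagradDAOptimizer(learning_rate=0.1,
#                            global_step=global_step,
#                            l1_regularization_strength=0.01)
#   train_op = opt.minimize(loss, global_step=global_step)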
| apache-2.0 |
lokeshjindal15/gem5_transform | src/arch/x86/isa/insts/general_purpose/data_transfer/xchg.py | 89 | 3288 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
# All the memory versions need to use LOCK, regardless of whether it was set
def macroop XCHG_R_R
{
# Use the xor trick instead of moves to reduce register pressure.
# This probably doesn't make much of a difference, but it's easy.
xor reg, reg, regm
xor regm, regm, reg
xor reg, reg, regm
};
def macroop XCHG_R_M
{
mfence
ldstl t1, seg, sib, disp
stul reg, seg, sib, disp
mfence
mov reg, reg, t1
};
def macroop XCHG_R_P
{
rdip t7
mfence
ldstl t1, seg, riprel, disp
stul reg, seg, riprel, disp
mfence
mov reg, reg, t1
};
def macroop XCHG_M_R
{
mfence
ldstl t1, seg, sib, disp
stul reg, seg, sib, disp
mfence
mov reg, reg, t1
};
def macroop XCHG_P_R
{
rdip t7
mfence
ldstl t1, seg, riprel, disp
stul reg, seg, riprel, disp
mfence
mov reg, reg, t1
};
def macroop XCHG_LOCKED_M_R
{
mfence
ldstl t1, seg, sib, disp
stul reg, seg, sib, disp
mfence
mov reg, reg, t1
};
def macroop XCHG_LOCKED_P_R
{
rdip t7
mfence
ldstl t1, seg, riprel, disp
stul reg, seg, riprel, disp
mfence
mov reg, reg, t1
};
'''
| bsd-3-clause |
Azure/azure-sdk-for-python | sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2016_08_01/models/_models_py3.py | 1 | 295755 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Any, Dict, List, Optional, Union
import msrest.serialization
from ._web_site_management_client_enums import *
class ApiDefinitionInfo(msrest.serialization.Model):
"""Information about the formal API definition for the app.
:param url: The URL of the API definition.
:type url: str
"""
_attribute_map = {
'url': {'key': 'url', 'type': 'str'},
}
def __init__(
self,
*,
url: Optional[str] = None,
**kwargs
):
super(ApiDefinitionInfo, self).__init__(**kwargs)
self.url = url
class ApplicationLogsConfig(msrest.serialization.Model):
"""Application logs configuration.
:param file_system: Application logs to file system configuration.
:type file_system: ~azure.mgmt.web.v2016_08_01.models.FileSystemApplicationLogsConfig
:param azure_table_storage: Application logs to azure table storage configuration.
:type azure_table_storage:
~azure.mgmt.web.v2016_08_01.models.AzureTableStorageApplicationLogsConfig
:param azure_blob_storage: Application logs to blob storage configuration.
:type azure_blob_storage:
~azure.mgmt.web.v2016_08_01.models.AzureBlobStorageApplicationLogsConfig
"""
_attribute_map = {
'file_system': {'key': 'fileSystem', 'type': 'FileSystemApplicationLogsConfig'},
'azure_table_storage': {'key': 'azureTableStorage', 'type': 'AzureTableStorageApplicationLogsConfig'},
'azure_blob_storage': {'key': 'azureBlobStorage', 'type': 'AzureBlobStorageApplicationLogsConfig'},
}
def __init__(
self,
*,
file_system: Optional["FileSystemApplicationLogsConfig"] = None,
azure_table_storage: Optional["AzureTableStorageApplicationLogsConfig"] = None,
azure_blob_storage: Optional["AzureBlobStorageApplicationLogsConfig"] = None,
**kwargs
):
super(ApplicationLogsConfig, self).__init__(**kwargs)
self.file_system = file_system
self.azure_table_storage = azure_table_storage
self.azure_blob_storage = azure_blob_storage
class AutoHealActions(msrest.serialization.Model):
"""Actions which to take by the auto-heal module when a rule is triggered.
:param action_type: Predefined action to be taken. Possible values include: "Recycle",
"LogEvent", "CustomAction".
:type action_type: str or ~azure.mgmt.web.v2016_08_01.models.AutoHealActionType
:param custom_action: Custom action to be taken.
:type custom_action: ~azure.mgmt.web.v2016_08_01.models.AutoHealCustomAction
:param min_process_execution_time: Minimum time the process must execute
before taking the action.
:type min_process_execution_time: str
"""
_attribute_map = {
'action_type': {'key': 'actionType', 'type': 'str'},
'custom_action': {'key': 'customAction', 'type': 'AutoHealCustomAction'},
'min_process_execution_time': {'key': 'minProcessExecutionTime', 'type': 'str'},
}
def __init__(
self,
*,
action_type: Optional[Union[str, "AutoHealActionType"]] = None,
custom_action: Optional["AutoHealCustomAction"] = None,
min_process_execution_time: Optional[str] = None,
**kwargs
):
super(AutoHealActions, self).__init__(**kwargs)
self.action_type = action_type
self.custom_action = custom_action
self.min_process_execution_time = min_process_execution_time
class AutoHealCustomAction(msrest.serialization.Model):
"""Custom action to be executed
when an auto heal rule is triggered.
:param exe: Executable to be run.
:type exe: str
:param parameters: Parameters for the executable.
:type parameters: str
"""
_attribute_map = {
'exe': {'key': 'exe', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': 'str'},
}
def __init__(
self,
*,
exe: Optional[str] = None,
parameters: Optional[str] = None,
**kwargs
):
super(AutoHealCustomAction, self).__init__(**kwargs)
self.exe = exe
self.parameters = parameters
class AutoHealRules(msrest.serialization.Model):
"""Rules that can be defined for auto-heal.
:param triggers: Conditions that describe when to execute the auto-heal actions.
:type triggers: ~azure.mgmt.web.v2016_08_01.models.AutoHealTriggers
:param actions: Actions to be executed when a rule is triggered.
:type actions: ~azure.mgmt.web.v2016_08_01.models.AutoHealActions
"""
_attribute_map = {
'triggers': {'key': 'triggers', 'type': 'AutoHealTriggers'},
'actions': {'key': 'actions', 'type': 'AutoHealActions'},
}
def __init__(
self,
*,
triggers: Optional["AutoHealTriggers"] = None,
actions: Optional["AutoHealActions"] = None,
**kwargs
):
super(AutoHealRules, self).__init__(**kwargs)
self.triggers = triggers
self.actions = actions
class AutoHealTriggers(msrest.serialization.Model):
"""Triggers for auto-heal.
:param requests: A rule based on total requests.
:type requests: ~azure.mgmt.web.v2016_08_01.models.RequestsBasedTrigger
:param private_bytes_in_kb: A rule based on private bytes.
:type private_bytes_in_kb: int
:param status_codes: A rule based on status codes.
:type status_codes: list[~azure.mgmt.web.v2016_08_01.models.StatusCodesBasedTrigger]
:param slow_requests: A rule based on request execution time.
:type slow_requests: ~azure.mgmt.web.v2016_08_01.models.SlowRequestsBasedTrigger
"""
_attribute_map = {
'requests': {'key': 'requests', 'type': 'RequestsBasedTrigger'},
'private_bytes_in_kb': {'key': 'privateBytesInKB', 'type': 'int'},
'status_codes': {'key': 'statusCodes', 'type': '[StatusCodesBasedTrigger]'},
'slow_requests': {'key': 'slowRequests', 'type': 'SlowRequestsBasedTrigger'},
}
def __init__(
self,
*,
requests: Optional["RequestsBasedTrigger"] = None,
private_bytes_in_kb: Optional[int] = None,
status_codes: Optional[List["StatusCodesBasedTrigger"]] = None,
slow_requests: Optional["SlowRequestsBasedTrigger"] = None,
**kwargs
):
super(AutoHealTriggers, self).__init__(**kwargs)
self.requests = requests
self.private_bytes_in_kb = private_bytes_in_kb
self.status_codes = status_codes
self.slow_requests = slow_requests
class AzureBlobStorageApplicationLogsConfig(msrest.serialization.Model):
"""Application logs azure blob storage configuration.
:param level: Log level. Possible values include: "Off", "Verbose", "Information", "Warning",
"Error".
:type level: str or ~azure.mgmt.web.v2016_08_01.models.LogLevel
    :param sas_url: SAS URL to an Azure blob container with read/write/list/delete permissions.
:type sas_url: str
:param retention_in_days: Retention in days.
Remove blobs older than X days.
0 or lower means no retention.
:type retention_in_days: int
"""
_attribute_map = {
'level': {'key': 'level', 'type': 'str'},
'sas_url': {'key': 'sasUrl', 'type': 'str'},
'retention_in_days': {'key': 'retentionInDays', 'type': 'int'},
}
def __init__(
self,
*,
level: Optional[Union[str, "LogLevel"]] = None,
sas_url: Optional[str] = None,
retention_in_days: Optional[int] = None,
**kwargs
):
super(AzureBlobStorageApplicationLogsConfig, self).__init__(**kwargs)
self.level = level
self.sas_url = sas_url
self.retention_in_days = retention_in_days
class AzureBlobStorageHttpLogsConfig(msrest.serialization.Model):
"""Http logs to azure blob storage configuration.
    :param sas_url: SAS URL to an Azure blob container with read/write/list/delete permissions.
:type sas_url: str
:param retention_in_days: Retention in days.
Remove blobs older than X days.
0 or lower means no retention.
:type retention_in_days: int
:param enabled: True if configuration is enabled, false if it is disabled and null if
configuration is not set.
:type enabled: bool
"""
_attribute_map = {
'sas_url': {'key': 'sasUrl', 'type': 'str'},
'retention_in_days': {'key': 'retentionInDays', 'type': 'int'},
'enabled': {'key': 'enabled', 'type': 'bool'},
}
def __init__(
self,
*,
sas_url: Optional[str] = None,
retention_in_days: Optional[int] = None,
enabled: Optional[bool] = None,
**kwargs
):
super(AzureBlobStorageHttpLogsConfig, self).__init__(**kwargs)
self.sas_url = sas_url
self.retention_in_days = retention_in_days
self.enabled = enabled
class AzureTableStorageApplicationLogsConfig(msrest.serialization.Model):
"""Application logs to Azure table storage configuration.
All required parameters must be populated in order to send to Azure.
:param level: Log level. Possible values include: "Off", "Verbose", "Information", "Warning",
"Error".
:type level: str or ~azure.mgmt.web.v2016_08_01.models.LogLevel
:param sas_url: Required. SAS URL to an Azure table with add/query/delete permissions.
:type sas_url: str
"""
_validation = {
'sas_url': {'required': True},
}
_attribute_map = {
'level': {'key': 'level', 'type': 'str'},
'sas_url': {'key': 'sasUrl', 'type': 'str'},
}
def __init__(
self,
*,
sas_url: str,
level: Optional[Union[str, "LogLevel"]] = None,
**kwargs
):
super(AzureTableStorageApplicationLogsConfig, self).__init__(**kwargs)
self.level = level
self.sas_url = sas_url
class ProxyOnlyResource(msrest.serialization.Model):
"""Azure proxy only resource. This resource is not tracked by Azure Resource Manager.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(ProxyOnlyResource, self).__init__(**kwargs)
self.id = None
self.name = None
self.kind = kind
self.type = None
class BackupItem(ProxyOnlyResource):
"""Backup description.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar backup_id: Id of the backup.
:vartype backup_id: int
:ivar storage_account_url: SAS URL for the storage account container which contains this
backup.
:vartype storage_account_url: str
:ivar blob_name: Name of the blob which contains data for this backup.
:vartype blob_name: str
:ivar name_properties_name: Name of this backup.
:vartype name_properties_name: str
:ivar status: Backup status. Possible values include: "InProgress", "Failed", "Succeeded",
"TimedOut", "Created", "Skipped", "PartiallySucceeded", "DeleteInProgress", "DeleteFailed",
"Deleted".
:vartype status: str or ~azure.mgmt.web.v2016_08_01.models.BackupItemStatus
:ivar size_in_bytes: Size of the backup in bytes.
:vartype size_in_bytes: long
:ivar created: Timestamp of the backup creation.
:vartype created: ~datetime.datetime
:ivar log: Details regarding this backup. Might contain an error message.
:vartype log: str
:ivar databases: List of databases included in the backup.
:vartype databases: list[~azure.mgmt.web.v2016_08_01.models.DatabaseBackupSetting]
:ivar scheduled: True if this backup has been created due to a schedule being triggered.
:vartype scheduled: bool
:ivar last_restore_time_stamp: Timestamp of a last restore operation which used this backup.
:vartype last_restore_time_stamp: ~datetime.datetime
:ivar finished_time_stamp: Timestamp when this backup finished.
:vartype finished_time_stamp: ~datetime.datetime
:ivar correlation_id: Unique correlation identifier. Please use this along with the timestamp
while communicating with Azure support.
:vartype correlation_id: str
:ivar website_size_in_bytes: Size of the original web app which has been backed up.
:vartype website_size_in_bytes: long
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'backup_id': {'readonly': True},
'storage_account_url': {'readonly': True},
'blob_name': {'readonly': True},
'name_properties_name': {'readonly': True},
'status': {'readonly': True},
'size_in_bytes': {'readonly': True},
'created': {'readonly': True},
'log': {'readonly': True},
'databases': {'readonly': True},
'scheduled': {'readonly': True},
'last_restore_time_stamp': {'readonly': True},
'finished_time_stamp': {'readonly': True},
'correlation_id': {'readonly': True},
'website_size_in_bytes': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'backup_id': {'key': 'properties.id', 'type': 'int'},
'storage_account_url': {'key': 'properties.storageAccountUrl', 'type': 'str'},
'blob_name': {'key': 'properties.blobName', 'type': 'str'},
'name_properties_name': {'key': 'properties.name', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'str'},
'size_in_bytes': {'key': 'properties.sizeInBytes', 'type': 'long'},
'created': {'key': 'properties.created', 'type': 'iso-8601'},
'log': {'key': 'properties.log', 'type': 'str'},
'databases': {'key': 'properties.databases', 'type': '[DatabaseBackupSetting]'},
'scheduled': {'key': 'properties.scheduled', 'type': 'bool'},
'last_restore_time_stamp': {'key': 'properties.lastRestoreTimeStamp', 'type': 'iso-8601'},
'finished_time_stamp': {'key': 'properties.finishedTimeStamp', 'type': 'iso-8601'},
'correlation_id': {'key': 'properties.correlationId', 'type': 'str'},
'website_size_in_bytes': {'key': 'properties.websiteSizeInBytes', 'type': 'long'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(BackupItem, self).__init__(kind=kind, **kwargs)
self.backup_id = None
self.storage_account_url = None
self.blob_name = None
self.name_properties_name = None
self.status = None
self.size_in_bytes = None
self.created = None
self.log = None
self.databases = None
self.scheduled = None
self.last_restore_time_stamp = None
self.finished_time_stamp = None
self.correlation_id = None
self.website_size_in_bytes = None
class BackupItemCollection(msrest.serialization.Model):
"""Collection of backup items.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2016_08_01.models.BackupItem]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[BackupItem]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["BackupItem"],
**kwargs
):
super(BackupItemCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class BackupRequest(ProxyOnlyResource):
"""Description of a backup which will be performed.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param backup_request_name: Name of the backup.
:type backup_request_name: str
:param enabled: True if the backup schedule is enabled (must be included in that case), false
if the backup schedule should be disabled.
:type enabled: bool
:param storage_account_url: SAS URL to the container.
:type storage_account_url: str
:param backup_schedule: Schedule for the backup if it is executed periodically.
:type backup_schedule: ~azure.mgmt.web.v2016_08_01.models.BackupSchedule
:param databases: Databases included in the backup.
:type databases: list[~azure.mgmt.web.v2016_08_01.models.DatabaseBackupSetting]
:param type_properties_type: Type of the backup. Possible values include: "Default", "Clone",
"Relocation", "Snapshot".
:type type_properties_type: str or
~azure.mgmt.web.v2016_08_01.models.BackupRestoreOperationType
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'backup_request_name': {'key': 'properties.name', 'type': 'str'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'storage_account_url': {'key': 'properties.storageAccountUrl', 'type': 'str'},
'backup_schedule': {'key': 'properties.backupSchedule', 'type': 'BackupSchedule'},
'databases': {'key': 'properties.databases', 'type': '[DatabaseBackupSetting]'},
'type_properties_type': {'key': 'properties.type', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
backup_request_name: Optional[str] = None,
enabled: Optional[bool] = None,
storage_account_url: Optional[str] = None,
backup_schedule: Optional["BackupSchedule"] = None,
databases: Optional[List["DatabaseBackupSetting"]] = None,
type_properties_type: Optional[Union[str, "BackupRestoreOperationType"]] = None,
**kwargs
):
super(BackupRequest, self).__init__(kind=kind, **kwargs)
self.backup_request_name = backup_request_name
self.enabled = enabled
self.storage_account_url = storage_account_url
self.backup_schedule = backup_schedule
self.databases = databases
self.type_properties_type = type_properties_type
class BackupSchedule(msrest.serialization.Model):
"""Description of a backup schedule. Describes how often should be the backup performed and what should be the retention policy.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param frequency_interval: Required. How often the backup should be executed (e.g. for weekly
backup, this should be set to 7 and FrequencyUnit should be set to Day).
:type frequency_interval: int
:param frequency_unit: Required. The unit of time for how often the backup should be executed
(e.g. for weekly backup, this should be set to Day and FrequencyInterval should be set to 7).
Possible values include: "Day", "Hour". Default value: "Day".
:type frequency_unit: str or ~azure.mgmt.web.v2016_08_01.models.FrequencyUnit
:param keep_at_least_one_backup: Required. True if the retention policy should always keep at
least one backup in the storage account, regardless how old it is; false otherwise.
:type keep_at_least_one_backup: bool
:param retention_period_in_days: Required. After how many days backups should be deleted.
:type retention_period_in_days: int
:param start_time: When the schedule should start working.
:type start_time: ~datetime.datetime
:ivar last_execution_time: Last time when this schedule was triggered.
:vartype last_execution_time: ~datetime.datetime
"""
_validation = {
'frequency_interval': {'required': True},
'frequency_unit': {'required': True},
'keep_at_least_one_backup': {'required': True},
'retention_period_in_days': {'required': True},
'last_execution_time': {'readonly': True},
}
_attribute_map = {
'frequency_interval': {'key': 'frequencyInterval', 'type': 'int'},
'frequency_unit': {'key': 'frequencyUnit', 'type': 'str'},
'keep_at_least_one_backup': {'key': 'keepAtLeastOneBackup', 'type': 'bool'},
'retention_period_in_days': {'key': 'retentionPeriodInDays', 'type': 'int'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'last_execution_time': {'key': 'lastExecutionTime', 'type': 'iso-8601'},
}
def __init__(
self,
*,
frequency_interval: int = 7,
frequency_unit: Union[str, "FrequencyUnit"] = "Day",
keep_at_least_one_backup: bool = True,
retention_period_in_days: int = 30,
start_time: Optional[datetime.datetime] = None,
**kwargs
):
super(BackupSchedule, self).__init__(**kwargs)
self.frequency_interval = frequency_interval
self.frequency_unit = frequency_unit
self.keep_at_least_one_backup = keep_at_least_one_backup
self.retention_period_in_days = retention_period_in_days
self.start_time = start_time
self.last_execution_time = None
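# Example (sketch, not part of the generated model): the weekly schedule
# described in the BackupSchedule docstring above -- run every 7 days, keep at
# least one backup, and delete backups older than 30 days.
#
#   weekly = BackupSchedule(frequency_interval=7,
#                           frequency_unit="Day",
#                           keep_at_least_one_backup=True,
#                           retention_period_in_days=30)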
class CloningInfo(msrest.serialization.Model):
"""Information needed for cloning operation.
All required parameters must be populated in order to send to Azure.
:param correlation_id: Correlation ID of cloning operation. This ID ties multiple cloning
operations
together to use the same snapshot.
:type correlation_id: str
:param overwrite: :code:`<code>true</code>` to overwrite destination app; otherwise,
:code:`<code>false</code>`.
:type overwrite: bool
:param clone_custom_host_names: :code:`<code>true</code>` to clone custom hostnames from source
app; otherwise, :code:`<code>false</code>`.
:type clone_custom_host_names: bool
:param clone_source_control: :code:`<code>true</code>` to clone source control from source app;
otherwise, :code:`<code>false</code>`.
:type clone_source_control: bool
:param source_web_app_id: Required. ARM resource ID of the source app. App resource ID is of
the form
/subscriptions/{subId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}
for production slots and
/subscriptions/{subId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slotName}
for other slots.
:type source_web_app_id: str
:param hosting_environment: App Service Environment.
:type hosting_environment: str
:param app_settings_overrides: Application setting overrides for cloned app. If specified,
these settings override the settings cloned
from source app. Otherwise, application settings from source app are retained.
:type app_settings_overrides: dict[str, str]
:param configure_load_balancing: :code:`<code>true</code>` to configure load balancing for
source and destination app.
:type configure_load_balancing: bool
:param traffic_manager_profile_id: ARM resource ID of the Traffic Manager profile to use, if it
exists. Traffic Manager resource ID is of the form
/subscriptions/{subId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficManagerProfiles/{profileName}.
:type traffic_manager_profile_id: str
:param traffic_manager_profile_name: Name of Traffic Manager profile to create. This is only
needed if Traffic Manager profile does not already exist.
:type traffic_manager_profile_name: str
:param ignore_quotas: :code:`<code>true</code>` if quotas should be ignored; otherwise,
:code:`<code>false</code>`.
:type ignore_quotas: bool
"""
_validation = {
'source_web_app_id': {'required': True},
}
_attribute_map = {
'correlation_id': {'key': 'correlationId', 'type': 'str'},
'overwrite': {'key': 'overwrite', 'type': 'bool'},
'clone_custom_host_names': {'key': 'cloneCustomHostNames', 'type': 'bool'},
'clone_source_control': {'key': 'cloneSourceControl', 'type': 'bool'},
'source_web_app_id': {'key': 'sourceWebAppId', 'type': 'str'},
'hosting_environment': {'key': 'hostingEnvironment', 'type': 'str'},
'app_settings_overrides': {'key': 'appSettingsOverrides', 'type': '{str}'},
'configure_load_balancing': {'key': 'configureLoadBalancing', 'type': 'bool'},
'traffic_manager_profile_id': {'key': 'trafficManagerProfileId', 'type': 'str'},
'traffic_manager_profile_name': {'key': 'trafficManagerProfileName', 'type': 'str'},
'ignore_quotas': {'key': 'ignoreQuotas', 'type': 'bool'},
}
def __init__(
self,
*,
source_web_app_id: str,
correlation_id: Optional[str] = None,
overwrite: Optional[bool] = None,
clone_custom_host_names: Optional[bool] = None,
clone_source_control: Optional[bool] = None,
hosting_environment: Optional[str] = None,
app_settings_overrides: Optional[Dict[str, str]] = None,
configure_load_balancing: Optional[bool] = None,
traffic_manager_profile_id: Optional[str] = None,
traffic_manager_profile_name: Optional[str] = None,
ignore_quotas: Optional[bool] = None,
**kwargs
):
super(CloningInfo, self).__init__(**kwargs)
self.correlation_id = correlation_id
self.overwrite = overwrite
self.clone_custom_host_names = clone_custom_host_names
self.clone_source_control = clone_source_control
self.source_web_app_id = source_web_app_id
self.hosting_environment = hosting_environment
self.app_settings_overrides = app_settings_overrides
self.configure_load_balancing = configure_load_balancing
self.traffic_manager_profile_id = traffic_manager_profile_id
self.traffic_manager_profile_name = traffic_manager_profile_name
self.ignore_quotas = ignore_quotas
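# Example (sketch, not part of the generated model): cloning information that
# points at an existing production app. The subscription, resource group, and
# site names are placeholders; the resource ID format follows the CloningInfo
# docstring above.
#
#   info = CloningInfo(
#       source_web_app_id="/subscriptions/<subId>/resourceGroups/<rg>"
#                         "/providers/Microsoft.Web/sites/<siteName>",
#       clone_custom_host_names=True)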
class ConnectionStringDictionary(ProxyOnlyResource):
"""String dictionary resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param properties: Connection strings.
:type properties: dict[str, ~azure.mgmt.web.v2016_08_01.models.ConnStringValueTypePair]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{ConnStringValueTypePair}'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
properties: Optional[Dict[str, "ConnStringValueTypePair"]] = None,
**kwargs
):
super(ConnectionStringDictionary, self).__init__(kind=kind, **kwargs)
self.properties = properties
class ConnStringInfo(msrest.serialization.Model):
"""Database connection string information.
:param name: Name of connection string.
:type name: str
:param connection_string: Connection string value.
:type connection_string: str
:param type: Type of database. Possible values include: "MySql", "SQLServer", "SQLAzure",
"Custom", "NotificationHub", "ServiceBus", "EventHub", "ApiHub", "DocDb", "RedisCache",
"PostgreSQL".
:type type: str or ~azure.mgmt.web.v2016_08_01.models.ConnectionStringType
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'connection_string': {'key': 'connectionString', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
connection_string: Optional[str] = None,
type: Optional[Union[str, "ConnectionStringType"]] = None,
**kwargs
):
super(ConnStringInfo, self).__init__(**kwargs)
self.name = name
self.connection_string = connection_string
self.type = type
class ConnStringValueTypePair(msrest.serialization.Model):
"""Database connection string value to type pair.
All required parameters must be populated in order to send to Azure.
:param value: Required. Value of pair.
:type value: str
:param type: Required. Type of database. Possible values include: "MySql", "SQLServer",
"SQLAzure", "Custom", "NotificationHub", "ServiceBus", "EventHub", "ApiHub", "DocDb",
"RedisCache", "PostgreSQL".
:type type: str or ~azure.mgmt.web.v2016_08_01.models.ConnectionStringType
"""
_validation = {
'value': {'required': True},
'type': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
value: str,
type: Union[str, "ConnectionStringType"],
**kwargs
):
super(ConnStringValueTypePair, self).__init__(**kwargs)
self.value = value
self.type = type
class ContinuousWebJob(ProxyOnlyResource):
"""Continuous Web Job Information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param status: Job status. Possible values include: "Initializing", "Starting", "Running",
"PendingRestart", "Stopped".
:type status: str or ~azure.mgmt.web.v2016_08_01.models.ContinuousWebJobStatus
:param detailed_status: Detailed status.
:type detailed_status: str
:param log_url: Log URL.
:type log_url: str
:ivar name_properties_name: Job name. Used as job identifier in ARM resource URI.
:vartype name_properties_name: str
:param run_command: Run command.
:type run_command: str
:param url: Job URL.
:type url: str
:param extra_info_url: Extra Info URL.
:type extra_info_url: str
:param job_type: Job type. Possible values include: "Continuous", "Triggered".
:type job_type: str or ~azure.mgmt.web.v2016_08_01.models.WebJobType
:param error: Error information.
:type error: str
    :param using_sdk: Using SDK?
:type using_sdk: bool
:param settings: Job settings.
:type settings: dict[str, any]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'name_properties_name': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'str'},
'detailed_status': {'key': 'properties.detailedStatus', 'type': 'str'},
'log_url': {'key': 'properties.logUrl', 'type': 'str'},
'name_properties_name': {'key': 'properties.name', 'type': 'str'},
'run_command': {'key': 'properties.runCommand', 'type': 'str'},
'url': {'key': 'properties.url', 'type': 'str'},
'extra_info_url': {'key': 'properties.extraInfoUrl', 'type': 'str'},
'job_type': {'key': 'properties.jobType', 'type': 'str'},
'error': {'key': 'properties.error', 'type': 'str'},
'using_sdk': {'key': 'properties.usingSdk', 'type': 'bool'},
'settings': {'key': 'properties.settings', 'type': '{object}'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
status: Optional[Union[str, "ContinuousWebJobStatus"]] = None,
detailed_status: Optional[str] = None,
log_url: Optional[str] = None,
run_command: Optional[str] = None,
url: Optional[str] = None,
extra_info_url: Optional[str] = None,
job_type: Optional[Union[str, "WebJobType"]] = None,
error: Optional[str] = None,
using_sdk: Optional[bool] = None,
settings: Optional[Dict[str, Any]] = None,
**kwargs
):
super(ContinuousWebJob, self).__init__(kind=kind, **kwargs)
self.status = status
self.detailed_status = detailed_status
self.log_url = log_url
self.name_properties_name = None
self.run_command = run_command
self.url = url
self.extra_info_url = extra_info_url
self.job_type = job_type
self.error = error
self.using_sdk = using_sdk
self.settings = settings
class ContinuousWebJobCollection(msrest.serialization.Model):
"""Collection of Kudu continuous web job information elements.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2016_08_01.models.ContinuousWebJob]
:param next_link: Link to next page of resources.
:type next_link: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ContinuousWebJob]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["ContinuousWebJob"],
next_link: Optional[str] = None,
**kwargs
):
super(ContinuousWebJobCollection, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class CorsSettings(msrest.serialization.Model):
"""Cross-Origin Resource Sharing (CORS) settings for the app.
:param allowed_origins: Gets or sets the list of origins that should be allowed to make
cross-origin
calls (for example: http://example.com:12345). Use "*" to allow all.
:type allowed_origins: list[str]
"""
_attribute_map = {
'allowed_origins': {'key': 'allowedOrigins', 'type': '[str]'},
}
def __init__(
self,
*,
allowed_origins: Optional[List[str]] = None,
**kwargs
):
super(CorsSettings, self).__init__(**kwargs)
self.allowed_origins = allowed_origins
class CsmPublishingProfileOptions(msrest.serialization.Model):
"""Publishing options for requested profile.
:param format: Name of the format. Valid values are:
FileZilla3
WebDeploy -- default
Ftp. Possible values include: "FileZilla3", "WebDeploy", "Ftp".
:type format: str or ~azure.mgmt.web.v2016_08_01.models.PublishingProfileFormat
"""
_attribute_map = {
'format': {'key': 'format', 'type': 'str'},
}
def __init__(
self,
*,
format: Optional[Union[str, "PublishingProfileFormat"]] = None,
**kwargs
):
super(CsmPublishingProfileOptions, self).__init__(**kwargs)
self.format = format
class CsmSlotEntity(msrest.serialization.Model):
"""Deployment slot parameters.
All required parameters must be populated in order to send to Azure.
:param target_slot: Required. Destination deployment slot during swap operation.
:type target_slot: str
:param preserve_vnet: Required. :code:`<code>true</code>` to preserve Virtual Network to the
slot during swap; otherwise, :code:`<code>false</code>`.
:type preserve_vnet: bool
"""
_validation = {
'target_slot': {'required': True},
'preserve_vnet': {'required': True},
}
_attribute_map = {
'target_slot': {'key': 'targetSlot', 'type': 'str'},
'preserve_vnet': {'key': 'preserveVnet', 'type': 'bool'},
}
def __init__(
self,
*,
target_slot: str,
preserve_vnet: bool,
**kwargs
):
super(CsmSlotEntity, self).__init__(**kwargs)
self.target_slot = target_slot
self.preserve_vnet = preserve_vnet
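# Example (sketch, not part of the generated model): a swap payload targeting
# a hypothetical "staging" slot while keeping the Virtual Network association,
# using the two required parameters documented above.
#
#   swap = CsmSlotEntity(target_slot="staging", preserve_vnet=True)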
class CsmUsageQuota(msrest.serialization.Model):
"""Usage of the quota resource.
:param unit: Units of measurement for the quota resource.
:type unit: str
:param next_reset_time: Next reset time for the resource counter.
:type next_reset_time: ~datetime.datetime
:param current_value: The current value of the resource counter.
:type current_value: long
:param limit: The resource limit.
:type limit: long
:param name: Quota name.
:type name: ~azure.mgmt.web.v2016_08_01.models.LocalizableString
"""
_attribute_map = {
'unit': {'key': 'unit', 'type': 'str'},
'next_reset_time': {'key': 'nextResetTime', 'type': 'iso-8601'},
'current_value': {'key': 'currentValue', 'type': 'long'},
'limit': {'key': 'limit', 'type': 'long'},
'name': {'key': 'name', 'type': 'LocalizableString'},
}
def __init__(
self,
*,
unit: Optional[str] = None,
next_reset_time: Optional[datetime.datetime] = None,
current_value: Optional[int] = None,
limit: Optional[int] = None,
name: Optional["LocalizableString"] = None,
**kwargs
):
super(CsmUsageQuota, self).__init__(**kwargs)
self.unit = unit
self.next_reset_time = next_reset_time
self.current_value = current_value
self.limit = limit
self.name = name
class CsmUsageQuotaCollection(msrest.serialization.Model):
"""Collection of CSM usage quotas.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2016_08_01.models.CsmUsageQuota]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[CsmUsageQuota]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["CsmUsageQuota"],
**kwargs
):
super(CsmUsageQuotaCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class CustomHostnameAnalysisResult(ProxyOnlyResource):
"""Custom domain analysis.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar is_hostname_already_verified: :code:`<code>true</code>` if hostname is already verified;
otherwise, :code:`<code>false</code>`.
:vartype is_hostname_already_verified: bool
:ivar custom_domain_verification_test: DNS verification test result. Possible values include:
"Passed", "Failed", "Skipped".
:vartype custom_domain_verification_test: str or
~azure.mgmt.web.v2016_08_01.models.DnsVerificationTestResult
:ivar custom_domain_verification_failure_info: Raw failure information if DNS verification
fails.
:vartype custom_domain_verification_failure_info:
~azure.mgmt.web.v2016_08_01.models.ErrorEntity
:ivar has_conflict_on_scale_unit: :code:`<code>true</code>` if there is a conflict on a scale
unit; otherwise, :code:`<code>false</code>`.
:vartype has_conflict_on_scale_unit: bool
:ivar has_conflict_across_subscription: :code:`<code>true</code>` if there is a conflict across
subscriptions; otherwise, :code:`<code>false</code>`.
:vartype has_conflict_across_subscription: bool
:ivar conflicting_app_resource_id: Name of the conflicting app on scale unit if it's within the
same subscription.
:vartype conflicting_app_resource_id: str
:param c_name_records: CName records controller can see for this hostname.
:type c_name_records: list[str]
:param txt_records: TXT records controller can see for this hostname.
:type txt_records: list[str]
:param a_records: A records controller can see for this hostname.
:type a_records: list[str]
:param alternate_c_name_records: Alternate CName records controller can see for this hostname.
:type alternate_c_name_records: list[str]
:param alternate_txt_records: Alternate TXT records controller can see for this hostname.
:type alternate_txt_records: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'is_hostname_already_verified': {'readonly': True},
'custom_domain_verification_test': {'readonly': True},
'custom_domain_verification_failure_info': {'readonly': True},
'has_conflict_on_scale_unit': {'readonly': True},
'has_conflict_across_subscription': {'readonly': True},
'conflicting_app_resource_id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'is_hostname_already_verified': {'key': 'properties.isHostnameAlreadyVerified', 'type': 'bool'},
'custom_domain_verification_test': {'key': 'properties.customDomainVerificationTest', 'type': 'str'},
'custom_domain_verification_failure_info': {'key': 'properties.customDomainVerificationFailureInfo', 'type': 'ErrorEntity'},
'has_conflict_on_scale_unit': {'key': 'properties.hasConflictOnScaleUnit', 'type': 'bool'},
'has_conflict_across_subscription': {'key': 'properties.hasConflictAcrossSubscription', 'type': 'bool'},
'conflicting_app_resource_id': {'key': 'properties.conflictingAppResourceId', 'type': 'str'},
'c_name_records': {'key': 'properties.cNameRecords', 'type': '[str]'},
'txt_records': {'key': 'properties.txtRecords', 'type': '[str]'},
'a_records': {'key': 'properties.aRecords', 'type': '[str]'},
'alternate_c_name_records': {'key': 'properties.alternateCNameRecords', 'type': '[str]'},
'alternate_txt_records': {'key': 'properties.alternateTxtRecords', 'type': '[str]'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
c_name_records: Optional[List[str]] = None,
txt_records: Optional[List[str]] = None,
a_records: Optional[List[str]] = None,
alternate_c_name_records: Optional[List[str]] = None,
alternate_txt_records: Optional[List[str]] = None,
**kwargs
):
super(CustomHostnameAnalysisResult, self).__init__(kind=kind, **kwargs)
self.is_hostname_already_verified = None
self.custom_domain_verification_test = None
self.custom_domain_verification_failure_info = None
self.has_conflict_on_scale_unit = None
self.has_conflict_across_subscription = None
self.conflicting_app_resource_id = None
self.c_name_records = c_name_records
self.txt_records = txt_records
self.a_records = a_records
self.alternate_c_name_records = alternate_c_name_records
self.alternate_txt_records = alternate_txt_records
class DatabaseBackupSetting(msrest.serialization.Model):
"""Database backup settings.
All required parameters must be populated in order to send to Azure.
:param database_type: Required. Database type (e.g. SqlAzure / MySql). Possible values include:
"SqlAzure", "MySql", "LocalMySql", "PostgreSql".
:type database_type: str or ~azure.mgmt.web.v2016_08_01.models.DatabaseType
:param name:
:type name: str
:param connection_string_name: Contains a connection string name that is linked to the
SiteConfig.ConnectionStrings.
This is used during restore with overwrite connection strings options.
:type connection_string_name: str
:param connection_string: Contains a connection string to a database which is being backed up
or restored. If the restore should happen to a new database, the database name inside is the
new one.
:type connection_string: str
"""
_validation = {
'database_type': {'required': True},
}
_attribute_map = {
'database_type': {'key': 'databaseType', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'connection_string_name': {'key': 'connectionStringName', 'type': 'str'},
'connection_string': {'key': 'connectionString', 'type': 'str'},
}
def __init__(
self,
*,
database_type: Union[str, "DatabaseType"],
name: Optional[str] = None,
connection_string_name: Optional[str] = None,
connection_string: Optional[str] = None,
**kwargs
):
super(DatabaseBackupSetting, self).__init__(**kwargs)
self.database_type = database_type
self.name = name
self.connection_string_name = connection_string_name
self.connection_string = connection_string
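# Example (sketch, not part of the generated model): back up a SQL Azure
# database referenced by a hypothetical "MyDb" connection string in the site
# configuration, as described by connection_string_name above.
#
#   db_setting = DatabaseBackupSetting(database_type="SqlAzure",
#                                      name="MyDb",
#                                      connection_string_name="MyDb")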
class Deployment(ProxyOnlyResource):
"""User credentials used for publishing activity.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param id_properties_id: Identifier for deployment.
:type id_properties_id: str
:param status: Deployment status.
:type status: int
:param message: Details about deployment status.
:type message: str
:param author: Who authored the deployment.
:type author: str
:param deployer: Who performed the deployment.
:type deployer: str
:param author_email: Author email.
:type author_email: str
:param start_time: Start time.
:type start_time: ~datetime.datetime
:param end_time: End time.
:type end_time: ~datetime.datetime
:param active: True if deployment is currently active, false if completed and null if not
started.
:type active: bool
:param details: Details on deployment.
:type details: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'id_properties_id': {'key': 'properties.id', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'int'},
'message': {'key': 'properties.message', 'type': 'str'},
'author': {'key': 'properties.author', 'type': 'str'},
'deployer': {'key': 'properties.deployer', 'type': 'str'},
'author_email': {'key': 'properties.authorEmail', 'type': 'str'},
'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
'end_time': {'key': 'properties.endTime', 'type': 'iso-8601'},
'active': {'key': 'properties.active', 'type': 'bool'},
'details': {'key': 'properties.details', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
id_properties_id: Optional[str] = None,
status: Optional[int] = None,
message: Optional[str] = None,
author: Optional[str] = None,
deployer: Optional[str] = None,
author_email: Optional[str] = None,
start_time: Optional[datetime.datetime] = None,
end_time: Optional[datetime.datetime] = None,
active: Optional[bool] = None,
details: Optional[str] = None,
**kwargs
):
super(Deployment, self).__init__(kind=kind, **kwargs)
self.id_properties_id = id_properties_id
self.status = status
self.message = message
self.author = author
self.deployer = deployer
self.author_email = author_email
self.start_time = start_time
self.end_time = end_time
self.active = active
self.details = details
class DeploymentCollection(msrest.serialization.Model):
"""Collection of app deployments.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2016_08_01.models.Deployment]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Deployment]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["Deployment"],
**kwargs
):
super(DeploymentCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class EnabledConfig(msrest.serialization.Model):
"""Enabled configuration.
:param enabled: True if configuration is enabled, false if it is disabled and null if
configuration is not set.
:type enabled: bool
"""
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
}
def __init__(
self,
*,
enabled: Optional[bool] = None,
**kwargs
):
super(EnabledConfig, self).__init__(**kwargs)
self.enabled = enabled
class ErrorEntity(msrest.serialization.Model):
"""Body of the error response returned from the API.
:param extended_code: Type of error.
:type extended_code: str
:param message_template: Message template.
:type message_template: str
:param parameters: Parameters for the template.
:type parameters: list[str]
:param inner_errors: Inner errors.
:type inner_errors: list[~azure.mgmt.web.v2016_08_01.models.ErrorEntity]
:param code: Basic error code.
:type code: str
:param message: Any details of the error.
:type message: str
"""
_attribute_map = {
'extended_code': {'key': 'extendedCode', 'type': 'str'},
'message_template': {'key': 'messageTemplate', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '[str]'},
'inner_errors': {'key': 'innerErrors', 'type': '[ErrorEntity]'},
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
extended_code: Optional[str] = None,
message_template: Optional[str] = None,
parameters: Optional[List[str]] = None,
inner_errors: Optional[List["ErrorEntity"]] = None,
code: Optional[str] = None,
message: Optional[str] = None,
**kwargs
):
super(ErrorEntity, self).__init__(**kwargs)
self.extended_code = extended_code
self.message_template = message_template
self.parameters = parameters
self.inner_errors = inner_errors
self.code = code
self.message = message
class Experiments(msrest.serialization.Model):
"""Routing rules in production experiments.
:param ramp_up_rules: List of ramp-up rules.
:type ramp_up_rules: list[~azure.mgmt.web.v2016_08_01.models.RampUpRule]
"""
_attribute_map = {
'ramp_up_rules': {'key': 'rampUpRules', 'type': '[RampUpRule]'},
}
def __init__(
self,
*,
ramp_up_rules: Optional[List["RampUpRule"]] = None,
**kwargs
):
super(Experiments, self).__init__(**kwargs)
self.ramp_up_rules = ramp_up_rules
class FileSystemApplicationLogsConfig(msrest.serialization.Model):
"""Application logs to file system configuration.
:param level: Log level. Possible values include: "Off", "Verbose", "Information", "Warning",
"Error".
:type level: str or ~azure.mgmt.web.v2016_08_01.models.LogLevel
"""
_attribute_map = {
'level': {'key': 'level', 'type': 'str'},
}
def __init__(
self,
*,
level: Optional[Union[str, "LogLevel"]] = None,
**kwargs
):
super(FileSystemApplicationLogsConfig, self).__init__(**kwargs)
self.level = level
class FileSystemHttpLogsConfig(msrest.serialization.Model):
"""Http logs to file system configuration.
:param retention_in_mb: Maximum size in megabytes that http log files can use.
When reached old log files will be removed to make space for new ones.
Value can range between 25 and 100.
:type retention_in_mb: int
:param retention_in_days: Retention in days.
Remove files older than X days.
0 or lower means no retention.
:type retention_in_days: int
:param enabled: True if configuration is enabled, false if it is disabled and null if
configuration is not set.
:type enabled: bool
"""
_validation = {
'retention_in_mb': {'maximum': 100, 'minimum': 25},
}
_attribute_map = {
'retention_in_mb': {'key': 'retentionInMb', 'type': 'int'},
'retention_in_days': {'key': 'retentionInDays', 'type': 'int'},
'enabled': {'key': 'enabled', 'type': 'bool'},
}
def __init__(
self,
*,
retention_in_mb: Optional[int] = None,
retention_in_days: Optional[int] = None,
enabled: Optional[bool] = None,
**kwargs
):
super(FileSystemHttpLogsConfig, self).__init__(**kwargs)
self.retention_in_mb = retention_in_mb
self.retention_in_days = retention_in_days
self.enabled = enabled
class FunctionEnvelope(ProxyOnlyResource):
"""Web Job Information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar name_properties_name: Function name.
:vartype name_properties_name: str
:ivar function_app_id: Function App ID.
:vartype function_app_id: str
:param script_root_path_href: Script root path URI.
:type script_root_path_href: str
:param script_href: Script URI.
:type script_href: str
:param config_href: Config URI.
:type config_href: str
:param secrets_file_href: Secrets file URI.
:type secrets_file_href: str
:param href: Function URI.
:type href: str
:param config: Config information.
:type config: any
:param files: File list.
:type files: dict[str, str]
:param test_data: Test data used when testing via the Azure Portal.
:type test_data: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'name_properties_name': {'readonly': True},
'function_app_id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'name_properties_name': {'key': 'properties.name', 'type': 'str'},
'function_app_id': {'key': 'properties.functionAppId', 'type': 'str'},
'script_root_path_href': {'key': 'properties.scriptRootPathHref', 'type': 'str'},
'script_href': {'key': 'properties.scriptHref', 'type': 'str'},
'config_href': {'key': 'properties.configHref', 'type': 'str'},
'secrets_file_href': {'key': 'properties.secretsFileHref', 'type': 'str'},
'href': {'key': 'properties.href', 'type': 'str'},
'config': {'key': 'properties.config', 'type': 'object'},
'files': {'key': 'properties.files', 'type': '{str}'},
'test_data': {'key': 'properties.testData', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
script_root_path_href: Optional[str] = None,
script_href: Optional[str] = None,
config_href: Optional[str] = None,
secrets_file_href: Optional[str] = None,
href: Optional[str] = None,
config: Optional[Any] = None,
files: Optional[Dict[str, str]] = None,
test_data: Optional[str] = None,
**kwargs
):
super(FunctionEnvelope, self).__init__(kind=kind, **kwargs)
self.name_properties_name = None
self.function_app_id = None
self.script_root_path_href = script_root_path_href
self.script_href = script_href
self.config_href = config_href
self.secrets_file_href = secrets_file_href
self.href = href
self.config = config
self.files = files
self.test_data = test_data
class FunctionEnvelopeCollection(msrest.serialization.Model):
"""Collection of Kudu function information elements.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2016_08_01.models.FunctionEnvelope]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[FunctionEnvelope]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["FunctionEnvelope"],
**kwargs
):
super(FunctionEnvelopeCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class FunctionSecrets(ProxyOnlyResource):
"""Function secrets.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param key: Secret key.
:type key: str
:param trigger_url: Trigger URL.
:type trigger_url: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'key': {'key': 'properties.key', 'type': 'str'},
'trigger_url': {'key': 'properties.triggerUrl', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
key: Optional[str] = None,
trigger_url: Optional[str] = None,
**kwargs
):
super(FunctionSecrets, self).__init__(kind=kind, **kwargs)
self.key = key
self.trigger_url = trigger_url
class HandlerMapping(msrest.serialization.Model):
"""The IIS handler mappings used to define which handler processes HTTP requests with certain extension.
For example, it is used to configure php-cgi.exe process to handle all HTTP requests with *.php extension.
:param extension: Requests with this extension will be handled using the specified FastCGI
application.
:type extension: str
:param script_processor: The absolute path to the FastCGI application.
:type script_processor: str
:param arguments: Command-line arguments to be passed to the script processor.
:type arguments: str
"""
_attribute_map = {
'extension': {'key': 'extension', 'type': 'str'},
'script_processor': {'key': 'scriptProcessor', 'type': 'str'},
'arguments': {'key': 'arguments', 'type': 'str'},
}
def __init__(
self,
*,
extension: Optional[str] = None,
script_processor: Optional[str] = None,
arguments: Optional[str] = None,
**kwargs
):
super(HandlerMapping, self).__init__(**kwargs)
self.extension = extension
self.script_processor = script_processor
self.arguments = arguments
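# Illustrative sketch (not part of the generated SDK): a HandlerMapping that routes *.php
# requests to a FastCGI script processor, mirroring the php-cgi.exe case mentioned in the
# docstring above. The executable path is an assumption for demonstration only.
def _example_php_handler_mapping():
    # Route requests ending in .php to the FastCGI executable.
    return HandlerMapping(
        extension="*.php",
        script_processor="D:\\Program Files\\PHP\\php-cgi.exe",
    )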
class HostingEnvironmentProfile(msrest.serialization.Model):
"""Specification for an App Service Environment to use for this resource.
Variables are only populated by the server, and will be ignored when sending a request.
:param id: Resource ID of the App Service Environment.
:type id: str
:ivar name: Name of the App Service Environment.
:vartype name: str
:ivar type: Resource type of the App Service Environment.
:vartype type: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
**kwargs
):
super(HostingEnvironmentProfile, self).__init__(**kwargs)
self.id = id
self.name = None
self.type = None
class HostNameBinding(ProxyOnlyResource):
"""A hostname binding object.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param site_name: App Service app name.
:type site_name: str
:param domain_id: Fully qualified ARM domain resource URI.
:type domain_id: str
:param azure_resource_name: Azure resource name.
:type azure_resource_name: str
:param azure_resource_type: Azure resource type. Possible values include: "Website",
"TrafficManager".
:type azure_resource_type: str or ~azure.mgmt.web.v2016_08_01.models.AzureResourceType
:param custom_host_name_dns_record_type: Custom DNS record type. Possible values include:
"CName", "A".
:type custom_host_name_dns_record_type: str or
~azure.mgmt.web.v2016_08_01.models.CustomHostNameDnsRecordType
:param host_name_type: Hostname type. Possible values include: "Verified", "Managed".
:type host_name_type: str or ~azure.mgmt.web.v2016_08_01.models.HostNameType
:param ssl_state: SSL type. Possible values include: "Disabled", "SniEnabled",
"IpBasedEnabled".
:type ssl_state: str or ~azure.mgmt.web.v2016_08_01.models.SslState
:param thumbprint: SSL certificate thumbprint.
:type thumbprint: str
:ivar virtual_ip: Virtual IP address assigned to the hostname if IP based SSL is enabled.
:vartype virtual_ip: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'virtual_ip': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'site_name': {'key': 'properties.siteName', 'type': 'str'},
'domain_id': {'key': 'properties.domainId', 'type': 'str'},
'azure_resource_name': {'key': 'properties.azureResourceName', 'type': 'str'},
'azure_resource_type': {'key': 'properties.azureResourceType', 'type': 'str'},
'custom_host_name_dns_record_type': {'key': 'properties.customHostNameDnsRecordType', 'type': 'str'},
'host_name_type': {'key': 'properties.hostNameType', 'type': 'str'},
'ssl_state': {'key': 'properties.sslState', 'type': 'str'},
'thumbprint': {'key': 'properties.thumbprint', 'type': 'str'},
'virtual_ip': {'key': 'properties.virtualIP', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
site_name: Optional[str] = None,
domain_id: Optional[str] = None,
azure_resource_name: Optional[str] = None,
azure_resource_type: Optional[Union[str, "AzureResourceType"]] = None,
custom_host_name_dns_record_type: Optional[Union[str, "CustomHostNameDnsRecordType"]] = None,
host_name_type: Optional[Union[str, "HostNameType"]] = None,
ssl_state: Optional[Union[str, "SslState"]] = None,
thumbprint: Optional[str] = None,
**kwargs
):
super(HostNameBinding, self).__init__(kind=kind, **kwargs)
self.site_name = site_name
self.domain_id = domain_id
self.azure_resource_name = azure_resource_name
self.azure_resource_type = azure_resource_type
self.custom_host_name_dns_record_type = custom_host_name_dns_record_type
self.host_name_type = host_name_type
self.ssl_state = ssl_state
self.thumbprint = thumbprint
self.virtual_ip = None
class HostNameBindingCollection(msrest.serialization.Model):
"""Collection of hostname bindings.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2016_08_01.models.HostNameBinding]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[HostNameBinding]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["HostNameBinding"],
**kwargs
):
super(HostNameBindingCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class HostNameSslState(msrest.serialization.Model):
"""SSL-enabled hostname.
:param name: Hostname.
:type name: str
:param ssl_state: SSL type. Possible values include: "Disabled", "SniEnabled",
"IpBasedEnabled".
:type ssl_state: str or ~azure.mgmt.web.v2016_08_01.models.SslState
:param virtual_ip: Virtual IP address assigned to the hostname if IP based SSL is enabled.
:type virtual_ip: str
:param thumbprint: SSL certificate thumbprint.
:type thumbprint: str
:param to_update: Set to :code:`<code>true</code>` to update existing hostname.
:type to_update: bool
:param host_type: Indicates whether the hostname is a standard or repository hostname. Possible
values include: "Standard", "Repository".
:type host_type: str or ~azure.mgmt.web.v2016_08_01.models.HostType
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'ssl_state': {'key': 'sslState', 'type': 'str'},
'virtual_ip': {'key': 'virtualIP', 'type': 'str'},
'thumbprint': {'key': 'thumbprint', 'type': 'str'},
'to_update': {'key': 'toUpdate', 'type': 'bool'},
'host_type': {'key': 'hostType', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
ssl_state: Optional[Union[str, "SslState"]] = None,
virtual_ip: Optional[str] = None,
thumbprint: Optional[str] = None,
to_update: Optional[bool] = None,
host_type: Optional[Union[str, "HostType"]] = None,
**kwargs
):
super(HostNameSslState, self).__init__(**kwargs)
self.name = name
self.ssl_state = ssl_state
self.virtual_ip = virtual_ip
self.thumbprint = thumbprint
self.to_update = to_update
self.host_type = host_type
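# Illustrative sketch (not part of the generated SDK): an SNI-based SSL binding for a custom
# hostname. ssl_state accepts either the SslState enum or its string value, as the __init__
# signature above declares; the hostname and thumbprint values are assumptions.
def _example_sni_host_name_ssl_state():
    return HostNameSslState(
        name="www.contoso.com",
        ssl_state="SniEnabled",  # documented value; an SslState enum member is also accepted
        thumbprint="0123456789ABCDEF0123456789ABCDEF01234567",
        to_update=True,
    )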
class HttpLogsConfig(msrest.serialization.Model):
"""Http logs configuration.
:param file_system: Http logs to file system configuration.
:type file_system: ~azure.mgmt.web.v2016_08_01.models.FileSystemHttpLogsConfig
:param azure_blob_storage: Http logs to Azure Blob storage configuration.
:type azure_blob_storage: ~azure.mgmt.web.v2016_08_01.models.AzureBlobStorageHttpLogsConfig
"""
_attribute_map = {
'file_system': {'key': 'fileSystem', 'type': 'FileSystemHttpLogsConfig'},
'azure_blob_storage': {'key': 'azureBlobStorage', 'type': 'AzureBlobStorageHttpLogsConfig'},
}
def __init__(
self,
*,
file_system: Optional["FileSystemHttpLogsConfig"] = None,
azure_blob_storage: Optional["AzureBlobStorageHttpLogsConfig"] = None,
**kwargs
):
super(HttpLogsConfig, self).__init__(**kwargs)
self.file_system = file_system
self.azure_blob_storage = azure_blob_storage
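# Illustrative sketch (not part of the generated SDK): file-system HTTP logging wrapped in an
# HttpLogsConfig. retention_in_mb must satisfy the 25-100 range declared in the _validation
# map of FileSystemHttpLogsConfig above; the specific values chosen here are assumptions.
def _example_http_logs_config():
    file_system = FileSystemHttpLogsConfig(
        retention_in_mb=35,    # within the validated 25-100 range
        retention_in_days=7,   # remove files older than a week
        enabled=True,
    )
    return HttpLogsConfig(file_system=file_system)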
class HybridConnection(ProxyOnlyResource):
"""Hybrid Connection contract. This is used to configure a Hybrid Connection.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param service_bus_namespace: The name of the Service Bus namespace.
:type service_bus_namespace: str
:param relay_name: The name of the Service Bus relay.
:type relay_name: str
:param relay_arm_uri: The ARM URI to the Service Bus relay.
:type relay_arm_uri: str
:param hostname: The hostname of the endpoint.
:type hostname: str
:param port: The port of the endpoint.
:type port: int
:param send_key_name: The name of the Service Bus key which has Send permissions. This is used
to authenticate to Service Bus.
:type send_key_name: str
:param send_key_value: The value of the Service Bus key. This is used to authenticate to
Service Bus. In ARM this key will not be returned
normally; use the POST /listKeys API instead.
:type send_key_value: str
:param service_bus_suffix: The suffix for the service bus endpoint. By default this is
.servicebus.windows.net.
:type service_bus_suffix: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'service_bus_namespace': {'key': 'properties.serviceBusNamespace', 'type': 'str'},
'relay_name': {'key': 'properties.relayName', 'type': 'str'},
'relay_arm_uri': {'key': 'properties.relayArmUri', 'type': 'str'},
'hostname': {'key': 'properties.hostname', 'type': 'str'},
'port': {'key': 'properties.port', 'type': 'int'},
'send_key_name': {'key': 'properties.sendKeyName', 'type': 'str'},
'send_key_value': {'key': 'properties.sendKeyValue', 'type': 'str'},
'service_bus_suffix': {'key': 'properties.serviceBusSuffix', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
service_bus_namespace: Optional[str] = None,
relay_name: Optional[str] = None,
relay_arm_uri: Optional[str] = None,
hostname: Optional[str] = None,
port: Optional[int] = None,
send_key_name: Optional[str] = None,
send_key_value: Optional[str] = None,
service_bus_suffix: Optional[str] = None,
**kwargs
):
super(HybridConnection, self).__init__(kind=kind, **kwargs)
self.service_bus_namespace = service_bus_namespace
self.relay_name = relay_name
self.relay_arm_uri = relay_arm_uri
self.hostname = hostname
self.port = port
self.send_key_name = send_key_name
self.send_key_value = send_key_value
self.service_bus_suffix = service_bus_suffix
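# Illustrative sketch (not part of the generated SDK): a Hybrid Connection pointing at an
# on-premises endpoint through a Service Bus relay. All names and the key value are
# assumptions; in practice send_key_value is obtained via the POST /listKeys API, as the
# docstring above notes.
def _example_hybrid_connection():
    return HybridConnection(
        service_bus_namespace="contoso-relay-ns",
        relay_name="onprem-sql",
        hostname="sqlserver.corp.contoso.com",
        port=1433,
        send_key_name="defaultSender",
        send_key_value="<key value from /listKeys>",
    )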
class HybridConnectionKey(ProxyOnlyResource):
"""Hybrid Connection key contract. This has the send key name and value for a Hybrid Connection.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar send_key_name: The name of the send key.
:vartype send_key_name: str
:ivar send_key_value: The value of the send key.
:vartype send_key_value: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'send_key_name': {'readonly': True},
'send_key_value': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'send_key_name': {'key': 'properties.sendKeyName', 'type': 'str'},
'send_key_value': {'key': 'properties.sendKeyValue', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(HybridConnectionKey, self).__init__(kind=kind, **kwargs)
self.send_key_name = None
self.send_key_value = None
class Identifier(ProxyOnlyResource):
"""A domain specific resource identifier.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param id_properties_id: String representation of the identity.
:type id_properties_id: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'id_properties_id': {'key': 'properties.id', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
id_properties_id: Optional[str] = None,
**kwargs
):
super(Identifier, self).__init__(kind=kind, **kwargs)
self.id_properties_id = id_properties_id
class IdentifierCollection(msrest.serialization.Model):
"""Collection of identifiers.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2016_08_01.models.Identifier]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Identifier]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["Identifier"],
**kwargs
):
super(IdentifierCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class IpSecurityRestriction(msrest.serialization.Model):
"""IP security restriction on an app.
All required parameters must be populated in order to send to Azure.
:param ip_address: Required. IP address the security restriction is valid for.
:type ip_address: str
:param subnet_mask: Subnet mask for the range of IP addresses the restriction is valid for.
:type subnet_mask: str
"""
_validation = {
'ip_address': {'required': True},
}
_attribute_map = {
'ip_address': {'key': 'ipAddress', 'type': 'str'},
'subnet_mask': {'key': 'subnetMask', 'type': 'str'},
}
def __init__(
self,
*,
ip_address: str,
subnet_mask: Optional[str] = None,
**kwargs
):
super(IpSecurityRestriction, self).__init__(**kwargs)
self.ip_address = ip_address
self.subnet_mask = subnet_mask
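# Illustrative sketch (not part of the generated SDK): ip_address is the only required
# argument, per the _validation map above; subnet_mask widens the rule to an address range.
# The addresses below are documentation-range values chosen purely for demonstration.
def _example_ip_security_restriction():
    return IpSecurityRestriction(
        ip_address="203.0.113.0",
        subnet_mask="255.255.255.0",
    )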
class LocalizableString(msrest.serialization.Model):
"""Localizable string object containing the name and a localized value.
:param value: Non-localized name.
:type value: str
:param localized_value: Localized name.
:type localized_value: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'localized_value': {'key': 'localizedValue', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[str] = None,
localized_value: Optional[str] = None,
**kwargs
):
super(LocalizableString, self).__init__(**kwargs)
self.value = value
self.localized_value = localized_value
class ManagedServiceIdentity(msrest.serialization.Model):
"""Managed service identity.
Variables are only populated by the server, and will be ignored when sending a request.
:param type: Type of managed service identity. Possible values include: "SystemAssigned".
:type type: str or ~azure.mgmt.web.v2016_08_01.models.ManagedServiceIdentityType
:ivar tenant_id: Tenant of managed service identity.
:vartype tenant_id: str
:ivar principal_id: Principal Id of managed service identity.
:vartype principal_id: str
"""
_validation = {
'tenant_id': {'readonly': True},
'principal_id': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'principal_id': {'key': 'principalId', 'type': 'str'},
}
def __init__(
self,
*,
type: Optional[Union[str, "ManagedServiceIdentityType"]] = None,
**kwargs
):
super(ManagedServiceIdentity, self).__init__(**kwargs)
self.type = type
self.tenant_id = None
self.principal_id = None
class MigrateMySqlRequest(ProxyOnlyResource):
"""MySQL migration request.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param connection_string: Connection string to the remote MySQL database.
:type connection_string: str
:param migration_type: The type of migration operation to be done. Possible values include:
"LocalToRemote", "RemoteToLocal".
:type migration_type: str or ~azure.mgmt.web.v2016_08_01.models.MySqlMigrationType
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'connection_string': {'key': 'properties.connectionString', 'type': 'str'},
'migration_type': {'key': 'properties.migrationType', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
connection_string: Optional[str] = None,
migration_type: Optional[Union[str, "MySqlMigrationType"]] = None,
**kwargs
):
super(MigrateMySqlRequest, self).__init__(kind=kind, **kwargs)
self.connection_string = connection_string
self.migration_type = migration_type
class MigrateMySqlStatus(ProxyOnlyResource):
"""MySQL migration status.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar migration_operation_status: Status of the migration task. Possible values include:
"InProgress", "Failed", "Succeeded", "TimedOut", "Created".
:vartype migration_operation_status: str or ~azure.mgmt.web.v2016_08_01.models.OperationStatus
:ivar operation_id: Operation ID for the migration task.
:vartype operation_id: str
:ivar local_my_sql_enabled: True if the web app has in-app MySQL enabled.
:vartype local_my_sql_enabled: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'migration_operation_status': {'readonly': True},
'operation_id': {'readonly': True},
'local_my_sql_enabled': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'migration_operation_status': {'key': 'properties.migrationOperationStatus', 'type': 'str'},
'operation_id': {'key': 'properties.operationId', 'type': 'str'},
'local_my_sql_enabled': {'key': 'properties.localMySqlEnabled', 'type': 'bool'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(MigrateMySqlStatus, self).__init__(kind=kind, **kwargs)
self.migration_operation_status = None
self.operation_id = None
self.local_my_sql_enabled = None
class MSDeploy(ProxyOnlyResource):
"""MSDeploy ARM PUT information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param package_uri: Package URI.
:type package_uri: str
:param connection_string: SQL Connection String.
:type connection_string: str
:param db_type: Database Type.
:type db_type: str
:param set_parameters_xml_file_uri: URI of MSDeploy Parameters file. Must not be set if
SetParameters is used.
:type set_parameters_xml_file_uri: str
:param set_parameters: MSDeploy Parameters. Must not be set if SetParametersXmlFileUri is used.
:type set_parameters: dict[str, str]
:param skip_app_data: Controls whether the MSDeploy operation skips the App_Data directory.
If set to :code:`<code>true</code>`, the existing App_Data directory on the destination
will not be deleted, and any App_Data directory in the source will be ignored.
Setting is :code:`<code>false</code>` by default.
:type skip_app_data: bool
:param app_offline: Sets the AppOffline rule while the MSDeploy operation executes.
Setting is :code:`<code>false</code>` by default.
:type app_offline: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'package_uri': {'key': 'properties.packageUri', 'type': 'str'},
'connection_string': {'key': 'properties.connectionString', 'type': 'str'},
'db_type': {'key': 'properties.dbType', 'type': 'str'},
'set_parameters_xml_file_uri': {'key': 'properties.setParametersXmlFileUri', 'type': 'str'},
'set_parameters': {'key': 'properties.setParameters', 'type': '{str}'},
'skip_app_data': {'key': 'properties.skipAppData', 'type': 'bool'},
'app_offline': {'key': 'properties.appOffline', 'type': 'bool'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
package_uri: Optional[str] = None,
connection_string: Optional[str] = None,
db_type: Optional[str] = None,
set_parameters_xml_file_uri: Optional[str] = None,
set_parameters: Optional[Dict[str, str]] = None,
skip_app_data: Optional[bool] = None,
app_offline: Optional[bool] = None,
**kwargs
):
super(MSDeploy, self).__init__(kind=kind, **kwargs)
self.package_uri = package_uri
self.connection_string = connection_string
self.db_type = db_type
self.set_parameters_xml_file_uri = set_parameters_xml_file_uri
self.set_parameters = set_parameters
self.skip_app_data = skip_app_data
self.app_offline = app_offline
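# Illustrative sketch (not part of the generated SDK): an MSDeploy payload that deploys a
# package and supplies inline parameters. set_parameters and set_parameters_xml_file_uri are
# mutually exclusive, as the docstring above states; the URI and parameter name used here
# are assumptions for demonstration only.
def _example_msdeploy():
    return MSDeploy(
        package_uri="https://example.blob.core.windows.net/packages/site.zip",
        set_parameters={"IIS Web Application Name": "mysite"},
        skip_app_data=True,  # keep the existing App_Data directory on the destination
        app_offline=True,    # apply the AppOffline rule while the deployment runs
    )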
class MSDeployLog(ProxyOnlyResource):
"""MSDeploy log.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar entries: List of log entry messages.
:vartype entries: list[~azure.mgmt.web.v2016_08_01.models.MSDeployLogEntry]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'entries': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'entries': {'key': 'properties.entries', 'type': '[MSDeployLogEntry]'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(MSDeployLog, self).__init__(kind=kind, **kwargs)
self.entries = None
class MSDeployLogEntry(msrest.serialization.Model):
"""MSDeploy log entry.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar time: Timestamp of log entry.
:vartype time: ~datetime.datetime
:ivar type: Log entry type. Possible values include: "Message", "Warning", "Error".
:vartype type: str or ~azure.mgmt.web.v2016_08_01.models.MSDeployLogEntryType
:ivar message: Log entry message.
:vartype message: str
"""
_validation = {
'time': {'readonly': True},
'type': {'readonly': True},
'message': {'readonly': True},
}
_attribute_map = {
'time': {'key': 'time', 'type': 'iso-8601'},
'type': {'key': 'type', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MSDeployLogEntry, self).__init__(**kwargs)
self.time = None
self.type = None
self.message = None
class MSDeployStatus(ProxyOnlyResource):
"""MSDeploy ARM response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar deployer: Username of deployer.
:vartype deployer: str
:ivar provisioning_state: Provisioning state. Possible values include: "accepted", "running",
"succeeded", "failed", "canceled".
:vartype provisioning_state: str or
~azure.mgmt.web.v2016_08_01.models.MSDeployProvisioningState
:ivar start_time: Start time of deploy operation.
:vartype start_time: ~datetime.datetime
:ivar end_time: End time of deploy operation.
:vartype end_time: ~datetime.datetime
:ivar complete: Whether the deployment operation has completed.
:vartype complete: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'deployer': {'readonly': True},
'provisioning_state': {'readonly': True},
'start_time': {'readonly': True},
'end_time': {'readonly': True},
'complete': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'deployer': {'key': 'properties.deployer', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
'end_time': {'key': 'properties.endTime', 'type': 'iso-8601'},
'complete': {'key': 'properties.complete', 'type': 'bool'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(MSDeployStatus, self).__init__(kind=kind, **kwargs)
self.deployer = None
self.provisioning_state = None
self.start_time = None
self.end_time = None
self.complete = None
class NameValuePair(msrest.serialization.Model):
"""Name value pair.
:param name: Pair name.
:type name: str
:param value: Pair value.
:type value: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
value: Optional[str] = None,
**kwargs
):
super(NameValuePair, self).__init__(**kwargs)
self.name = name
self.value = value
class NetworkFeatures(ProxyOnlyResource):
"""Full view of network features for an app (presently VNET integration and Hybrid Connections).
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar virtual_network_name: The Virtual Network name.
:vartype virtual_network_name: str
:ivar virtual_network_connection: The Virtual Network summary view.
:vartype virtual_network_connection: ~azure.mgmt.web.v2016_08_01.models.VnetInfo
:ivar hybrid_connections: The Hybrid Connections summary view.
:vartype hybrid_connections:
list[~azure.mgmt.web.v2016_08_01.models.RelayServiceConnectionEntity]
:ivar hybrid_connections_v2: The Hybrid Connection V2 (Service Bus) view.
:vartype hybrid_connections_v2: list[~azure.mgmt.web.v2016_08_01.models.HybridConnection]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'virtual_network_name': {'readonly': True},
'virtual_network_connection': {'readonly': True},
'hybrid_connections': {'readonly': True},
'hybrid_connections_v2': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'virtual_network_name': {'key': 'properties.virtualNetworkName', 'type': 'str'},
'virtual_network_connection': {'key': 'properties.virtualNetworkConnection', 'type': 'VnetInfo'},
'hybrid_connections': {'key': 'properties.hybridConnections', 'type': '[RelayServiceConnectionEntity]'},
'hybrid_connections_v2': {'key': 'properties.hybridConnectionsV2', 'type': '[HybridConnection]'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(NetworkFeatures, self).__init__(kind=kind, **kwargs)
self.virtual_network_name = None
self.virtual_network_connection = None
self.hybrid_connections = None
self.hybrid_connections_v2 = None
class Operation(msrest.serialization.Model):
"""An operation on a resource.
:param id: Operation ID.
:type id: str
:param name: Operation name.
:type name: str
:param status: The current status of the operation. Possible values include: "InProgress",
"Failed", "Succeeded", "TimedOut", "Created".
:type status: str or ~azure.mgmt.web.v2016_08_01.models.OperationStatus
:param errors: Any errors associated with the operation.
:type errors: list[~azure.mgmt.web.v2016_08_01.models.ErrorEntity]
:param created_time: Time when operation has started.
:type created_time: ~datetime.datetime
:param modified_time: Time when operation has been updated.
:type modified_time: ~datetime.datetime
:param expiration_time: Time when operation will expire.
:type expiration_time: ~datetime.datetime
:param geo_master_operation_id: Applicable only for stamp operation ids.
:type geo_master_operation_id: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'errors': {'key': 'errors', 'type': '[ErrorEntity]'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'modified_time': {'key': 'modifiedTime', 'type': 'iso-8601'},
'expiration_time': {'key': 'expirationTime', 'type': 'iso-8601'},
'geo_master_operation_id': {'key': 'geoMasterOperationId', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
name: Optional[str] = None,
status: Optional[Union[str, "OperationStatus"]] = None,
errors: Optional[List["ErrorEntity"]] = None,
created_time: Optional[datetime.datetime] = None,
modified_time: Optional[datetime.datetime] = None,
expiration_time: Optional[datetime.datetime] = None,
geo_master_operation_id: Optional[str] = None,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.id = id
self.name = name
self.status = status
self.errors = errors
self.created_time = created_time
self.modified_time = modified_time
self.expiration_time = expiration_time
self.geo_master_operation_id = geo_master_operation_id
class PerfMonCounterCollection(msrest.serialization.Model):
"""Collection of performance monitor counters.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2016_08_01.models.PerfMonResponse]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[PerfMonResponse]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["PerfMonResponse"],
**kwargs
):
super(PerfMonCounterCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class PerfMonResponse(msrest.serialization.Model):
"""Performance monitor API response.
:param code: The response code.
:type code: str
:param message: The message.
:type message: str
:param data: The performance monitor counters.
:type data: ~azure.mgmt.web.v2016_08_01.models.PerfMonSet
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'data': {'key': 'data', 'type': 'PerfMonSet'},
}
def __init__(
self,
*,
code: Optional[str] = None,
message: Optional[str] = None,
data: Optional["PerfMonSet"] = None,
**kwargs
):
super(PerfMonResponse, self).__init__(**kwargs)
self.code = code
self.message = message
self.data = data
class PerfMonSample(msrest.serialization.Model):
"""Performance monitor sample in a set.
:param time: Point in time for which counter was measured.
:type time: ~datetime.datetime
:param instance_name: Name of the server on which the measurement is made.
:type instance_name: str
:param value: Value of counter at a certain time.
:type value: float
:param core_count: Core Count of worker. Not a data member.
:type core_count: int
"""
_attribute_map = {
'time': {'key': 'time', 'type': 'iso-8601'},
'instance_name': {'key': 'instanceName', 'type': 'str'},
'value': {'key': 'value', 'type': 'float'},
'core_count': {'key': 'coreCount', 'type': 'int'},
}
def __init__(
self,
*,
time: Optional[datetime.datetime] = None,
instance_name: Optional[str] = None,
value: Optional[float] = None,
core_count: Optional[int] = None,
**kwargs
):
super(PerfMonSample, self).__init__(**kwargs)
self.time = time
self.instance_name = instance_name
self.value = value
self.core_count = core_count
class PerfMonSet(msrest.serialization.Model):
"""Metric information.
:param name: Unique key name of the counter.
:type name: str
:param start_time: Start time of the period.
:type start_time: ~datetime.datetime
:param end_time: End time of the period.
:type end_time: ~datetime.datetime
:param time_grain: Presented time grain.
:type time_grain: str
:param values: Collection of workers that are active during this time.
:type values: list[~azure.mgmt.web.v2016_08_01.models.PerfMonSample]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'time_grain': {'key': 'timeGrain', 'type': 'str'},
'values': {'key': 'values', 'type': '[PerfMonSample]'},
}
def __init__(
self,
*,
name: Optional[str] = None,
start_time: Optional[datetime.datetime] = None,
end_time: Optional[datetime.datetime] = None,
time_grain: Optional[str] = None,
values: Optional[List["PerfMonSample"]] = None,
**kwargs
):
super(PerfMonSet, self).__init__(**kwargs)
self.name = name
self.start_time = start_time
self.end_time = end_time
self.time_grain = time_grain
self.values = values
class Resource(msrest.serialization.Model):
"""Azure resource. This resource is tracked in Azure Resource Manager.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:param location: Required. Resource Location.
:type location: str
:ivar type: Resource type.
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'location': {'required': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
location: str,
kind: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.kind = kind
self.location = location
self.type = None
self.tags = tags
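# Illustrative sketch (not part of the generated SDK): Resource is the tracked-resource base
# class, so location is the only required, keyword-only argument; id, name, and type are
# server-populated and reset to None in __init__ above. The values here are assumptions.
def _example_resource():
    return Resource(
        location="West Europe",
        tags={"environment": "dev"},
    )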
class PremierAddOn(Resource):
"""Premier add-on.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:param location: Required. Resource Location.
:type location: str
:ivar type: Resource type.
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param sku: Premier add on SKU.
:type sku: str
:param product: Premier add on Product.
:type product: str
:param vendor: Premier add on Vendor.
:type vendor: str
:param premier_add_on_name: Premier add on Name.
:type premier_add_on_name: str
:param location_properties_location: Premier add on Location.
:type location_properties_location: str
:param tags_properties_tags: Premier add on Tags.
:type tags_properties_tags: dict[str, str]
:param marketplace_publisher: Premier add on Marketplace publisher.
:type marketplace_publisher: str
:param marketplace_offer: Premier add on Marketplace offer.
:type marketplace_offer: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'location': {'required': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'properties.sku', 'type': 'str'},
'product': {'key': 'properties.product', 'type': 'str'},
'vendor': {'key': 'properties.vendor', 'type': 'str'},
'premier_add_on_name': {'key': 'properties.name', 'type': 'str'},
'location_properties_location': {'key': 'properties.location', 'type': 'str'},
'tags_properties_tags': {'key': 'properties.tags', 'type': '{str}'},
'marketplace_publisher': {'key': 'properties.marketplacePublisher', 'type': 'str'},
'marketplace_offer': {'key': 'properties.marketplaceOffer', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
kind: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
sku: Optional[str] = None,
product: Optional[str] = None,
vendor: Optional[str] = None,
premier_add_on_name: Optional[str] = None,
location_properties_location: Optional[str] = None,
tags_properties_tags: Optional[Dict[str, str]] = None,
marketplace_publisher: Optional[str] = None,
marketplace_offer: Optional[str] = None,
**kwargs
):
super(PremierAddOn, self).__init__(kind=kind, location=location, tags=tags, **kwargs)
self.sku = sku
self.product = product
self.vendor = vendor
self.premier_add_on_name = premier_add_on_name
self.location_properties_location = location_properties_location
self.tags_properties_tags = tags_properties_tags
self.marketplace_publisher = marketplace_publisher
self.marketplace_offer = marketplace_offer
class ProcessInfo(ProxyOnlyResource):
"""Process Information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param id_properties_id: ARM Identifier for deployment.
:type id_properties_id: int
:param name_properties_name: Deployment name.
:type name_properties_name: str
:param href: HRef URI.
:type href: str
:param mini_dump: Minidump URI.
:type mini_dump: str
:param is_profile_running: Is profile running?
:type is_profile_running: bool
:param is_iis_profile_running: Is the IIS Profile running?
:type is_iis_profile_running: bool
:param iis_profile_timeout_in_seconds: IIS Profile timeout (seconds).
:type iis_profile_timeout_in_seconds: float
:param parent: Parent process.
:type parent: str
:param children: Child process list.
:type children: list[str]
:param threads: Thread list.
:type threads: list[~azure.mgmt.web.v2016_08_01.models.ProcessThreadInfo]
:param open_file_handles: List of open files.
:type open_file_handles: list[str]
:param modules: List of modules.
:type modules: list[~azure.mgmt.web.v2016_08_01.models.ProcessModuleInfo]
:param file_name: File name of this process.
:type file_name: str
:param command_line: Command line.
:type command_line: str
:param user_name: User name.
:type user_name: str
:param handle_count: Handle count.
:type handle_count: int
:param module_count: Module count.
:type module_count: int
:param thread_count: Thread count.
:type thread_count: int
:param start_time: Start time.
:type start_time: ~datetime.datetime
:param total_processor_time: Total CPU time.
:type total_processor_time: str
:param user_processor_time: User CPU time.
:type user_processor_time: str
:param privileged_processor_time: Privileged CPU time.
:type privileged_processor_time: str
:param working_set64: Working set.
:type working_set64: long
:param peak_working_set64: Peak working set.
:type peak_working_set64: long
:param private_memory_size64: Private memory size.
:type private_memory_size64: long
:param virtual_memory_size64: Virtual memory size.
:type virtual_memory_size64: long
:param peak_virtual_memory_size64: Peak virtual memory usage.
:type peak_virtual_memory_size64: long
:param paged_system_memory_size64: Paged system memory.
:type paged_system_memory_size64: long
:param nonpaged_system_memory_size64: Non-paged system memory.
:type nonpaged_system_memory_size64: long
:param paged_memory_size64: Paged memory.
:type paged_memory_size64: long
:param peak_paged_memory_size64: Peak paged memory.
:type peak_paged_memory_size64: long
:param time_stamp: Time stamp.
:type time_stamp: ~datetime.datetime
:param environment_variables: List of environment variables.
:type environment_variables: dict[str, str]
:param is_scm_site: Is this the SCM site?
:type is_scm_site: bool
:param is_web_job: Is this a Web Job?
:type is_web_job: bool
:param description: Description of process.
:type description: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'id_properties_id': {'key': 'properties.id', 'type': 'int'},
'name_properties_name': {'key': 'properties.name', 'type': 'str'},
'href': {'key': 'properties.href', 'type': 'str'},
'mini_dump': {'key': 'properties.miniDump', 'type': 'str'},
'is_profile_running': {'key': 'properties.isProfileRunning', 'type': 'bool'},
'is_iis_profile_running': {'key': 'properties.isIisProfileRunning', 'type': 'bool'},
'iis_profile_timeout_in_seconds': {'key': 'properties.iisProfileTimeoutInSeconds', 'type': 'float'},
'parent': {'key': 'properties.parent', 'type': 'str'},
'children': {'key': 'properties.children', 'type': '[str]'},
'threads': {'key': 'properties.threads', 'type': '[ProcessThreadInfo]'},
'open_file_handles': {'key': 'properties.openFileHandles', 'type': '[str]'},
'modules': {'key': 'properties.modules', 'type': '[ProcessModuleInfo]'},
'file_name': {'key': 'properties.fileName', 'type': 'str'},
'command_line': {'key': 'properties.commandLine', 'type': 'str'},
'user_name': {'key': 'properties.userName', 'type': 'str'},
'handle_count': {'key': 'properties.handleCount', 'type': 'int'},
'module_count': {'key': 'properties.moduleCount', 'type': 'int'},
'thread_count': {'key': 'properties.threadCount', 'type': 'int'},
'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
'total_processor_time': {'key': 'properties.totalProcessorTime', 'type': 'str'},
'user_processor_time': {'key': 'properties.userProcessorTime', 'type': 'str'},
'privileged_processor_time': {'key': 'properties.privilegedProcessorTime', 'type': 'str'},
'working_set64': {'key': 'properties.workingSet64', 'type': 'long'},
'peak_working_set64': {'key': 'properties.peakWorkingSet64', 'type': 'long'},
'private_memory_size64': {'key': 'properties.privateMemorySize64', 'type': 'long'},
'virtual_memory_size64': {'key': 'properties.virtualMemorySize64', 'type': 'long'},
'peak_virtual_memory_size64': {'key': 'properties.peakVirtualMemorySize64', 'type': 'long'},
'paged_system_memory_size64': {'key': 'properties.pagedSystemMemorySize64', 'type': 'long'},
'nonpaged_system_memory_size64': {'key': 'properties.nonpagedSystemMemorySize64', 'type': 'long'},
'paged_memory_size64': {'key': 'properties.pagedMemorySize64', 'type': 'long'},
'peak_paged_memory_size64': {'key': 'properties.peakPagedMemorySize64', 'type': 'long'},
'time_stamp': {'key': 'properties.timeStamp', 'type': 'iso-8601'},
'environment_variables': {'key': 'properties.environmentVariables', 'type': '{str}'},
'is_scm_site': {'key': 'properties.isScmSite', 'type': 'bool'},
'is_web_job': {'key': 'properties.isWebJob', 'type': 'bool'},
'description': {'key': 'properties.description', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
id_properties_id: Optional[int] = None,
name_properties_name: Optional[str] = None,
href: Optional[str] = None,
mini_dump: Optional[str] = None,
is_profile_running: Optional[bool] = None,
is_iis_profile_running: Optional[bool] = None,
iis_profile_timeout_in_seconds: Optional[float] = None,
parent: Optional[str] = None,
children: Optional[List[str]] = None,
threads: Optional[List["ProcessThreadInfo"]] = None,
open_file_handles: Optional[List[str]] = None,
modules: Optional[List["ProcessModuleInfo"]] = None,
file_name: Optional[str] = None,
command_line: Optional[str] = None,
user_name: Optional[str] = None,
handle_count: Optional[int] = None,
module_count: Optional[int] = None,
thread_count: Optional[int] = None,
start_time: Optional[datetime.datetime] = None,
total_processor_time: Optional[str] = None,
user_processor_time: Optional[str] = None,
privileged_processor_time: Optional[str] = None,
working_set64: Optional[int] = None,
peak_working_set64: Optional[int] = None,
private_memory_size64: Optional[int] = None,
virtual_memory_size64: Optional[int] = None,
peak_virtual_memory_size64: Optional[int] = None,
paged_system_memory_size64: Optional[int] = None,
nonpaged_system_memory_size64: Optional[int] = None,
paged_memory_size64: Optional[int] = None,
peak_paged_memory_size64: Optional[int] = None,
time_stamp: Optional[datetime.datetime] = None,
environment_variables: Optional[Dict[str, str]] = None,
is_scm_site: Optional[bool] = None,
is_web_job: Optional[bool] = None,
description: Optional[str] = None,
**kwargs
):
super(ProcessInfo, self).__init__(kind=kind, **kwargs)
self.id_properties_id = id_properties_id
self.name_properties_name = name_properties_name
self.href = href
self.mini_dump = mini_dump
self.is_profile_running = is_profile_running
self.is_iis_profile_running = is_iis_profile_running
self.iis_profile_timeout_in_seconds = iis_profile_timeout_in_seconds
self.parent = parent
self.children = children
self.threads = threads
self.open_file_handles = open_file_handles
self.modules = modules
self.file_name = file_name
self.command_line = command_line
self.user_name = user_name
self.handle_count = handle_count
self.module_count = module_count
self.thread_count = thread_count
self.start_time = start_time
self.total_processor_time = total_processor_time
self.user_processor_time = user_processor_time
self.privileged_processor_time = privileged_processor_time
self.working_set64 = working_set64
self.peak_working_set64 = peak_working_set64
self.private_memory_size64 = private_memory_size64
self.virtual_memory_size64 = virtual_memory_size64
self.peak_virtual_memory_size64 = peak_virtual_memory_size64
self.paged_system_memory_size64 = paged_system_memory_size64
self.nonpaged_system_memory_size64 = nonpaged_system_memory_size64
self.paged_memory_size64 = paged_memory_size64
self.peak_paged_memory_size64 = peak_paged_memory_size64
self.time_stamp = time_stamp
self.environment_variables = environment_variables
self.is_scm_site = is_scm_site
self.is_web_job = is_web_job
self.description = description
class ProcessInfoCollection(msrest.serialization.Model):
"""Collection of Kudu process information elements.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2016_08_01.models.ProcessInfo]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ProcessInfo]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["ProcessInfo"],
**kwargs
):
super(ProcessInfoCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class ProcessModuleInfo(ProxyOnlyResource):
"""Process Module Information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param base_address: Base address. Used as module identifier in ARM resource URI.
:type base_address: str
:param file_name: File name.
:type file_name: str
:param href: HRef URI.
:type href: str
:param file_path: File path.
:type file_path: str
:param module_memory_size: Module memory size.
:type module_memory_size: int
:param file_version: File version.
:type file_version: str
:param file_description: File description.
:type file_description: str
:param product: Product name.
:type product: str
:param product_version: Product version.
:type product_version: str
:param is_debug: Is debug?
:type is_debug: bool
:param language: Module language (locale).
:type language: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'base_address': {'key': 'properties.baseAddress', 'type': 'str'},
'file_name': {'key': 'properties.fileName', 'type': 'str'},
'href': {'key': 'properties.href', 'type': 'str'},
'file_path': {'key': 'properties.filePath', 'type': 'str'},
'module_memory_size': {'key': 'properties.moduleMemorySize', 'type': 'int'},
'file_version': {'key': 'properties.fileVersion', 'type': 'str'},
'file_description': {'key': 'properties.fileDescription', 'type': 'str'},
'product': {'key': 'properties.product', 'type': 'str'},
'product_version': {'key': 'properties.productVersion', 'type': 'str'},
'is_debug': {'key': 'properties.isDebug', 'type': 'bool'},
'language': {'key': 'properties.language', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
base_address: Optional[str] = None,
file_name: Optional[str] = None,
href: Optional[str] = None,
file_path: Optional[str] = None,
module_memory_size: Optional[int] = None,
file_version: Optional[str] = None,
file_description: Optional[str] = None,
product: Optional[str] = None,
product_version: Optional[str] = None,
is_debug: Optional[bool] = None,
language: Optional[str] = None,
**kwargs
):
super(ProcessModuleInfo, self).__init__(kind=kind, **kwargs)
self.base_address = base_address
self.file_name = file_name
self.href = href
self.file_path = file_path
self.module_memory_size = module_memory_size
self.file_version = file_version
self.file_description = file_description
self.product = product
self.product_version = product_version
self.is_debug = is_debug
self.language = language
class ProcessModuleInfoCollection(msrest.serialization.Model):
"""Collection of Kudu thread information elements.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2016_08_01.models.ProcessModuleInfo]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ProcessModuleInfo]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["ProcessModuleInfo"],
**kwargs
):
super(ProcessModuleInfoCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class ProcessThreadInfo(ProxyOnlyResource):
"""Process Thread Information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param id_properties_id: ARM Identifier for deployment.
:type id_properties_id: int
:param href: HRef URI.
:type href: str
:param process: Process URI.
:type process: str
:param start_address: Start address.
:type start_address: str
:param current_priority: Current thread priority.
:type current_priority: int
:param priority_level: Thread priority level.
:type priority_level: str
:param base_priority: Base priority.
:type base_priority: int
:param start_time: Start time.
:type start_time: ~datetime.datetime
:param total_processor_time: Total processor time.
:type total_processor_time: str
:param user_processor_time: User processor time.
:type user_processor_time: str
:param priviledged_processor_time: Privileged processor time.
:type priviledged_processor_time: str
:param state: Thread state.
:type state: str
:param wait_reason: Wait reason.
:type wait_reason: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'id_properties_id': {'key': 'properties.id', 'type': 'int'},
'href': {'key': 'properties.href', 'type': 'str'},
'process': {'key': 'properties.process', 'type': 'str'},
'start_address': {'key': 'properties.startAddress', 'type': 'str'},
'current_priority': {'key': 'properties.currentPriority', 'type': 'int'},
'priority_level': {'key': 'properties.priorityLevel', 'type': 'str'},
'base_priority': {'key': 'properties.basePriority', 'type': 'int'},
'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
'total_processor_time': {'key': 'properties.totalProcessorTime', 'type': 'str'},
'user_processor_time': {'key': 'properties.userProcessorTime', 'type': 'str'},
'priviledged_processor_time': {'key': 'properties.priviledgedProcessorTime', 'type': 'str'},
'state': {'key': 'properties.state', 'type': 'str'},
'wait_reason': {'key': 'properties.waitReason', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
id_properties_id: Optional[int] = None,
href: Optional[str] = None,
process: Optional[str] = None,
start_address: Optional[str] = None,
current_priority: Optional[int] = None,
priority_level: Optional[str] = None,
base_priority: Optional[int] = None,
start_time: Optional[datetime.datetime] = None,
total_processor_time: Optional[str] = None,
user_processor_time: Optional[str] = None,
priviledged_processor_time: Optional[str] = None,
state: Optional[str] = None,
wait_reason: Optional[str] = None,
**kwargs
):
super(ProcessThreadInfo, self).__init__(kind=kind, **kwargs)
self.id_properties_id = id_properties_id
self.href = href
self.process = process
self.start_address = start_address
self.current_priority = current_priority
self.priority_level = priority_level
self.base_priority = base_priority
self.start_time = start_time
self.total_processor_time = total_processor_time
self.user_processor_time = user_processor_time
self.priviledged_processor_time = priviledged_processor_time
self.state = state
self.wait_reason = wait_reason
class ProcessThreadInfoCollection(msrest.serialization.Model):
"""Collection of Kudu thread information elements.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2016_08_01.models.ProcessThreadInfo]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ProcessThreadInfo]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["ProcessThreadInfo"],
**kwargs
):
super(ProcessThreadInfoCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class PublicCertificate(ProxyOnlyResource):
"""Public certificate object.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param blob: Public Certificate byte array.
:type blob: bytearray
:param public_certificate_location: Public Certificate Location. Possible values include:
"CurrentUserMy", "LocalMachineMy", "Unknown".
:type public_certificate_location: str or
~azure.mgmt.web.v2016_08_01.models.PublicCertificateLocation
:ivar thumbprint: Certificate Thumbprint.
:vartype thumbprint: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'thumbprint': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'blob': {'key': 'properties.blob', 'type': 'bytearray'},
'public_certificate_location': {'key': 'properties.publicCertificateLocation', 'type': 'str'},
'thumbprint': {'key': 'properties.thumbprint', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
blob: Optional[bytearray] = None,
public_certificate_location: Optional[Union[str, "PublicCertificateLocation"]] = None,
**kwargs
):
super(PublicCertificate, self).__init__(kind=kind, **kwargs)
self.blob = blob
self.public_certificate_location = public_certificate_location
self.thumbprint = None
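# Illustrative sketch (not part of the generated model code): building a
# PublicCertificate upload payload from a local .cer file. The file path and
# certificate location value below are hypothetical placeholders.
def _example_public_certificate():
    with open("my_public_cert.cer", "rb") as cert_file:  # DER-encoded certificate on disk
        cert_bytes = bytearray(cert_file.read())
    return PublicCertificate(
        blob=cert_bytes,
        public_certificate_location="CurrentUserMy",  # one of the documented enum values
    )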
class PublicCertificateCollection(msrest.serialization.Model):
"""Collection of public certificates.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2016_08_01.models.PublicCertificate]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[PublicCertificate]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["PublicCertificate"],
**kwargs
):
super(PublicCertificateCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class PushSettings(ProxyOnlyResource):
"""Push settings for the App.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param is_push_enabled: Gets or sets a flag indicating whether the Push endpoint is enabled.
:type is_push_enabled: bool
:param tag_whitelist_json: Gets or sets a JSON string containing a list of tags that are
whitelisted for use by the push registration endpoint.
:type tag_whitelist_json: str
:param tags_requiring_auth: Gets or sets a JSON string containing a list of tags that require
user authentication to be used in the push registration endpoint.
Tags can consist of alphanumeric characters and the following:
'_', '@', '#', '.', ':', '-'.
Validation should be performed at the PushRequestHandler.
:type tags_requiring_auth: str
:param dynamic_tags_json: Gets or sets a JSON string containing a list of dynamic tags that
will be evaluated from user claims in the push registration endpoint.
:type dynamic_tags_json: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'is_push_enabled': {'key': 'properties.isPushEnabled', 'type': 'bool'},
'tag_whitelist_json': {'key': 'properties.tagWhitelistJson', 'type': 'str'},
'tags_requiring_auth': {'key': 'properties.tagsRequiringAuth', 'type': 'str'},
'dynamic_tags_json': {'key': 'properties.dynamicTagsJson', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
is_push_enabled: Optional[bool] = None,
tag_whitelist_json: Optional[str] = None,
tags_requiring_auth: Optional[str] = None,
dynamic_tags_json: Optional[str] = None,
**kwargs
):
super(PushSettings, self).__init__(kind=kind, **kwargs)
self.is_push_enabled = is_push_enabled
self.tag_whitelist_json = tag_whitelist_json
self.tags_requiring_auth = tags_requiring_auth
self.dynamic_tags_json = dynamic_tags_json
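# Illustrative sketch (not part of the generated model code): PushSettings with the
# Push endpoint enabled and the tag lists passed as JSON-encoded strings, as the
# docstring above describes. The tag names are hypothetical examples.
def _example_push_settings():
    import json
    return PushSettings(
        is_push_enabled=True,
        tag_whitelist_json=json.dumps(["news", "sports"]),    # tags any client may register for
        tags_requiring_auth=json.dumps(["account-updates"]),  # tags that require an authenticated user
    )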
class RampUpRule(msrest.serialization.Model):
"""Routing rules for ramp up testing. This rule allows to redirect static traffic % to a slot or to gradually change routing % based on performance.
:param action_host_name: Hostname of a slot to which the traffic will be redirected if decided
to. E.g. myapp-stage.azurewebsites.net.
:type action_host_name: str
:param reroute_percentage: Percentage of the traffic which will be redirected to
:code:`<code>ActionHostName</code>`.
:type reroute_percentage: float
:param change_step: In the auto ramp up scenario this is the step to add/remove from
:code:`<code>ReroutePercentage</code>` until it reaches
:code:`<code>MinReroutePercentage</code>` or :code:`<code>MaxReroutePercentage</code>`. Site
metrics are checked every N minutes specified in :code:`<code>ChangeIntervalInMinutes</code>`.
A custom decision algorithm can be provided in the TiPCallback site extension, whose URL can
be specified in :code:`<code>ChangeDecisionCallbackUrl</code>`.
:type change_step: float
:param change_interval_in_minutes: Specifies interval in minutes to reevaluate
ReroutePercentage.
:type change_interval_in_minutes: int
:param min_reroute_percentage: Specifies lower boundary above which ReroutePercentage will
stay.
:type min_reroute_percentage: float
:param max_reroute_percentage: Specifies upper boundary below which ReroutePercentage will
stay.
:type max_reroute_percentage: float
:param change_decision_callback_url: A custom decision algorithm can be provided in the
TiPCallback site extension, whose URL can be specified. See the TiPCallback site extension for
the scaffold and contracts.
https://www.siteextensions.net/packages/TiPCallback/.
:type change_decision_callback_url: str
:param name: Name of the routing rule. The recommended name is that of the slot which will
receive the traffic in the experiment.
:type name: str
"""
_attribute_map = {
'action_host_name': {'key': 'actionHostName', 'type': 'str'},
'reroute_percentage': {'key': 'reroutePercentage', 'type': 'float'},
'change_step': {'key': 'changeStep', 'type': 'float'},
'change_interval_in_minutes': {'key': 'changeIntervalInMinutes', 'type': 'int'},
'min_reroute_percentage': {'key': 'minReroutePercentage', 'type': 'float'},
'max_reroute_percentage': {'key': 'maxReroutePercentage', 'type': 'float'},
'change_decision_callback_url': {'key': 'changeDecisionCallbackUrl', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
action_host_name: Optional[str] = None,
reroute_percentage: Optional[float] = None,
change_step: Optional[float] = None,
change_interval_in_minutes: Optional[int] = None,
min_reroute_percentage: Optional[float] = None,
max_reroute_percentage: Optional[float] = None,
change_decision_callback_url: Optional[str] = None,
name: Optional[str] = None,
**kwargs
):
super(RampUpRule, self).__init__(**kwargs)
self.action_host_name = action_host_name
self.reroute_percentage = reroute_percentage
self.change_step = change_step
self.change_interval_in_minutes = change_interval_in_minutes
self.min_reroute_percentage = min_reroute_percentage
self.max_reroute_percentage = max_reroute_percentage
self.change_decision_callback_url = change_decision_callback_url
self.name = name
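# Illustrative sketch (not part of the generated model code): a RampUpRule that
# starts by routing 10% of traffic to a staging slot and lets the platform adjust
# the percentage over time. The hostname and rule name are hypothetical placeholders.
def _example_ramp_up_rule():
    return RampUpRule(
        action_host_name="myapp-stage.azurewebsites.net",  # slot that receives the rerouted traffic
        reroute_percentage=10.0,                           # initial share of traffic sent to the slot
        change_step=5.0,                                   # adjust by 5 percentage points per evaluation
        change_interval_in_minutes=30,                     # re-evaluate site metrics every 30 minutes
        min_reroute_percentage=5.0,
        max_reroute_percentage=50.0,
        name="myapp-stage",                                # recommended: name of the target slot
    )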
class RelayServiceConnectionEntity(ProxyOnlyResource):
"""Hybrid Connection for an App Service app.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param entity_name:
:type entity_name: str
:param entity_connection_string:
:type entity_connection_string: str
:param resource_type:
:type resource_type: str
:param resource_connection_string:
:type resource_connection_string: str
:param hostname:
:type hostname: str
:param port:
:type port: int
:param biztalk_uri:
:type biztalk_uri: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'entity_name': {'key': 'properties.entityName', 'type': 'str'},
'entity_connection_string': {'key': 'properties.entityConnectionString', 'type': 'str'},
'resource_type': {'key': 'properties.resourceType', 'type': 'str'},
'resource_connection_string': {'key': 'properties.resourceConnectionString', 'type': 'str'},
'hostname': {'key': 'properties.hostname', 'type': 'str'},
'port': {'key': 'properties.port', 'type': 'int'},
'biztalk_uri': {'key': 'properties.biztalkUri', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
entity_name: Optional[str] = None,
entity_connection_string: Optional[str] = None,
resource_type: Optional[str] = None,
resource_connection_string: Optional[str] = None,
hostname: Optional[str] = None,
port: Optional[int] = None,
biztalk_uri: Optional[str] = None,
**kwargs
):
super(RelayServiceConnectionEntity, self).__init__(kind=kind, **kwargs)
self.entity_name = entity_name
self.entity_connection_string = entity_connection_string
self.resource_type = resource_type
self.resource_connection_string = resource_connection_string
self.hostname = hostname
self.port = port
self.biztalk_uri = biztalk_uri
class RequestsBasedTrigger(msrest.serialization.Model):
"""Trigger based on total requests.
:param count: Request Count.
:type count: int
:param time_interval: Time interval.
:type time_interval: str
"""
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'time_interval': {'key': 'timeInterval', 'type': 'str'},
}
def __init__(
self,
*,
count: Optional[int] = None,
time_interval: Optional[str] = None,
**kwargs
):
super(RequestsBasedTrigger, self).__init__(**kwargs)
self.count = count
self.time_interval = time_interval
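# Illustrative sketch (not part of the generated model code): a trigger that fires
# when more than 1000 requests arrive within a 5-minute window. The hh:mm:ss
# interval format is an assumption based on other App Service auto-heal settings.
def _example_requests_based_trigger():
    return RequestsBasedTrigger(count=1000, time_interval="00:05:00")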
class ResourceMetric(msrest.serialization.Model):
"""Object representing a metric for any resource .
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: Name of metric.
:vartype name: ~azure.mgmt.web.v2016_08_01.models.ResourceMetricName
:ivar unit: Metric unit.
:vartype unit: str
:ivar time_grain: Metric granularity, e.g. PT1H, PT5M, P1D.
:vartype time_grain: str
:ivar start_time: Metric start time.
:vartype start_time: ~datetime.datetime
:ivar end_time: Metric end time.
:vartype end_time: ~datetime.datetime
:ivar resource_id: Metric resource Id.
:vartype resource_id: str
:ivar id: Resource Id.
:vartype id: str
:ivar metric_values: Metric values.
:vartype metric_values: list[~azure.mgmt.web.v2016_08_01.models.ResourceMetricValue]
:ivar properties: Resource metric properties collection.
:vartype properties: list[~azure.mgmt.web.v2016_08_01.models.ResourceMetricProperty]
"""
_validation = {
'name': {'readonly': True},
'unit': {'readonly': True},
'time_grain': {'readonly': True},
'start_time': {'readonly': True},
'end_time': {'readonly': True},
'resource_id': {'readonly': True},
'id': {'readonly': True},
'metric_values': {'readonly': True},
'properties': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'ResourceMetricName'},
'unit': {'key': 'unit', 'type': 'str'},
'time_grain': {'key': 'timeGrain', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'metric_values': {'key': 'metricValues', 'type': '[ResourceMetricValue]'},
'properties': {'key': 'properties', 'type': '[ResourceMetricProperty]'},
}
def __init__(
self,
**kwargs
):
super(ResourceMetric, self).__init__(**kwargs)
self.name = None
self.unit = None
self.time_grain = None
self.start_time = None
self.end_time = None
self.resource_id = None
self.id = None
self.metric_values = None
self.properties = None
class ResourceMetricAvailability(msrest.serialization.Model):
"""Metrics availability and retention.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar time_grain: Time grain.
:vartype time_grain: str
:ivar retention: Retention period for the current time grain.
:vartype retention: str
"""
_validation = {
'time_grain': {'readonly': True},
'retention': {'readonly': True},
}
_attribute_map = {
'time_grain': {'key': 'timeGrain', 'type': 'str'},
'retention': {'key': 'retention', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ResourceMetricAvailability, self).__init__(**kwargs)
self.time_grain = None
self.retention = None
class ResourceMetricCollection(msrest.serialization.Model):
"""Collection of metric responses.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2016_08_01.models.ResourceMetric]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ResourceMetric]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["ResourceMetric"],
**kwargs
):
super(ResourceMetricCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class ResourceMetricDefinition(ProxyOnlyResource):
"""Metadata for the metrics.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar name_properties_name: Name of the metric.
:vartype name_properties_name: ~azure.mgmt.web.v2016_08_01.models.ResourceMetricName
:ivar unit: Unit of the metric.
:vartype unit: str
:ivar primary_aggregation_type: Primary aggregation type.
:vartype primary_aggregation_type: str
:ivar metric_availabilities: List of time grains supported for the metric together with
retention period.
:vartype metric_availabilities:
list[~azure.mgmt.web.v2016_08_01.models.ResourceMetricAvailability]
:ivar resource_uri: Resource URI.
:vartype resource_uri: str
:ivar id_properties_id: Resource ID.
:vartype id_properties_id: str
:ivar properties: Resource metric definition properties.
:vartype properties: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'name_properties_name': {'readonly': True},
'unit': {'readonly': True},
'primary_aggregation_type': {'readonly': True},
'metric_availabilities': {'readonly': True},
'resource_uri': {'readonly': True},
'id_properties_id': {'readonly': True},
'properties': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'name_properties_name': {'key': 'properties.name', 'type': 'ResourceMetricName'},
'unit': {'key': 'properties.unit', 'type': 'str'},
'primary_aggregation_type': {'key': 'properties.primaryAggregationType', 'type': 'str'},
'metric_availabilities': {'key': 'properties.metricAvailabilities', 'type': '[ResourceMetricAvailability]'},
'resource_uri': {'key': 'properties.resourceUri', 'type': 'str'},
'id_properties_id': {'key': 'properties.id', 'type': 'str'},
'properties': {'key': 'properties.properties', 'type': '{str}'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(ResourceMetricDefinition, self).__init__(kind=kind, **kwargs)
self.name_properties_name = None
self.unit = None
self.primary_aggregation_type = None
self.metric_availabilities = None
self.resource_uri = None
self.id_properties_id = None
self.properties = None
class ResourceMetricDefinitionCollection(msrest.serialization.Model):
"""Collection of metric definitions.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2016_08_01.models.ResourceMetricDefinition]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ResourceMetricDefinition]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["ResourceMetricDefinition"],
**kwargs
):
super(ResourceMetricDefinitionCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class ResourceMetricName(msrest.serialization.Model):
"""Name of a metric for any resource .
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: metric name value.
:vartype value: str
:ivar localized_value: Localized metric name value.
:vartype localized_value: str
"""
_validation = {
'value': {'readonly': True},
'localized_value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'localized_value': {'key': 'localizedValue', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ResourceMetricName, self).__init__(**kwargs)
self.value = None
self.localized_value = None
class ResourceMetricProperty(msrest.serialization.Model):
"""Resource metric property.
:param key: Key for resource metric property.
:type key: str
:param value: Value of pair.
:type value: str
"""
_attribute_map = {
'key': {'key': 'key', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
*,
key: Optional[str] = None,
value: Optional[str] = None,
**kwargs
):
super(ResourceMetricProperty, self).__init__(**kwargs)
self.key = key
self.value = value
class ResourceMetricValue(msrest.serialization.Model):
"""Value of resource metric.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar timestamp: Value timestamp.
:vartype timestamp: str
:ivar average: Value average.
:vartype average: float
:ivar minimum: Value minimum.
:vartype minimum: float
:ivar maximum: Value maximum.
:vartype maximum: float
:ivar total: Value total.
:vartype total: float
:ivar count: Value count.
:vartype count: float
:ivar properties: Resource metric properties collection.
:vartype properties: list[~azure.mgmt.web.v2016_08_01.models.ResourceMetricProperty]
"""
_validation = {
'timestamp': {'readonly': True},
'average': {'readonly': True},
'minimum': {'readonly': True},
'maximum': {'readonly': True},
'total': {'readonly': True},
'count': {'readonly': True},
'properties': {'readonly': True},
}
_attribute_map = {
'timestamp': {'key': 'timestamp', 'type': 'str'},
'average': {'key': 'average', 'type': 'float'},
'minimum': {'key': 'minimum', 'type': 'float'},
'maximum': {'key': 'maximum', 'type': 'float'},
'total': {'key': 'total', 'type': 'float'},
'count': {'key': 'count', 'type': 'float'},
'properties': {'key': 'properties', 'type': '[ResourceMetricProperty]'},
}
def __init__(
self,
**kwargs
):
super(ResourceMetricValue, self).__init__(**kwargs)
self.timestamp = None
self.average = None
self.minimum = None
self.maximum = None
self.total = None
self.count = None
self.properties = None
class RestoreRequest(ProxyOnlyResource):
"""Description of a restore request.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param storage_account_url: SAS URL to the container.
:type storage_account_url: str
:param blob_name: Name of a blob which contains the backup.
:type blob_name: str
:param overwrite: :code:`<code>true</code>` if the restore operation can overwrite target app;
otherwise, :code:`<code>false</code>`. :code:`<code>true</code>` is needed if trying to restore
over an existing app.
:type overwrite: bool
:param site_name: Name of an app.
:type site_name: str
:param databases: Collection of databases which should be restored. This list has to match the
list of databases included in the backup.
:type databases: list[~azure.mgmt.web.v2016_08_01.models.DatabaseBackupSetting]
:param ignore_conflicting_host_names: Changes the logic used when restoring an app with custom
domains. :code:`<code>true</code>` to remove custom domains automatically. If
:code:`<code>false</code>`, custom domains are added to
the app's object when it is being restored, but that might fail due to conflicts during the
operation.
:type ignore_conflicting_host_names: bool
:param ignore_databases: Ignore the databases and only restore the site content.
:type ignore_databases: bool
:param app_service_plan: Specify app service plan that will own restored site.
:type app_service_plan: str
:param operation_type: Operation type. Possible values include: "Default", "Clone",
"Relocation", "Snapshot".
:type operation_type: str or ~azure.mgmt.web.v2016_08_01.models.BackupRestoreOperationType
:param adjust_connection_strings: :code:`<code>true</code>` if SiteConfig.ConnectionStrings
should be set in new app; otherwise, :code:`<code>false</code>`.
:type adjust_connection_strings: bool
:param hosting_environment: App Service Environment name, if needed (only when restoring an app
to an App Service Environment).
:type hosting_environment: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'storage_account_url': {'key': 'properties.storageAccountUrl', 'type': 'str'},
'blob_name': {'key': 'properties.blobName', 'type': 'str'},
'overwrite': {'key': 'properties.overwrite', 'type': 'bool'},
'site_name': {'key': 'properties.siteName', 'type': 'str'},
'databases': {'key': 'properties.databases', 'type': '[DatabaseBackupSetting]'},
'ignore_conflicting_host_names': {'key': 'properties.ignoreConflictingHostNames', 'type': 'bool'},
'ignore_databases': {'key': 'properties.ignoreDatabases', 'type': 'bool'},
'app_service_plan': {'key': 'properties.appServicePlan', 'type': 'str'},
'operation_type': {'key': 'properties.operationType', 'type': 'str'},
'adjust_connection_strings': {'key': 'properties.adjustConnectionStrings', 'type': 'bool'},
'hosting_environment': {'key': 'properties.hostingEnvironment', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
storage_account_url: Optional[str] = None,
blob_name: Optional[str] = None,
overwrite: Optional[bool] = None,
site_name: Optional[str] = None,
databases: Optional[List["DatabaseBackupSetting"]] = None,
ignore_conflicting_host_names: Optional[bool] = False,
ignore_databases: Optional[bool] = False,
app_service_plan: Optional[str] = None,
operation_type: Optional[Union[str, "BackupRestoreOperationType"]] = None,
adjust_connection_strings: Optional[bool] = None,
hosting_environment: Optional[str] = None,
**kwargs
):
super(RestoreRequest, self).__init__(kind=kind, **kwargs)
self.storage_account_url = storage_account_url
self.blob_name = blob_name
self.overwrite = overwrite
self.site_name = site_name
self.databases = databases
self.ignore_conflicting_host_names = ignore_conflicting_host_names
self.ignore_databases = ignore_databases
self.app_service_plan = app_service_plan
self.operation_type = operation_type
self.adjust_connection_strings = adjust_connection_strings
self.hosting_environment = hosting_environment
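# Illustrative sketch (not part of the generated model code): a RestoreRequest that
# restores site content (no databases) from a backup blob over an existing app.
# The SAS URL, blob name, and app name are hypothetical placeholders.
def _example_restore_request():
    return RestoreRequest(
        storage_account_url="https://mystorage.blob.core.windows.net/backups?<sas-token>",
        blob_name="myapp_backup_20160801.zip",
        overwrite=True,           # required when restoring over an existing app
        site_name="myapp",
        ignore_databases=True,    # restore only site content, skip databases
        operation_type="Default",
    )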
class RestoreResponse(ProxyOnlyResource):
"""Response for an app restore request.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar operation_id: When server starts the restore process, it will return an operation ID
identifying that particular restore operation.
:vartype operation_id: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'operation_id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'operation_id': {'key': 'properties.operationId', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(RestoreResponse, self).__init__(kind=kind, **kwargs)
self.operation_id = None
class Site(Resource):
"""A web app, a mobile app backend, or an API app.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:param location: Required. Resource Location.
:type location: str
:ivar type: Resource type.
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param identity: Managed service identity.
:type identity: ~azure.mgmt.web.v2016_08_01.models.ManagedServiceIdentity
:ivar state: Current state of the app.
:vartype state: str
:ivar host_names: Hostnames associated with the app.
:vartype host_names: list[str]
:ivar repository_site_name: Name of the repository site.
:vartype repository_site_name: str
:ivar usage_state: State indicating whether the app has exceeded its quota usage. Read-only.
Possible values include: "Normal", "Exceeded".
:vartype usage_state: str or ~azure.mgmt.web.v2016_08_01.models.UsageState
:param enabled: :code:`<code>true</code>` if the app is enabled; otherwise,
:code:`<code>false</code>`. Setting this value to false disables the app (takes the app
offline).
:type enabled: bool
:ivar enabled_host_names: Enabled hostnames for the app. Hostnames need to be assigned (see
HostNames) AND enabled. Otherwise,
the app is not served on those hostnames.
:vartype enabled_host_names: list[str]
:ivar availability_state: Management information availability state for the app. Possible
values include: "Normal", "Limited", "DisasterRecoveryMode".
:vartype availability_state: str or ~azure.mgmt.web.v2016_08_01.models.SiteAvailabilityState
:param host_name_ssl_states: Hostname SSL states are used to manage the SSL bindings for app's
hostnames.
:type host_name_ssl_states: list[~azure.mgmt.web.v2016_08_01.models.HostNameSslState]
:param server_farm_id: Resource ID of the associated App Service plan, formatted as:
"/subscriptions/{subscriptionID}/resourceGroups/{groupName}/providers/Microsoft.Web/serverfarms/{appServicePlanName}".
:type server_farm_id: str
:param reserved: :code:`<code>true</code>` if reserved; otherwise, :code:`<code>false</code>`.
:type reserved: bool
:ivar last_modified_time_utc: Last time the app was modified, in UTC. Read-only.
:vartype last_modified_time_utc: ~datetime.datetime
:param site_config: Configuration of the app.
:type site_config: ~azure.mgmt.web.v2016_08_01.models.SiteConfig
:ivar traffic_manager_host_names: Azure Traffic Manager hostnames associated with the app.
Read-only.
:vartype traffic_manager_host_names: list[str]
:param scm_site_also_stopped: :code:`<code>true</code>` to stop SCM (KUDU) site when the app is
stopped; otherwise, :code:`<code>false</code>`. The default is :code:`<code>false</code>`.
:type scm_site_also_stopped: bool
:ivar target_swap_slot: Specifies which deployment slot this app will swap into. Read-only.
:vartype target_swap_slot: str
:param hosting_environment_profile: App Service Environment to use for the app.
:type hosting_environment_profile: ~azure.mgmt.web.v2016_08_01.models.HostingEnvironmentProfile
:param client_affinity_enabled: :code:`<code>true</code>` to enable client affinity;
:code:`<code>false</code>` to stop sending session affinity cookies, which route client
requests in the same session to the same instance. Default is :code:`<code>true</code>`.
:type client_affinity_enabled: bool
:param client_cert_enabled: :code:`<code>true</code>` to enable client certificate
authentication (TLS mutual authentication); otherwise, :code:`<code>false</code>`. Default is
:code:`<code>false</code>`.
:type client_cert_enabled: bool
:param host_names_disabled: :code:`<code>true</code>` to disable the public hostnames of the
app; otherwise, :code:`<code>false</code>`.
If :code:`<code>true</code>`, the app is only accessible via API management process.
:type host_names_disabled: bool
:ivar outbound_ip_addresses: List of IP addresses that the app uses for outbound connections
(e.g. database access). Includes VIPs from tenants that the site can be hosted with under current
settings. Read-only.
:vartype outbound_ip_addresses: str
:ivar possible_outbound_ip_addresses: List of IP addresses that the app uses for outbound
connections (e.g. database access). Includes VIPs from all tenants. Read-only.
:vartype possible_outbound_ip_addresses: str
:param container_size: Size of the function container.
:type container_size: int
:param daily_memory_time_quota: Maximum allowed daily memory-time quota (applicable on dynamic
apps only).
:type daily_memory_time_quota: int
:ivar suspended_till: App suspended till in case memory-time quota is exceeded.
:vartype suspended_till: ~datetime.datetime
:ivar max_number_of_workers: Maximum number of workers.
This only applies to Functions container.
:vartype max_number_of_workers: int
:param cloning_info: If specified during app creation, the app is cloned from a source app.
:type cloning_info: ~azure.mgmt.web.v2016_08_01.models.CloningInfo
:param snapshot_info: If specified during app creation, the app is created from a previous
snapshot.
:type snapshot_info: ~azure.mgmt.web.v2016_08_01.models.SnapshotRecoveryRequest
:ivar resource_group: Name of the resource group the app belongs to. Read-only.
:vartype resource_group: str
:ivar is_default_container: :code:`<code>true</code>` if the app is a default container;
otherwise, :code:`<code>false</code>`.
:vartype is_default_container: bool
:ivar default_host_name: Default hostname of the app. Read-only.
:vartype default_host_name: str
:ivar slot_swap_status: Status of the last deployment slot swap operation.
:vartype slot_swap_status: ~azure.mgmt.web.v2016_08_01.models.SlotSwapStatus
:param https_only: HttpsOnly: configures a web site to accept only HTTPS requests and issue a
redirect for HTTP requests.
:type https_only: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'location': {'required': True},
'type': {'readonly': True},
'state': {'readonly': True},
'host_names': {'readonly': True},
'repository_site_name': {'readonly': True},
'usage_state': {'readonly': True},
'enabled_host_names': {'readonly': True},
'availability_state': {'readonly': True},
'last_modified_time_utc': {'readonly': True},
'traffic_manager_host_names': {'readonly': True},
'target_swap_slot': {'readonly': True},
'outbound_ip_addresses': {'readonly': True},
'possible_outbound_ip_addresses': {'readonly': True},
'suspended_till': {'readonly': True},
'max_number_of_workers': {'readonly': True},
'resource_group': {'readonly': True},
'is_default_container': {'readonly': True},
'default_host_name': {'readonly': True},
'slot_swap_status': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'identity': {'key': 'identity', 'type': 'ManagedServiceIdentity'},
'state': {'key': 'properties.state', 'type': 'str'},
'host_names': {'key': 'properties.hostNames', 'type': '[str]'},
'repository_site_name': {'key': 'properties.repositorySiteName', 'type': 'str'},
'usage_state': {'key': 'properties.usageState', 'type': 'str'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'enabled_host_names': {'key': 'properties.enabledHostNames', 'type': '[str]'},
'availability_state': {'key': 'properties.availabilityState', 'type': 'str'},
'host_name_ssl_states': {'key': 'properties.hostNameSslStates', 'type': '[HostNameSslState]'},
'server_farm_id': {'key': 'properties.serverFarmId', 'type': 'str'},
'reserved': {'key': 'properties.reserved', 'type': 'bool'},
'last_modified_time_utc': {'key': 'properties.lastModifiedTimeUtc', 'type': 'iso-8601'},
'site_config': {'key': 'properties.siteConfig', 'type': 'SiteConfig'},
'traffic_manager_host_names': {'key': 'properties.trafficManagerHostNames', 'type': '[str]'},
'scm_site_also_stopped': {'key': 'properties.scmSiteAlsoStopped', 'type': 'bool'},
'target_swap_slot': {'key': 'properties.targetSwapSlot', 'type': 'str'},
'hosting_environment_profile': {'key': 'properties.hostingEnvironmentProfile', 'type': 'HostingEnvironmentProfile'},
'client_affinity_enabled': {'key': 'properties.clientAffinityEnabled', 'type': 'bool'},
'client_cert_enabled': {'key': 'properties.clientCertEnabled', 'type': 'bool'},
'host_names_disabled': {'key': 'properties.hostNamesDisabled', 'type': 'bool'},
'outbound_ip_addresses': {'key': 'properties.outboundIpAddresses', 'type': 'str'},
'possible_outbound_ip_addresses': {'key': 'properties.possibleOutboundIpAddresses', 'type': 'str'},
'container_size': {'key': 'properties.containerSize', 'type': 'int'},
'daily_memory_time_quota': {'key': 'properties.dailyMemoryTimeQuota', 'type': 'int'},
'suspended_till': {'key': 'properties.suspendedTill', 'type': 'iso-8601'},
'max_number_of_workers': {'key': 'properties.maxNumberOfWorkers', 'type': 'int'},
'cloning_info': {'key': 'properties.cloningInfo', 'type': 'CloningInfo'},
'snapshot_info': {'key': 'properties.snapshotInfo', 'type': 'SnapshotRecoveryRequest'},
'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'},
'is_default_container': {'key': 'properties.isDefaultContainer', 'type': 'bool'},
'default_host_name': {'key': 'properties.defaultHostName', 'type': 'str'},
'slot_swap_status': {'key': 'properties.slotSwapStatus', 'type': 'SlotSwapStatus'},
'https_only': {'key': 'properties.httpsOnly', 'type': 'bool'},
}
def __init__(
self,
*,
location: str,
kind: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
identity: Optional["ManagedServiceIdentity"] = None,
enabled: Optional[bool] = None,
host_name_ssl_states: Optional[List["HostNameSslState"]] = None,
server_farm_id: Optional[str] = None,
reserved: Optional[bool] = False,
site_config: Optional["SiteConfig"] = None,
scm_site_also_stopped: Optional[bool] = False,
hosting_environment_profile: Optional["HostingEnvironmentProfile"] = None,
client_affinity_enabled: Optional[bool] = None,
client_cert_enabled: Optional[bool] = None,
host_names_disabled: Optional[bool] = None,
container_size: Optional[int] = None,
daily_memory_time_quota: Optional[int] = None,
cloning_info: Optional["CloningInfo"] = None,
snapshot_info: Optional["SnapshotRecoveryRequest"] = None,
https_only: Optional[bool] = None,
**kwargs
):
super(Site, self).__init__(kind=kind, location=location, tags=tags, **kwargs)
self.identity = identity
self.state = None
self.host_names = None
self.repository_site_name = None
self.usage_state = None
self.enabled = enabled
self.enabled_host_names = None
self.availability_state = None
self.host_name_ssl_states = host_name_ssl_states
self.server_farm_id = server_farm_id
self.reserved = reserved
self.last_modified_time_utc = None
self.site_config = site_config
self.traffic_manager_host_names = None
self.scm_site_also_stopped = scm_site_also_stopped
self.target_swap_slot = None
self.hosting_environment_profile = hosting_environment_profile
self.client_affinity_enabled = client_affinity_enabled
self.client_cert_enabled = client_cert_enabled
self.host_names_disabled = host_names_disabled
self.outbound_ip_addresses = None
self.possible_outbound_ip_addresses = None
self.container_size = container_size
self.daily_memory_time_quota = daily_memory_time_quota
self.suspended_till = None
self.max_number_of_workers = None
self.cloning_info = cloning_info
self.snapshot_info = snapshot_info
self.resource_group = None
self.is_default_container = None
self.default_host_name = None
self.slot_swap_status = None
self.https_only = https_only
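# Illustrative sketch (not part of the generated model code): a minimal Site payload
# for creating a web app in an existing App Service plan. The subscription ID,
# resource group, and plan name are hypothetical placeholders.
def _example_site():
    plan_id = (
        "/subscriptions/00000000-0000-0000-0000-000000000000"
        "/resourceGroups/my-rg/providers/Microsoft.Web/serverfarms/my-plan"
    )
    return Site(
        location="West US",       # required resource location
        server_farm_id=plan_id,   # App Service plan that hosts the app
        https_only=True,          # redirect plain HTTP requests to HTTPS
        tags={"env": "dev"},
    )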
class SiteAuthSettings(ProxyOnlyResource):
"""Configuration settings for the Azure App Service Authentication / Authorization feature.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param enabled: :code:`<code>true</code>` if the Authentication / Authorization feature is
enabled for the current app; otherwise, :code:`<code>false</code>`.
:type enabled: bool
:param runtime_version: The RuntimeVersion of the Authentication / Authorization feature in use
for the current app.
The setting in this value can control the behavior of certain features in the Authentication /
Authorization module.
:type runtime_version: str
:param unauthenticated_client_action: The action to take when an unauthenticated client
attempts to access the app. Possible values include: "RedirectToLoginPage", "AllowAnonymous".
:type unauthenticated_client_action: str or
~azure.mgmt.web.v2016_08_01.models.UnauthenticatedClientAction
:param token_store_enabled: :code:`<code>true</code>` to durably store platform-specific
security tokens that are obtained during login flows; otherwise, :code:`<code>false</code>`.
The default is :code:`<code>false</code>`.
:type token_store_enabled: bool
:param allowed_external_redirect_urls: External URLs that can be redirected to as part of
logging in or logging out of the app. Note that the query string part of the URL is ignored.
This is an advanced setting typically only needed by Windows Store application backends.
Note that URLs within the current domain are always implicitly allowed.
:type allowed_external_redirect_urls: list[str]
:param default_provider: The default authentication provider to use when multiple providers are
configured.
This setting is only needed if multiple providers are configured and the unauthenticated
client
action is set to "RedirectToLoginPage". Possible values include: "AzureActiveDirectory",
"Facebook", "Google", "MicrosoftAccount", "Twitter".
:type default_provider: str or ~azure.mgmt.web.v2016_08_01.models.BuiltInAuthenticationProvider
:param token_refresh_extension_hours: The number of hours after session token expiration that a
session token can be used to
call the token refresh API. The default is 72 hours.
:type token_refresh_extension_hours: float
:param client_id: The Client ID of this relying party application, known as the client_id.
This setting is required for enabling OpenID Connect authentication with Azure Active
Directory or
other 3rd party OpenID Connect providers.
More information on OpenID Connect: http://openid.net/specs/openid-connect-core-1_0.html.
:type client_id: str
:param client_secret: The Client Secret of this relying party application (in Azure Active
Directory, this is also referred to as the Key).
This setting is optional. If no client secret is configured, the OpenID Connect implicit auth
flow is used to authenticate end users.
Otherwise, the OpenID Connect Authorization Code Flow is used to authenticate end users.
More information on OpenID Connect: http://openid.net/specs/openid-connect-core-1_0.html.
:type client_secret: str
:param issuer: The OpenID Connect Issuer URI that represents the entity which issues access
tokens for this application.
When using Azure Active Directory, this value is the URI of the directory tenant, e.g.
https://sts.windows.net/{tenant-guid}/.
This URI is a case-sensitive identifier for the token issuer.
More information on OpenID Connect Discovery:
http://openid.net/specs/openid-connect-discovery-1_0.html.
:type issuer: str
:param allowed_audiences: Allowed audience values to consider when validating JWTs issued by
Azure Active Directory. Note that the :code:`<code>ClientID</code>` value is always considered
an
allowed audience, regardless of this setting.
:type allowed_audiences: list[str]
:param additional_login_params: Login parameters to send to the OpenID Connect authorization
endpoint when
a user logs in. Each parameter must be in the form "key=value".
:type additional_login_params: list[str]
:param google_client_id: The OpenID Connect Client ID for the Google web application.
This setting is required for enabling Google Sign-In.
Google Sign-In documentation: https://developers.google.com/identity/sign-in/web/.
:type google_client_id: str
:param google_client_secret: The client secret associated with the Google web application.
This setting is required for enabling Google Sign-In.
Google Sign-In documentation: https://developers.google.com/identity/sign-in/web/.
:type google_client_secret: str
:param google_o_auth_scopes: The OAuth 2.0 scopes that will be requested as part of Google
Sign-In authentication.
This setting is optional. If not specified, "openid", "profile", and "email" are used as
default scopes.
Google Sign-In documentation: https://developers.google.com/identity/sign-in/web/.
:type google_o_auth_scopes: list[str]
:param facebook_app_id: The App ID of the Facebook app used for login.
This setting is required for enabling Facebook Login.
Facebook Login documentation: https://developers.facebook.com/docs/facebook-login.
:type facebook_app_id: str
:param facebook_app_secret: The App Secret of the Facebook app used for Facebook Login.
This setting is required for enabling Facebook Login.
Facebook Login documentation: https://developers.facebook.com/docs/facebook-login.
:type facebook_app_secret: str
:param facebook_o_auth_scopes: The OAuth 2.0 scopes that will be requested as part of Facebook
Login authentication.
This setting is optional.
Facebook Login documentation: https://developers.facebook.com/docs/facebook-login.
:type facebook_o_auth_scopes: list[str]
:param twitter_consumer_key: The OAuth 1.0a consumer key of the Twitter application used for
sign-in.
This setting is required for enabling Twitter Sign-In.
Twitter Sign-In documentation: https://dev.twitter.com/web/sign-in.
:type twitter_consumer_key: str
:param twitter_consumer_secret: The OAuth 1.0a consumer secret of the Twitter application used
for sign-in.
This setting is required for enabling Twitter Sign-In.
Twitter Sign-In documentation: https://dev.twitter.com/web/sign-in.
:type twitter_consumer_secret: str
:param microsoft_account_client_id: The OAuth 2.0 client ID that was created for the app used
for authentication.
This setting is required for enabling Microsoft Account authentication.
Microsoft Account OAuth documentation: https://dev.onedrive.com/auth/msa_oauth.htm.
:type microsoft_account_client_id: str
:param microsoft_account_client_secret: The OAuth 2.0 client secret that was created for the
app used for authentication.
This setting is required for enabling Microsoft Account authentication.
Microsoft Account OAuth documentation: https://dev.onedrive.com/auth/msa_oauth.htm.
:type microsoft_account_client_secret: str
:param microsoft_account_o_auth_scopes: The OAuth 2.0 scopes that will be requested as part of
Microsoft Account authentication.
This setting is optional. If not specified, "wl.basic" is used as the default scope.
Microsoft Account Scopes and permissions documentation:
https://msdn.microsoft.com/en-us/library/dn631845.aspx.
:type microsoft_account_o_auth_scopes: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'runtime_version': {'key': 'properties.runtimeVersion', 'type': 'str'},
'unauthenticated_client_action': {'key': 'properties.unauthenticatedClientAction', 'type': 'str'},
'token_store_enabled': {'key': 'properties.tokenStoreEnabled', 'type': 'bool'},
'allowed_external_redirect_urls': {'key': 'properties.allowedExternalRedirectUrls', 'type': '[str]'},
'default_provider': {'key': 'properties.defaultProvider', 'type': 'str'},
'token_refresh_extension_hours': {'key': 'properties.tokenRefreshExtensionHours', 'type': 'float'},
'client_id': {'key': 'properties.clientId', 'type': 'str'},
'client_secret': {'key': 'properties.clientSecret', 'type': 'str'},
'issuer': {'key': 'properties.issuer', 'type': 'str'},
'allowed_audiences': {'key': 'properties.allowedAudiences', 'type': '[str]'},
'additional_login_params': {'key': 'properties.additionalLoginParams', 'type': '[str]'},
'google_client_id': {'key': 'properties.googleClientId', 'type': 'str'},
'google_client_secret': {'key': 'properties.googleClientSecret', 'type': 'str'},
'google_o_auth_scopes': {'key': 'properties.googleOAuthScopes', 'type': '[str]'},
'facebook_app_id': {'key': 'properties.facebookAppId', 'type': 'str'},
'facebook_app_secret': {'key': 'properties.facebookAppSecret', 'type': 'str'},
'facebook_o_auth_scopes': {'key': 'properties.facebookOAuthScopes', 'type': '[str]'},
'twitter_consumer_key': {'key': 'properties.twitterConsumerKey', 'type': 'str'},
'twitter_consumer_secret': {'key': 'properties.twitterConsumerSecret', 'type': 'str'},
'microsoft_account_client_id': {'key': 'properties.microsoftAccountClientId', 'type': 'str'},
'microsoft_account_client_secret': {'key': 'properties.microsoftAccountClientSecret', 'type': 'str'},
'microsoft_account_o_auth_scopes': {'key': 'properties.microsoftAccountOAuthScopes', 'type': '[str]'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
enabled: Optional[bool] = None,
runtime_version: Optional[str] = None,
unauthenticated_client_action: Optional[Union[str, "UnauthenticatedClientAction"]] = None,
token_store_enabled: Optional[bool] = None,
allowed_external_redirect_urls: Optional[List[str]] = None,
default_provider: Optional[Union[str, "BuiltInAuthenticationProvider"]] = None,
token_refresh_extension_hours: Optional[float] = None,
client_id: Optional[str] = None,
client_secret: Optional[str] = None,
issuer: Optional[str] = None,
allowed_audiences: Optional[List[str]] = None,
additional_login_params: Optional[List[str]] = None,
google_client_id: Optional[str] = None,
google_client_secret: Optional[str] = None,
google_o_auth_scopes: Optional[List[str]] = None,
facebook_app_id: Optional[str] = None,
facebook_app_secret: Optional[str] = None,
facebook_o_auth_scopes: Optional[List[str]] = None,
twitter_consumer_key: Optional[str] = None,
twitter_consumer_secret: Optional[str] = None,
microsoft_account_client_id: Optional[str] = None,
microsoft_account_client_secret: Optional[str] = None,
microsoft_account_o_auth_scopes: Optional[List[str]] = None,
**kwargs
):
super(SiteAuthSettings, self).__init__(kind=kind, **kwargs)
self.enabled = enabled
self.runtime_version = runtime_version
self.unauthenticated_client_action = unauthenticated_client_action
self.token_store_enabled = token_store_enabled
self.allowed_external_redirect_urls = allowed_external_redirect_urls
self.default_provider = default_provider
self.token_refresh_extension_hours = token_refresh_extension_hours
self.client_id = client_id
self.client_secret = client_secret
self.issuer = issuer
self.allowed_audiences = allowed_audiences
self.additional_login_params = additional_login_params
self.google_client_id = google_client_id
self.google_client_secret = google_client_secret
self.google_o_auth_scopes = google_o_auth_scopes
self.facebook_app_id = facebook_app_id
self.facebook_app_secret = facebook_app_secret
self.facebook_o_auth_scopes = facebook_o_auth_scopes
self.twitter_consumer_key = twitter_consumer_key
self.twitter_consumer_secret = twitter_consumer_secret
self.microsoft_account_client_id = microsoft_account_client_id
self.microsoft_account_client_secret = microsoft_account_client_secret
self.microsoft_account_o_auth_scopes = microsoft_account_o_auth_scopes
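# Illustrative sketch (not part of the generated model code): SiteAuthSettings that
# turn on Azure Active Directory authentication and redirect anonymous callers to
# the login page. The client ID, tenant GUID, and audience are hypothetical placeholders.
def _example_site_auth_settings():
    return SiteAuthSettings(
        enabled=True,
        unauthenticated_client_action="RedirectToLoginPage",
        default_provider="AzureActiveDirectory",
        client_id="00000000-0000-0000-0000-000000000000",
        issuer="https://sts.windows.net/11111111-1111-1111-1111-111111111111/",
        allowed_audiences=["https://myapp.azurewebsites.net"],
        token_store_enabled=True,   # durably store tokens obtained during login flows
    )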
class SiteCloneability(msrest.serialization.Model):
"""Represents whether or not an app is cloneable.
:param result: Cloneability result of the app. Possible values include: "Cloneable", "PartiallyCloneable",
"NotCloneable".
:type result: str or ~azure.mgmt.web.v2016_08_01.models.CloneAbilityResult
:param blocking_features: List of features enabled on app that prevent cloning.
:type blocking_features: list[~azure.mgmt.web.v2016_08_01.models.SiteCloneabilityCriterion]
:param unsupported_features: List of features enabled on app that are non-blocking but cannot
be cloned. The app can still be cloned
but the features in this list will not be set up on the cloned app.
:type unsupported_features: list[~azure.mgmt.web.v2016_08_01.models.SiteCloneabilityCriterion]
:param blocking_characteristics: List of blocking application characteristics.
:type blocking_characteristics:
list[~azure.mgmt.web.v2016_08_01.models.SiteCloneabilityCriterion]
"""
_attribute_map = {
'result': {'key': 'result', 'type': 'str'},
'blocking_features': {'key': 'blockingFeatures', 'type': '[SiteCloneabilityCriterion]'},
'unsupported_features': {'key': 'unsupportedFeatures', 'type': '[SiteCloneabilityCriterion]'},
'blocking_characteristics': {'key': 'blockingCharacteristics', 'type': '[SiteCloneabilityCriterion]'},
}
def __init__(
self,
*,
result: Optional[Union[str, "CloneAbilityResult"]] = None,
blocking_features: Optional[List["SiteCloneabilityCriterion"]] = None,
unsupported_features: Optional[List["SiteCloneabilityCriterion"]] = None,
blocking_characteristics: Optional[List["SiteCloneabilityCriterion"]] = None,
**kwargs
):
super(SiteCloneability, self).__init__(**kwargs)
self.result = result
self.blocking_features = blocking_features
self.unsupported_features = unsupported_features
self.blocking_characteristics = blocking_characteristics
class SiteCloneabilityCriterion(msrest.serialization.Model):
"""An app cloneability criterion.
:param name: Name of criterion.
:type name: str
:param description: Description of criterion.
:type description: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
description: Optional[str] = None,
**kwargs
):
super(SiteCloneabilityCriterion, self).__init__(**kwargs)
self.name = name
self.description = description
class SiteConfig(msrest.serialization.Model):
"""Configuration of an App Service app.
Variables are only populated by the server, and will be ignored when sending a request.
:param number_of_workers: Number of workers.
:type number_of_workers: int
:param default_documents: Default documents.
:type default_documents: list[str]
:param net_framework_version: .NET Framework version.
:type net_framework_version: str
:param php_version: Version of PHP.
:type php_version: str
:param python_version: Version of Python.
:type python_version: str
:param node_version: Version of Node.js.
:type node_version: str
:param linux_fx_version: Linux App Framework and version.
:type linux_fx_version: str
:param request_tracing_enabled: :code:`<code>true</code>` if request tracing is enabled;
otherwise, :code:`<code>false</code>`.
:type request_tracing_enabled: bool
:param request_tracing_expiration_time: Request tracing expiration time.
:type request_tracing_expiration_time: ~datetime.datetime
:param remote_debugging_enabled: :code:`<code>true</code>` if remote debugging is enabled;
otherwise, :code:`<code>false</code>`.
:type remote_debugging_enabled: bool
:param remote_debugging_version: Remote debugging version.
:type remote_debugging_version: str
:param http_logging_enabled: :code:`<code>true</code>` if HTTP logging is enabled; otherwise,
:code:`<code>false</code>`.
:type http_logging_enabled: bool
:param logs_directory_size_limit: HTTP logs directory size limit.
:type logs_directory_size_limit: int
:param detailed_error_logging_enabled: :code:`<code>true</code>` if detailed error logging is
enabled; otherwise, :code:`<code>false</code>`.
:type detailed_error_logging_enabled: bool
:param publishing_username: Publishing user name.
:type publishing_username: str
:param app_settings: Application settings.
:type app_settings: list[~azure.mgmt.web.v2016_08_01.models.NameValuePair]
:param connection_strings: Connection strings.
:type connection_strings: list[~azure.mgmt.web.v2016_08_01.models.ConnStringInfo]
:ivar machine_key: Site MachineKey.
:vartype machine_key: ~azure.mgmt.web.v2016_08_01.models.SiteMachineKey
:param handler_mappings: Handler mappings.
:type handler_mappings: list[~azure.mgmt.web.v2016_08_01.models.HandlerMapping]
:param document_root: Document root.
:type document_root: str
:param scm_type: SCM type. Possible values include: "None", "Dropbox", "Tfs", "LocalGit",
"GitHub", "CodePlexGit", "CodePlexHg", "BitbucketGit", "BitbucketHg", "ExternalGit",
"ExternalHg", "OneDrive", "VSO".
:type scm_type: str or ~azure.mgmt.web.v2016_08_01.models.ScmType
:param use32_bit_worker_process: :code:`<code>true</code>` to use 32-bit worker process;
otherwise, :code:`<code>false</code>`.
:type use32_bit_worker_process: bool
:param web_sockets_enabled: :code:`<code>true</code>` if WebSocket is enabled; otherwise,
:code:`<code>false</code>`.
:type web_sockets_enabled: bool
:param always_on: :code:`<code>true</code>` if Always On is enabled; otherwise,
:code:`<code>false</code>`.
:type always_on: bool
:param java_version: Java version.
:type java_version: str
:param java_container: Java container.
:type java_container: str
:param java_container_version: Java container version.
:type java_container_version: str
:param app_command_line: App command line to launch.
:type app_command_line: str
:param managed_pipeline_mode: Managed pipeline mode. Possible values include: "Integrated",
"Classic".
:type managed_pipeline_mode: str or ~azure.mgmt.web.v2016_08_01.models.ManagedPipelineMode
:param virtual_applications: Virtual applications.
:type virtual_applications: list[~azure.mgmt.web.v2016_08_01.models.VirtualApplication]
:param load_balancing: Site load balancing. Possible values include: "WeightedRoundRobin",
"LeastRequests", "LeastResponseTime", "WeightedTotalTraffic", "RequestHash".
:type load_balancing: str or ~azure.mgmt.web.v2016_08_01.models.SiteLoadBalancing
    :param experiments: This is a workaround for polymorphic types.
:type experiments: ~azure.mgmt.web.v2016_08_01.models.Experiments
:param limits: Site limits.
:type limits: ~azure.mgmt.web.v2016_08_01.models.SiteLimits
:param auto_heal_enabled: :code:`<code>true</code>` if Auto Heal is enabled; otherwise,
:code:`<code>false</code>`.
:type auto_heal_enabled: bool
:param auto_heal_rules: Auto Heal rules.
:type auto_heal_rules: ~azure.mgmt.web.v2016_08_01.models.AutoHealRules
:param tracing_options: Tracing options.
:type tracing_options: str
:param vnet_name: Virtual Network name.
:type vnet_name: str
:param cors: Cross-Origin Resource Sharing (CORS) settings.
:type cors: ~azure.mgmt.web.v2016_08_01.models.CorsSettings
:param push: Push endpoint settings.
:type push: ~azure.mgmt.web.v2016_08_01.models.PushSettings
:param api_definition: Information about the formal API definition for the app.
:type api_definition: ~azure.mgmt.web.v2016_08_01.models.ApiDefinitionInfo
:param auto_swap_slot_name: Auto-swap slot name.
:type auto_swap_slot_name: str
:param local_my_sql_enabled: :code:`<code>true</code>` to enable local MySQL; otherwise,
:code:`<code>false</code>`.
:type local_my_sql_enabled: bool
:param ip_security_restrictions: IP security restrictions.
:type ip_security_restrictions: list[~azure.mgmt.web.v2016_08_01.models.IpSecurityRestriction]
:param http20_enabled: Http20Enabled: configures a web site to allow clients to connect over
http2.0.
:type http20_enabled: bool
:param min_tls_version: MinTlsVersion: configures the minimum version of TLS required for SSL
requests. Possible values include: "1.0", "1.1", "1.2".
:type min_tls_version: str or ~azure.mgmt.web.v2016_08_01.models.SupportedTlsVersions
"""
_validation = {
'machine_key': {'readonly': True},
}
_attribute_map = {
'number_of_workers': {'key': 'numberOfWorkers', 'type': 'int'},
'default_documents': {'key': 'defaultDocuments', 'type': '[str]'},
'net_framework_version': {'key': 'netFrameworkVersion', 'type': 'str'},
'php_version': {'key': 'phpVersion', 'type': 'str'},
'python_version': {'key': 'pythonVersion', 'type': 'str'},
'node_version': {'key': 'nodeVersion', 'type': 'str'},
'linux_fx_version': {'key': 'linuxFxVersion', 'type': 'str'},
'request_tracing_enabled': {'key': 'requestTracingEnabled', 'type': 'bool'},
'request_tracing_expiration_time': {'key': 'requestTracingExpirationTime', 'type': 'iso-8601'},
'remote_debugging_enabled': {'key': 'remoteDebuggingEnabled', 'type': 'bool'},
'remote_debugging_version': {'key': 'remoteDebuggingVersion', 'type': 'str'},
'http_logging_enabled': {'key': 'httpLoggingEnabled', 'type': 'bool'},
'logs_directory_size_limit': {'key': 'logsDirectorySizeLimit', 'type': 'int'},
'detailed_error_logging_enabled': {'key': 'detailedErrorLoggingEnabled', 'type': 'bool'},
'publishing_username': {'key': 'publishingUsername', 'type': 'str'},
'app_settings': {'key': 'appSettings', 'type': '[NameValuePair]'},
'connection_strings': {'key': 'connectionStrings', 'type': '[ConnStringInfo]'},
'machine_key': {'key': 'machineKey', 'type': 'SiteMachineKey'},
'handler_mappings': {'key': 'handlerMappings', 'type': '[HandlerMapping]'},
'document_root': {'key': 'documentRoot', 'type': 'str'},
'scm_type': {'key': 'scmType', 'type': 'str'},
'use32_bit_worker_process': {'key': 'use32BitWorkerProcess', 'type': 'bool'},
'web_sockets_enabled': {'key': 'webSocketsEnabled', 'type': 'bool'},
'always_on': {'key': 'alwaysOn', 'type': 'bool'},
'java_version': {'key': 'javaVersion', 'type': 'str'},
'java_container': {'key': 'javaContainer', 'type': 'str'},
'java_container_version': {'key': 'javaContainerVersion', 'type': 'str'},
'app_command_line': {'key': 'appCommandLine', 'type': 'str'},
'managed_pipeline_mode': {'key': 'managedPipelineMode', 'type': 'str'},
'virtual_applications': {'key': 'virtualApplications', 'type': '[VirtualApplication]'},
'load_balancing': {'key': 'loadBalancing', 'type': 'str'},
'experiments': {'key': 'experiments', 'type': 'Experiments'},
'limits': {'key': 'limits', 'type': 'SiteLimits'},
'auto_heal_enabled': {'key': 'autoHealEnabled', 'type': 'bool'},
'auto_heal_rules': {'key': 'autoHealRules', 'type': 'AutoHealRules'},
'tracing_options': {'key': 'tracingOptions', 'type': 'str'},
'vnet_name': {'key': 'vnetName', 'type': 'str'},
'cors': {'key': 'cors', 'type': 'CorsSettings'},
'push': {'key': 'push', 'type': 'PushSettings'},
'api_definition': {'key': 'apiDefinition', 'type': 'ApiDefinitionInfo'},
'auto_swap_slot_name': {'key': 'autoSwapSlotName', 'type': 'str'},
'local_my_sql_enabled': {'key': 'localMySqlEnabled', 'type': 'bool'},
'ip_security_restrictions': {'key': 'ipSecurityRestrictions', 'type': '[IpSecurityRestriction]'},
'http20_enabled': {'key': 'http20Enabled', 'type': 'bool'},
'min_tls_version': {'key': 'minTlsVersion', 'type': 'str'},
}
def __init__(
self,
*,
number_of_workers: Optional[int] = None,
default_documents: Optional[List[str]] = None,
net_framework_version: Optional[str] = "v4.6",
php_version: Optional[str] = None,
python_version: Optional[str] = None,
node_version: Optional[str] = None,
linux_fx_version: Optional[str] = None,
request_tracing_enabled: Optional[bool] = None,
request_tracing_expiration_time: Optional[datetime.datetime] = None,
remote_debugging_enabled: Optional[bool] = None,
remote_debugging_version: Optional[str] = None,
http_logging_enabled: Optional[bool] = None,
logs_directory_size_limit: Optional[int] = None,
detailed_error_logging_enabled: Optional[bool] = None,
publishing_username: Optional[str] = None,
app_settings: Optional[List["NameValuePair"]] = None,
connection_strings: Optional[List["ConnStringInfo"]] = None,
handler_mappings: Optional[List["HandlerMapping"]] = None,
document_root: Optional[str] = None,
scm_type: Optional[Union[str, "ScmType"]] = None,
use32_bit_worker_process: Optional[bool] = None,
web_sockets_enabled: Optional[bool] = None,
always_on: Optional[bool] = None,
java_version: Optional[str] = None,
java_container: Optional[str] = None,
java_container_version: Optional[str] = None,
app_command_line: Optional[str] = None,
managed_pipeline_mode: Optional[Union[str, "ManagedPipelineMode"]] = None,
virtual_applications: Optional[List["VirtualApplication"]] = None,
load_balancing: Optional[Union[str, "SiteLoadBalancing"]] = None,
experiments: Optional["Experiments"] = None,
limits: Optional["SiteLimits"] = None,
auto_heal_enabled: Optional[bool] = None,
auto_heal_rules: Optional["AutoHealRules"] = None,
tracing_options: Optional[str] = None,
vnet_name: Optional[str] = None,
cors: Optional["CorsSettings"] = None,
push: Optional["PushSettings"] = None,
api_definition: Optional["ApiDefinitionInfo"] = None,
auto_swap_slot_name: Optional[str] = None,
local_my_sql_enabled: Optional[bool] = False,
ip_security_restrictions: Optional[List["IpSecurityRestriction"]] = None,
http20_enabled: Optional[bool] = True,
min_tls_version: Optional[Union[str, "SupportedTlsVersions"]] = None,
**kwargs
):
super(SiteConfig, self).__init__(**kwargs)
self.number_of_workers = number_of_workers
self.default_documents = default_documents
self.net_framework_version = net_framework_version
self.php_version = php_version
self.python_version = python_version
self.node_version = node_version
self.linux_fx_version = linux_fx_version
self.request_tracing_enabled = request_tracing_enabled
self.request_tracing_expiration_time = request_tracing_expiration_time
self.remote_debugging_enabled = remote_debugging_enabled
self.remote_debugging_version = remote_debugging_version
self.http_logging_enabled = http_logging_enabled
self.logs_directory_size_limit = logs_directory_size_limit
self.detailed_error_logging_enabled = detailed_error_logging_enabled
self.publishing_username = publishing_username
self.app_settings = app_settings
self.connection_strings = connection_strings
self.machine_key = None
self.handler_mappings = handler_mappings
self.document_root = document_root
self.scm_type = scm_type
self.use32_bit_worker_process = use32_bit_worker_process
self.web_sockets_enabled = web_sockets_enabled
self.always_on = always_on
self.java_version = java_version
self.java_container = java_container
self.java_container_version = java_container_version
self.app_command_line = app_command_line
self.managed_pipeline_mode = managed_pipeline_mode
self.virtual_applications = virtual_applications
self.load_balancing = load_balancing
self.experiments = experiments
self.limits = limits
self.auto_heal_enabled = auto_heal_enabled
self.auto_heal_rules = auto_heal_rules
self.tracing_options = tracing_options
self.vnet_name = vnet_name
self.cors = cors
self.push = push
self.api_definition = api_definition
self.auto_swap_slot_name = auto_swap_slot_name
self.local_my_sql_enabled = local_my_sql_enabled
self.ip_security_restrictions = ip_security_restrictions
self.http20_enabled = http20_enabled
self.min_tls_version = min_tls_version
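# Usage sketch (illustrative only): constructing a SiteConfig. Every field is optional and is
# serialized according to _attribute_map; read-only fields such as machine_key are populated by
# the service and never set by the caller. The values and the NameValuePair fields below are
# placeholder assumptions:
#
#     config = SiteConfig(
#         number_of_workers=2,
#         always_on=True,
#         http20_enabled=True,
#         min_tls_version="1.2",
#         app_settings=[NameValuePair(name="ENVIRONMENT", value="staging")],
#     )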
class SiteConfigResource(ProxyOnlyResource):
"""Web app configuration ARM resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param number_of_workers: Number of workers.
:type number_of_workers: int
:param default_documents: Default documents.
:type default_documents: list[str]
:param net_framework_version: .NET Framework version.
:type net_framework_version: str
:param php_version: Version of PHP.
:type php_version: str
:param python_version: Version of Python.
:type python_version: str
:param node_version: Version of Node.js.
:type node_version: str
:param linux_fx_version: Linux App Framework and version.
:type linux_fx_version: str
:param request_tracing_enabled: :code:`<code>true</code>` if request tracing is enabled;
otherwise, :code:`<code>false</code>`.
:type request_tracing_enabled: bool
:param request_tracing_expiration_time: Request tracing expiration time.
:type request_tracing_expiration_time: ~datetime.datetime
:param remote_debugging_enabled: :code:`<code>true</code>` if remote debugging is enabled;
otherwise, :code:`<code>false</code>`.
:type remote_debugging_enabled: bool
:param remote_debugging_version: Remote debugging version.
:type remote_debugging_version: str
:param http_logging_enabled: :code:`<code>true</code>` if HTTP logging is enabled; otherwise,
:code:`<code>false</code>`.
:type http_logging_enabled: bool
:param logs_directory_size_limit: HTTP logs directory size limit.
:type logs_directory_size_limit: int
:param detailed_error_logging_enabled: :code:`<code>true</code>` if detailed error logging is
enabled; otherwise, :code:`<code>false</code>`.
:type detailed_error_logging_enabled: bool
:param publishing_username: Publishing user name.
:type publishing_username: str
:param app_settings: Application settings.
:type app_settings: list[~azure.mgmt.web.v2016_08_01.models.NameValuePair]
:param connection_strings: Connection strings.
:type connection_strings: list[~azure.mgmt.web.v2016_08_01.models.ConnStringInfo]
:ivar machine_key: Site MachineKey.
:vartype machine_key: ~azure.mgmt.web.v2016_08_01.models.SiteMachineKey
:param handler_mappings: Handler mappings.
:type handler_mappings: list[~azure.mgmt.web.v2016_08_01.models.HandlerMapping]
:param document_root: Document root.
:type document_root: str
:param scm_type: SCM type. Possible values include: "None", "Dropbox", "Tfs", "LocalGit",
"GitHub", "CodePlexGit", "CodePlexHg", "BitbucketGit", "BitbucketHg", "ExternalGit",
"ExternalHg", "OneDrive", "VSO".
:type scm_type: str or ~azure.mgmt.web.v2016_08_01.models.ScmType
:param use32_bit_worker_process: :code:`<code>true</code>` to use 32-bit worker process;
otherwise, :code:`<code>false</code>`.
:type use32_bit_worker_process: bool
:param web_sockets_enabled: :code:`<code>true</code>` if WebSocket is enabled; otherwise,
:code:`<code>false</code>`.
:type web_sockets_enabled: bool
:param always_on: :code:`<code>true</code>` if Always On is enabled; otherwise,
:code:`<code>false</code>`.
:type always_on: bool
:param java_version: Java version.
:type java_version: str
:param java_container: Java container.
:type java_container: str
:param java_container_version: Java container version.
:type java_container_version: str
:param app_command_line: App command line to launch.
:type app_command_line: str
:param managed_pipeline_mode: Managed pipeline mode. Possible values include: "Integrated",
"Classic".
:type managed_pipeline_mode: str or ~azure.mgmt.web.v2016_08_01.models.ManagedPipelineMode
:param virtual_applications: Virtual applications.
:type virtual_applications: list[~azure.mgmt.web.v2016_08_01.models.VirtualApplication]
:param load_balancing: Site load balancing. Possible values include: "WeightedRoundRobin",
"LeastRequests", "LeastResponseTime", "WeightedTotalTraffic", "RequestHash".
:type load_balancing: str or ~azure.mgmt.web.v2016_08_01.models.SiteLoadBalancing
    :param experiments: This is a workaround for polymorphic types.
:type experiments: ~azure.mgmt.web.v2016_08_01.models.Experiments
:param limits: Site limits.
:type limits: ~azure.mgmt.web.v2016_08_01.models.SiteLimits
:param auto_heal_enabled: :code:`<code>true</code>` if Auto Heal is enabled; otherwise,
:code:`<code>false</code>`.
:type auto_heal_enabled: bool
:param auto_heal_rules: Auto Heal rules.
:type auto_heal_rules: ~azure.mgmt.web.v2016_08_01.models.AutoHealRules
:param tracing_options: Tracing options.
:type tracing_options: str
:param vnet_name: Virtual Network name.
:type vnet_name: str
:param cors: Cross-Origin Resource Sharing (CORS) settings.
:type cors: ~azure.mgmt.web.v2016_08_01.models.CorsSettings
:param push: Push endpoint settings.
:type push: ~azure.mgmt.web.v2016_08_01.models.PushSettings
:param api_definition: Information about the formal API definition for the app.
:type api_definition: ~azure.mgmt.web.v2016_08_01.models.ApiDefinitionInfo
:param auto_swap_slot_name: Auto-swap slot name.
:type auto_swap_slot_name: str
:param local_my_sql_enabled: :code:`<code>true</code>` to enable local MySQL; otherwise,
:code:`<code>false</code>`.
:type local_my_sql_enabled: bool
:param ip_security_restrictions: IP security restrictions.
:type ip_security_restrictions: list[~azure.mgmt.web.v2016_08_01.models.IpSecurityRestriction]
:param http20_enabled: Http20Enabled: configures a web site to allow clients to connect over
http2.0.
:type http20_enabled: bool
:param min_tls_version: MinTlsVersion: configures the minimum version of TLS required for SSL
requests. Possible values include: "1.0", "1.1", "1.2".
:type min_tls_version: str or ~azure.mgmt.web.v2016_08_01.models.SupportedTlsVersions
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'machine_key': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'number_of_workers': {'key': 'properties.numberOfWorkers', 'type': 'int'},
'default_documents': {'key': 'properties.defaultDocuments', 'type': '[str]'},
'net_framework_version': {'key': 'properties.netFrameworkVersion', 'type': 'str'},
'php_version': {'key': 'properties.phpVersion', 'type': 'str'},
'python_version': {'key': 'properties.pythonVersion', 'type': 'str'},
'node_version': {'key': 'properties.nodeVersion', 'type': 'str'},
'linux_fx_version': {'key': 'properties.linuxFxVersion', 'type': 'str'},
'request_tracing_enabled': {'key': 'properties.requestTracingEnabled', 'type': 'bool'},
'request_tracing_expiration_time': {'key': 'properties.requestTracingExpirationTime', 'type': 'iso-8601'},
'remote_debugging_enabled': {'key': 'properties.remoteDebuggingEnabled', 'type': 'bool'},
'remote_debugging_version': {'key': 'properties.remoteDebuggingVersion', 'type': 'str'},
'http_logging_enabled': {'key': 'properties.httpLoggingEnabled', 'type': 'bool'},
'logs_directory_size_limit': {'key': 'properties.logsDirectorySizeLimit', 'type': 'int'},
'detailed_error_logging_enabled': {'key': 'properties.detailedErrorLoggingEnabled', 'type': 'bool'},
'publishing_username': {'key': 'properties.publishingUsername', 'type': 'str'},
'app_settings': {'key': 'properties.appSettings', 'type': '[NameValuePair]'},
'connection_strings': {'key': 'properties.connectionStrings', 'type': '[ConnStringInfo]'},
'machine_key': {'key': 'properties.machineKey', 'type': 'SiteMachineKey'},
'handler_mappings': {'key': 'properties.handlerMappings', 'type': '[HandlerMapping]'},
'document_root': {'key': 'properties.documentRoot', 'type': 'str'},
'scm_type': {'key': 'properties.scmType', 'type': 'str'},
'use32_bit_worker_process': {'key': 'properties.use32BitWorkerProcess', 'type': 'bool'},
'web_sockets_enabled': {'key': 'properties.webSocketsEnabled', 'type': 'bool'},
'always_on': {'key': 'properties.alwaysOn', 'type': 'bool'},
'java_version': {'key': 'properties.javaVersion', 'type': 'str'},
'java_container': {'key': 'properties.javaContainer', 'type': 'str'},
'java_container_version': {'key': 'properties.javaContainerVersion', 'type': 'str'},
'app_command_line': {'key': 'properties.appCommandLine', 'type': 'str'},
'managed_pipeline_mode': {'key': 'properties.managedPipelineMode', 'type': 'str'},
'virtual_applications': {'key': 'properties.virtualApplications', 'type': '[VirtualApplication]'},
'load_balancing': {'key': 'properties.loadBalancing', 'type': 'str'},
'experiments': {'key': 'properties.experiments', 'type': 'Experiments'},
'limits': {'key': 'properties.limits', 'type': 'SiteLimits'},
'auto_heal_enabled': {'key': 'properties.autoHealEnabled', 'type': 'bool'},
'auto_heal_rules': {'key': 'properties.autoHealRules', 'type': 'AutoHealRules'},
'tracing_options': {'key': 'properties.tracingOptions', 'type': 'str'},
'vnet_name': {'key': 'properties.vnetName', 'type': 'str'},
'cors': {'key': 'properties.cors', 'type': 'CorsSettings'},
'push': {'key': 'properties.push', 'type': 'PushSettings'},
'api_definition': {'key': 'properties.apiDefinition', 'type': 'ApiDefinitionInfo'},
'auto_swap_slot_name': {'key': 'properties.autoSwapSlotName', 'type': 'str'},
'local_my_sql_enabled': {'key': 'properties.localMySqlEnabled', 'type': 'bool'},
'ip_security_restrictions': {'key': 'properties.ipSecurityRestrictions', 'type': '[IpSecurityRestriction]'},
'http20_enabled': {'key': 'properties.http20Enabled', 'type': 'bool'},
'min_tls_version': {'key': 'properties.minTlsVersion', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
number_of_workers: Optional[int] = None,
default_documents: Optional[List[str]] = None,
net_framework_version: Optional[str] = "v4.6",
php_version: Optional[str] = None,
python_version: Optional[str] = None,
node_version: Optional[str] = None,
linux_fx_version: Optional[str] = None,
request_tracing_enabled: Optional[bool] = None,
request_tracing_expiration_time: Optional[datetime.datetime] = None,
remote_debugging_enabled: Optional[bool] = None,
remote_debugging_version: Optional[str] = None,
http_logging_enabled: Optional[bool] = None,
logs_directory_size_limit: Optional[int] = None,
detailed_error_logging_enabled: Optional[bool] = None,
publishing_username: Optional[str] = None,
app_settings: Optional[List["NameValuePair"]] = None,
connection_strings: Optional[List["ConnStringInfo"]] = None,
handler_mappings: Optional[List["HandlerMapping"]] = None,
document_root: Optional[str] = None,
scm_type: Optional[Union[str, "ScmType"]] = None,
use32_bit_worker_process: Optional[bool] = None,
web_sockets_enabled: Optional[bool] = None,
always_on: Optional[bool] = None,
java_version: Optional[str] = None,
java_container: Optional[str] = None,
java_container_version: Optional[str] = None,
app_command_line: Optional[str] = None,
managed_pipeline_mode: Optional[Union[str, "ManagedPipelineMode"]] = None,
virtual_applications: Optional[List["VirtualApplication"]] = None,
load_balancing: Optional[Union[str, "SiteLoadBalancing"]] = None,
experiments: Optional["Experiments"] = None,
limits: Optional["SiteLimits"] = None,
auto_heal_enabled: Optional[bool] = None,
auto_heal_rules: Optional["AutoHealRules"] = None,
tracing_options: Optional[str] = None,
vnet_name: Optional[str] = None,
cors: Optional["CorsSettings"] = None,
push: Optional["PushSettings"] = None,
api_definition: Optional["ApiDefinitionInfo"] = None,
auto_swap_slot_name: Optional[str] = None,
local_my_sql_enabled: Optional[bool] = False,
ip_security_restrictions: Optional[List["IpSecurityRestriction"]] = None,
http20_enabled: Optional[bool] = True,
min_tls_version: Optional[Union[str, "SupportedTlsVersions"]] = None,
**kwargs
):
super(SiteConfigResource, self).__init__(kind=kind, **kwargs)
self.number_of_workers = number_of_workers
self.default_documents = default_documents
self.net_framework_version = net_framework_version
self.php_version = php_version
self.python_version = python_version
self.node_version = node_version
self.linux_fx_version = linux_fx_version
self.request_tracing_enabled = request_tracing_enabled
self.request_tracing_expiration_time = request_tracing_expiration_time
self.remote_debugging_enabled = remote_debugging_enabled
self.remote_debugging_version = remote_debugging_version
self.http_logging_enabled = http_logging_enabled
self.logs_directory_size_limit = logs_directory_size_limit
self.detailed_error_logging_enabled = detailed_error_logging_enabled
self.publishing_username = publishing_username
self.app_settings = app_settings
self.connection_strings = connection_strings
self.machine_key = None
self.handler_mappings = handler_mappings
self.document_root = document_root
self.scm_type = scm_type
self.use32_bit_worker_process = use32_bit_worker_process
self.web_sockets_enabled = web_sockets_enabled
self.always_on = always_on
self.java_version = java_version
self.java_container = java_container
self.java_container_version = java_container_version
self.app_command_line = app_command_line
self.managed_pipeline_mode = managed_pipeline_mode
self.virtual_applications = virtual_applications
self.load_balancing = load_balancing
self.experiments = experiments
self.limits = limits
self.auto_heal_enabled = auto_heal_enabled
self.auto_heal_rules = auto_heal_rules
self.tracing_options = tracing_options
self.vnet_name = vnet_name
self.cors = cors
self.push = push
self.api_definition = api_definition
self.auto_swap_slot_name = auto_swap_slot_name
self.local_my_sql_enabled = local_my_sql_enabled
self.ip_security_restrictions = ip_security_restrictions
self.http20_enabled = http20_enabled
self.min_tls_version = min_tls_version
class SiteConfigResourceCollection(msrest.serialization.Model):
"""Collection of site configurations.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2016_08_01.models.SiteConfigResource]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SiteConfigResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["SiteConfigResource"],
**kwargs
):
super(SiteConfigResourceCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
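# Usage sketch (illustrative only): collection models pair a required `value` list with a
# server-populated, read-only `next_link`. Callers normally receive these from paged list
# operations rather than building them; the construction below is just for illustration:
#
#     page = SiteConfigResourceCollection(value=[SiteConfigResource(number_of_workers=1)])
#     for item in page.value:
#         print(item.number_of_workers)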
class SiteConfigurationSnapshotInfo(ProxyOnlyResource):
"""A snapshot of a web app configuration.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar time: The time the snapshot was taken.
:vartype time: ~datetime.datetime
:ivar id_properties_id: The id of the snapshot.
:vartype id_properties_id: int
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'time': {'readonly': True},
'id_properties_id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'time': {'key': 'properties.time', 'type': 'iso-8601'},
'id_properties_id': {'key': 'properties.id', 'type': 'int'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(SiteConfigurationSnapshotInfo, self).__init__(kind=kind, **kwargs)
self.time = None
self.id_properties_id = None
class SiteConfigurationSnapshotInfoCollection(msrest.serialization.Model):
"""Collection of metadata for the app configuration snapshots that can be restored.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2016_08_01.models.SiteConfigurationSnapshotInfo]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SiteConfigurationSnapshotInfo]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["SiteConfigurationSnapshotInfo"],
**kwargs
):
super(SiteConfigurationSnapshotInfoCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class SiteExtensionInfo(ProxyOnlyResource):
"""Site Extension Information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param id_properties_id: Site extension ID.
:type id_properties_id: str
:param title: Site extension title.
:type title: str
:param type_properties_type: Site extension type. Possible values include: "Gallery",
"WebRoot".
:type type_properties_type: str or ~azure.mgmt.web.v2016_08_01.models.SiteExtensionType
:param summary: Summary description.
:type summary: str
:param description: Detailed description.
:type description: str
:param version: Version information.
:type version: str
:param extension_url: Extension URL.
:type extension_url: str
:param project_url: Project URL.
:type project_url: str
:param icon_url: Icon URL.
:type icon_url: str
:param license_url: License URL.
:type license_url: str
:param feed_url: Feed URL.
:type feed_url: str
:param authors: List of authors.
:type authors: list[str]
:param installation_args: Installer command line parameters.
:type installation_args: str
:param published_date_time: Published timestamp.
:type published_date_time: ~datetime.datetime
:param download_count: Count of downloads.
:type download_count: int
:param local_is_latest_version: :code:`<code>true</code>` if the local version is the latest
version; :code:`<code>false</code>` otherwise.
:type local_is_latest_version: bool
:param local_path: Local path.
:type local_path: str
:param installed_date_time: Installed timestamp.
:type installed_date_time: ~datetime.datetime
:param provisioning_state: Provisioning state.
:type provisioning_state: str
:param comment: Site Extension comment.
:type comment: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'id_properties_id': {'key': 'properties.id', 'type': 'str'},
'title': {'key': 'properties.title', 'type': 'str'},
'type_properties_type': {'key': 'properties.type', 'type': 'str'},
'summary': {'key': 'properties.summary', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'version': {'key': 'properties.version', 'type': 'str'},
'extension_url': {'key': 'properties.extensionUrl', 'type': 'str'},
'project_url': {'key': 'properties.projectUrl', 'type': 'str'},
'icon_url': {'key': 'properties.iconUrl', 'type': 'str'},
'license_url': {'key': 'properties.licenseUrl', 'type': 'str'},
'feed_url': {'key': 'properties.feedUrl', 'type': 'str'},
'authors': {'key': 'properties.authors', 'type': '[str]'},
'installation_args': {'key': 'properties.installationArgs', 'type': 'str'},
'published_date_time': {'key': 'properties.publishedDateTime', 'type': 'iso-8601'},
'download_count': {'key': 'properties.downloadCount', 'type': 'int'},
'local_is_latest_version': {'key': 'properties.localIsLatestVersion', 'type': 'bool'},
'local_path': {'key': 'properties.localPath', 'type': 'str'},
'installed_date_time': {'key': 'properties.installedDateTime', 'type': 'iso-8601'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'comment': {'key': 'properties.comment', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
id_properties_id: Optional[str] = None,
title: Optional[str] = None,
type_properties_type: Optional[Union[str, "SiteExtensionType"]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
version: Optional[str] = None,
extension_url: Optional[str] = None,
project_url: Optional[str] = None,
icon_url: Optional[str] = None,
license_url: Optional[str] = None,
feed_url: Optional[str] = None,
authors: Optional[List[str]] = None,
installation_args: Optional[str] = None,
published_date_time: Optional[datetime.datetime] = None,
download_count: Optional[int] = None,
local_is_latest_version: Optional[bool] = None,
local_path: Optional[str] = None,
installed_date_time: Optional[datetime.datetime] = None,
provisioning_state: Optional[str] = None,
comment: Optional[str] = None,
**kwargs
):
super(SiteExtensionInfo, self).__init__(kind=kind, **kwargs)
self.id_properties_id = id_properties_id
self.title = title
self.type_properties_type = type_properties_type
self.summary = summary
self.description = description
self.version = version
self.extension_url = extension_url
self.project_url = project_url
self.icon_url = icon_url
self.license_url = license_url
self.feed_url = feed_url
self.authors = authors
self.installation_args = installation_args
self.published_date_time = published_date_time
self.download_count = download_count
self.local_is_latest_version = local_is_latest_version
self.local_path = local_path
self.installed_date_time = installed_date_time
self.provisioning_state = provisioning_state
self.comment = comment
class SiteExtensionInfoCollection(msrest.serialization.Model):
"""Collection of Kudu site extension information elements.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2016_08_01.models.SiteExtensionInfo]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SiteExtensionInfo]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["SiteExtensionInfo"],
**kwargs
):
super(SiteExtensionInfoCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class SiteInstance(ProxyOnlyResource):
"""Instance of an app.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar name_properties_name: Name of instance.
:vartype name_properties_name: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'name_properties_name': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'name_properties_name': {'key': 'properties.name', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(SiteInstance, self).__init__(kind=kind, **kwargs)
self.name_properties_name = None
class SiteLimits(msrest.serialization.Model):
"""Metric limits set on an app.
:param max_percentage_cpu: Maximum allowed CPU usage percentage.
:type max_percentage_cpu: float
:param max_memory_in_mb: Maximum allowed memory usage in MB.
:type max_memory_in_mb: long
:param max_disk_size_in_mb: Maximum allowed disk size usage in MB.
:type max_disk_size_in_mb: long
"""
_attribute_map = {
'max_percentage_cpu': {'key': 'maxPercentageCpu', 'type': 'float'},
'max_memory_in_mb': {'key': 'maxMemoryInMb', 'type': 'long'},
'max_disk_size_in_mb': {'key': 'maxDiskSizeInMb', 'type': 'long'},
}
def __init__(
self,
*,
max_percentage_cpu: Optional[float] = None,
max_memory_in_mb: Optional[int] = None,
max_disk_size_in_mb: Optional[int] = None,
**kwargs
):
super(SiteLimits, self).__init__(**kwargs)
self.max_percentage_cpu = max_percentage_cpu
self.max_memory_in_mb = max_memory_in_mb
self.max_disk_size_in_mb = max_disk_size_in_mb
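# Usage sketch (illustrative only): SiteLimits caps per-app resource usage and is attached to a
# SiteConfig via its `limits` field. The numbers below are placeholder assumptions:
#
#     limits = SiteLimits(max_percentage_cpu=70.0, max_memory_in_mb=1024, max_disk_size_in_mb=5120)
#     config = SiteConfig(limits=limits)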
class SiteLogsConfig(ProxyOnlyResource):
"""Configuration of App Service site logs.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param application_logs: Application logs configuration.
:type application_logs: ~azure.mgmt.web.v2016_08_01.models.ApplicationLogsConfig
:param http_logs: HTTP logs configuration.
:type http_logs: ~azure.mgmt.web.v2016_08_01.models.HttpLogsConfig
:param failed_requests_tracing: Failed requests tracing configuration.
:type failed_requests_tracing: ~azure.mgmt.web.v2016_08_01.models.EnabledConfig
:param detailed_error_messages: Detailed error messages configuration.
:type detailed_error_messages: ~azure.mgmt.web.v2016_08_01.models.EnabledConfig
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'application_logs': {'key': 'properties.applicationLogs', 'type': 'ApplicationLogsConfig'},
'http_logs': {'key': 'properties.httpLogs', 'type': 'HttpLogsConfig'},
'failed_requests_tracing': {'key': 'properties.failedRequestsTracing', 'type': 'EnabledConfig'},
'detailed_error_messages': {'key': 'properties.detailedErrorMessages', 'type': 'EnabledConfig'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
application_logs: Optional["ApplicationLogsConfig"] = None,
http_logs: Optional["HttpLogsConfig"] = None,
failed_requests_tracing: Optional["EnabledConfig"] = None,
detailed_error_messages: Optional["EnabledConfig"] = None,
**kwargs
):
super(SiteLogsConfig, self).__init__(kind=kind, **kwargs)
self.application_logs = application_logs
self.http_logs = http_logs
self.failed_requests_tracing = failed_requests_tracing
self.detailed_error_messages = detailed_error_messages
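# Usage sketch (illustrative only): SiteLogsConfig groups the per-category logging models
# (ApplicationLogsConfig, HttpLogsConfig, EnabledConfig), which are defined elsewhere in this
# module. The nesting below is an assumption for the example:
#
#     logs = SiteLogsConfig(
#         failed_requests_tracing=EnabledConfig(enabled=True),
#         detailed_error_messages=EnabledConfig(enabled=False),
#     )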
class SiteMachineKey(msrest.serialization.Model):
"""MachineKey of an app.
:param validation: MachineKey validation.
:type validation: str
:param validation_key: Validation key.
:type validation_key: str
:param decryption: Algorithm used for decryption.
:type decryption: str
:param decryption_key: Decryption key.
:type decryption_key: str
"""
_attribute_map = {
'validation': {'key': 'validation', 'type': 'str'},
'validation_key': {'key': 'validationKey', 'type': 'str'},
'decryption': {'key': 'decryption', 'type': 'str'},
'decryption_key': {'key': 'decryptionKey', 'type': 'str'},
}
def __init__(
self,
*,
validation: Optional[str] = None,
validation_key: Optional[str] = None,
decryption: Optional[str] = None,
decryption_key: Optional[str] = None,
**kwargs
):
super(SiteMachineKey, self).__init__(**kwargs)
self.validation = validation
self.validation_key = validation_key
self.decryption = decryption
self.decryption_key = decryption_key
class SitePatchResource(ProxyOnlyResource):
"""ARM resource for a site.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar state: Current state of the app.
:vartype state: str
:ivar host_names: Hostnames associated with the app.
:vartype host_names: list[str]
:ivar repository_site_name: Name of the repository site.
:vartype repository_site_name: str
:ivar usage_state: State indicating whether the app has exceeded its quota usage. Read-only.
Possible values include: "Normal", "Exceeded".
:vartype usage_state: str or ~azure.mgmt.web.v2016_08_01.models.UsageState
:param enabled: :code:`<code>true</code>` if the app is enabled; otherwise,
:code:`<code>false</code>`. Setting this value to false disables the app (takes the app
offline).
:type enabled: bool
    :ivar enabled_host_names: Enabled hostnames for the app. Hostnames need to be assigned (see
     HostNames) AND enabled; otherwise, the app is not served on those hostnames.
:vartype enabled_host_names: list[str]
:ivar availability_state: Management information availability state for the app. Possible
values include: "Normal", "Limited", "DisasterRecoveryMode".
:vartype availability_state: str or ~azure.mgmt.web.v2016_08_01.models.SiteAvailabilityState
:param host_name_ssl_states: Hostname SSL states are used to manage the SSL bindings for app's
hostnames.
:type host_name_ssl_states: list[~azure.mgmt.web.v2016_08_01.models.HostNameSslState]
:param server_farm_id: Resource ID of the associated App Service plan, formatted as:
"/subscriptions/{subscriptionID}/resourceGroups/{groupName}/providers/Microsoft.Web/serverfarms/{appServicePlanName}".
:type server_farm_id: str
:param reserved: :code:`<code>true</code>` if reserved; otherwise, :code:`<code>false</code>`.
:type reserved: bool
:ivar last_modified_time_utc: Last time the app was modified, in UTC. Read-only.
:vartype last_modified_time_utc: ~datetime.datetime
:param site_config: Configuration of the app.
:type site_config: ~azure.mgmt.web.v2016_08_01.models.SiteConfig
:ivar traffic_manager_host_names: Azure Traffic Manager hostnames associated with the app.
Read-only.
:vartype traffic_manager_host_names: list[str]
    :param scm_site_also_stopped: :code:`<code>true</code>` to stop the SCM (KUDU) site when the app is
stopped; otherwise, :code:`<code>false</code>`. The default is :code:`<code>false</code>`.
:type scm_site_also_stopped: bool
:ivar target_swap_slot: Specifies which deployment slot this app will swap into. Read-only.
:vartype target_swap_slot: str
:param hosting_environment_profile: App Service Environment to use for the app.
:type hosting_environment_profile: ~azure.mgmt.web.v2016_08_01.models.HostingEnvironmentProfile
:param client_affinity_enabled: :code:`<code>true</code>` to enable client affinity;
:code:`<code>false</code>` to stop sending session affinity cookies, which route client
requests in the same session to the same instance. Default is :code:`<code>true</code>`.
:type client_affinity_enabled: bool
:param client_cert_enabled: :code:`<code>true</code>` to enable client certificate
authentication (TLS mutual authentication); otherwise, :code:`<code>false</code>`. Default is
:code:`<code>false</code>`.
:type client_cert_enabled: bool
:param host_names_disabled: :code:`<code>true</code>` to disable the public hostnames of the
app; otherwise, :code:`<code>false</code>`.
     If :code:`<code>true</code>`, the app is only accessible via the API management process.
:type host_names_disabled: bool
:ivar outbound_ip_addresses: List of IP addresses that the app uses for outbound connections
(e.g. database access). Includes VIPs from tenants that site can be hosted with current
settings. Read-only.
:vartype outbound_ip_addresses: str
:ivar possible_outbound_ip_addresses: List of IP addresses that the app uses for outbound
connections (e.g. database access). Includes VIPs from all tenants. Read-only.
:vartype possible_outbound_ip_addresses: str
:param container_size: Size of the function container.
:type container_size: int
:param daily_memory_time_quota: Maximum allowed daily memory-time quota (applicable on dynamic
apps only).
:type daily_memory_time_quota: int
:ivar suspended_till: App suspended till in case memory-time quota is exceeded.
:vartype suspended_till: ~datetime.datetime
:ivar max_number_of_workers: Maximum number of workers.
This only applies to Functions container.
:vartype max_number_of_workers: int
:param cloning_info: If specified during app creation, the app is cloned from a source app.
:type cloning_info: ~azure.mgmt.web.v2016_08_01.models.CloningInfo
:param snapshot_info: If specified during app creation, the app is created from a previous
snapshot.
:type snapshot_info: ~azure.mgmt.web.v2016_08_01.models.SnapshotRecoveryRequest
:ivar resource_group: Name of the resource group the app belongs to. Read-only.
:vartype resource_group: str
:ivar is_default_container: :code:`<code>true</code>` if the app is a default container;
otherwise, :code:`<code>false</code>`.
:vartype is_default_container: bool
:ivar default_host_name: Default hostname of the app. Read-only.
:vartype default_host_name: str
:ivar slot_swap_status: Status of the last deployment slot swap operation.
:vartype slot_swap_status: ~azure.mgmt.web.v2016_08_01.models.SlotSwapStatus
    :param https_only: HttpsOnly: configures a web site to accept only https requests, and issues
     a redirect for http requests.
:type https_only: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'state': {'readonly': True},
'host_names': {'readonly': True},
'repository_site_name': {'readonly': True},
'usage_state': {'readonly': True},
'enabled_host_names': {'readonly': True},
'availability_state': {'readonly': True},
'last_modified_time_utc': {'readonly': True},
'traffic_manager_host_names': {'readonly': True},
'target_swap_slot': {'readonly': True},
'outbound_ip_addresses': {'readonly': True},
'possible_outbound_ip_addresses': {'readonly': True},
'suspended_till': {'readonly': True},
'max_number_of_workers': {'readonly': True},
'resource_group': {'readonly': True},
'is_default_container': {'readonly': True},
'default_host_name': {'readonly': True},
'slot_swap_status': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'state': {'key': 'properties.state', 'type': 'str'},
'host_names': {'key': 'properties.hostNames', 'type': '[str]'},
'repository_site_name': {'key': 'properties.repositorySiteName', 'type': 'str'},
'usage_state': {'key': 'properties.usageState', 'type': 'str'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'enabled_host_names': {'key': 'properties.enabledHostNames', 'type': '[str]'},
'availability_state': {'key': 'properties.availabilityState', 'type': 'str'},
'host_name_ssl_states': {'key': 'properties.hostNameSslStates', 'type': '[HostNameSslState]'},
'server_farm_id': {'key': 'properties.serverFarmId', 'type': 'str'},
'reserved': {'key': 'properties.reserved', 'type': 'bool'},
'last_modified_time_utc': {'key': 'properties.lastModifiedTimeUtc', 'type': 'iso-8601'},
'site_config': {'key': 'properties.siteConfig', 'type': 'SiteConfig'},
'traffic_manager_host_names': {'key': 'properties.trafficManagerHostNames', 'type': '[str]'},
'scm_site_also_stopped': {'key': 'properties.scmSiteAlsoStopped', 'type': 'bool'},
'target_swap_slot': {'key': 'properties.targetSwapSlot', 'type': 'str'},
'hosting_environment_profile': {'key': 'properties.hostingEnvironmentProfile', 'type': 'HostingEnvironmentProfile'},
'client_affinity_enabled': {'key': 'properties.clientAffinityEnabled', 'type': 'bool'},
'client_cert_enabled': {'key': 'properties.clientCertEnabled', 'type': 'bool'},
'host_names_disabled': {'key': 'properties.hostNamesDisabled', 'type': 'bool'},
'outbound_ip_addresses': {'key': 'properties.outboundIpAddresses', 'type': 'str'},
'possible_outbound_ip_addresses': {'key': 'properties.possibleOutboundIpAddresses', 'type': 'str'},
'container_size': {'key': 'properties.containerSize', 'type': 'int'},
'daily_memory_time_quota': {'key': 'properties.dailyMemoryTimeQuota', 'type': 'int'},
'suspended_till': {'key': 'properties.suspendedTill', 'type': 'iso-8601'},
'max_number_of_workers': {'key': 'properties.maxNumberOfWorkers', 'type': 'int'},
'cloning_info': {'key': 'properties.cloningInfo', 'type': 'CloningInfo'},
'snapshot_info': {'key': 'properties.snapshotInfo', 'type': 'SnapshotRecoveryRequest'},
'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'},
'is_default_container': {'key': 'properties.isDefaultContainer', 'type': 'bool'},
'default_host_name': {'key': 'properties.defaultHostName', 'type': 'str'},
'slot_swap_status': {'key': 'properties.slotSwapStatus', 'type': 'SlotSwapStatus'},
'https_only': {'key': 'properties.httpsOnly', 'type': 'bool'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
enabled: Optional[bool] = None,
host_name_ssl_states: Optional[List["HostNameSslState"]] = None,
server_farm_id: Optional[str] = None,
reserved: Optional[bool] = False,
site_config: Optional["SiteConfig"] = None,
scm_site_also_stopped: Optional[bool] = False,
hosting_environment_profile: Optional["HostingEnvironmentProfile"] = None,
client_affinity_enabled: Optional[bool] = None,
client_cert_enabled: Optional[bool] = None,
host_names_disabled: Optional[bool] = None,
container_size: Optional[int] = None,
daily_memory_time_quota: Optional[int] = None,
cloning_info: Optional["CloningInfo"] = None,
snapshot_info: Optional["SnapshotRecoveryRequest"] = None,
https_only: Optional[bool] = None,
**kwargs
):
super(SitePatchResource, self).__init__(kind=kind, **kwargs)
self.state = None
self.host_names = None
self.repository_site_name = None
self.usage_state = None
self.enabled = enabled
self.enabled_host_names = None
self.availability_state = None
self.host_name_ssl_states = host_name_ssl_states
self.server_farm_id = server_farm_id
self.reserved = reserved
self.last_modified_time_utc = None
self.site_config = site_config
self.traffic_manager_host_names = None
self.scm_site_also_stopped = scm_site_also_stopped
self.target_swap_slot = None
self.hosting_environment_profile = hosting_environment_profile
self.client_affinity_enabled = client_affinity_enabled
self.client_cert_enabled = client_cert_enabled
self.host_names_disabled = host_names_disabled
self.outbound_ip_addresses = None
self.possible_outbound_ip_addresses = None
self.container_size = container_size
self.daily_memory_time_quota = daily_memory_time_quota
self.suspended_till = None
self.max_number_of_workers = None
self.cloning_info = cloning_info
self.snapshot_info = snapshot_info
self.resource_group = None
self.is_default_container = None
self.default_host_name = None
self.slot_swap_status = None
self.https_only = https_only
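# Usage sketch (illustrative only): SitePatchResource is the PATCH-style body for updating an
# app; only writable fields are supplied, read-only fields remain None. Values are placeholders:
#
#     patch = SitePatchResource(
#         client_affinity_enabled=False,
#         https_only=True,
#         site_config=SiteConfig(always_on=True),
#     )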
class SitePhpErrorLogFlag(ProxyOnlyResource):
"""Used for getting PHP error logging flag.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param local_log_errors: Local log_errors setting.
:type local_log_errors: str
:param master_log_errors: Master log_errors setting.
:type master_log_errors: str
:param local_log_errors_max_length: Local log_errors_max_len setting.
:type local_log_errors_max_length: str
:param master_log_errors_max_length: Master log_errors_max_len setting.
:type master_log_errors_max_length: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'local_log_errors': {'key': 'properties.localLogErrors', 'type': 'str'},
'master_log_errors': {'key': 'properties.masterLogErrors', 'type': 'str'},
'local_log_errors_max_length': {'key': 'properties.localLogErrorsMaxLength', 'type': 'str'},
'master_log_errors_max_length': {'key': 'properties.masterLogErrorsMaxLength', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
local_log_errors: Optional[str] = None,
master_log_errors: Optional[str] = None,
local_log_errors_max_length: Optional[str] = None,
master_log_errors_max_length: Optional[str] = None,
**kwargs
):
super(SitePhpErrorLogFlag, self).__init__(kind=kind, **kwargs)
self.local_log_errors = local_log_errors
self.master_log_errors = master_log_errors
self.local_log_errors_max_length = local_log_errors_max_length
self.master_log_errors_max_length = master_log_errors_max_length
class SiteSourceControl(ProxyOnlyResource):
"""Source control configuration for an app.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param repo_url: Repository or source control URL.
:type repo_url: str
:param branch: Name of branch to use for deployment.
:type branch: str
:param is_manual_integration: :code:`<code>true</code>` to limit to manual integration;
:code:`<code>false</code>` to enable continuous integration (which configures webhooks into
online repos like GitHub).
:type is_manual_integration: bool
:param deployment_rollback_enabled: :code:`<code>true</code>` to enable deployment rollback;
otherwise, :code:`<code>false</code>`.
:type deployment_rollback_enabled: bool
:param is_mercurial: :code:`<code>true</code>` for a Mercurial repository;
:code:`<code>false</code>` for a Git repository.
:type is_mercurial: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'repo_url': {'key': 'properties.repoUrl', 'type': 'str'},
'branch': {'key': 'properties.branch', 'type': 'str'},
'is_manual_integration': {'key': 'properties.isManualIntegration', 'type': 'bool'},
'deployment_rollback_enabled': {'key': 'properties.deploymentRollbackEnabled', 'type': 'bool'},
'is_mercurial': {'key': 'properties.isMercurial', 'type': 'bool'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
repo_url: Optional[str] = None,
branch: Optional[str] = None,
is_manual_integration: Optional[bool] = None,
deployment_rollback_enabled: Optional[bool] = None,
is_mercurial: Optional[bool] = None,
**kwargs
):
super(SiteSourceControl, self).__init__(kind=kind, **kwargs)
self.repo_url = repo_url
self.branch = branch
self.is_manual_integration = is_manual_integration
self.deployment_rollback_enabled = deployment_rollback_enabled
self.is_mercurial = is_mercurial
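# Usage sketch (illustrative only): wiring an app to an external Git repository. The URL and
# branch are placeholder assumptions:
#
#     source_control = SiteSourceControl(
#         repo_url="https://github.com/contoso/example-app",
#         branch="main",
#         is_manual_integration=True,   # no webhook-based continuous integration
#         is_mercurial=False,           # Git, not Mercurial
#     )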
class SlotConfigNamesResource(ProxyOnlyResource):
"""Slot Config names azure resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param connection_string_names: List of connection string names.
:type connection_string_names: list[str]
:param app_setting_names: List of application settings names.
:type app_setting_names: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'connection_string_names': {'key': 'properties.connectionStringNames', 'type': '[str]'},
'app_setting_names': {'key': 'properties.appSettingNames', 'type': '[str]'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
connection_string_names: Optional[List[str]] = None,
app_setting_names: Optional[List[str]] = None,
**kwargs
):
super(SlotConfigNamesResource, self).__init__(kind=kind, **kwargs)
self.connection_string_names = connection_string_names
self.app_setting_names = app_setting_names
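# Usage sketch (illustrative only): marking settings as "slot sticky" so they stay with a slot
# during swaps. The setting and connection string names below are placeholder assumptions:
#
#     sticky = SlotConfigNamesResource(
#         app_setting_names=["ENVIRONMENT", "FEATURE_FLAGS"],
#         connection_string_names=["PrimaryDb"],
#     )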
class SlotDifference(ProxyOnlyResource):
"""A setting difference between two deployment slots of an app.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar type_properties_type: Type of the difference: Information, Warning or Error.
:vartype type_properties_type: str
:ivar setting_type: The type of the setting: General, AppSetting or ConnectionString.
:vartype setting_type: str
:ivar diff_rule: Rule that describes how to process the setting difference during a slot swap.
:vartype diff_rule: str
:ivar setting_name: Name of the setting.
:vartype setting_name: str
:ivar value_in_current_slot: Value of the setting in the current slot.
:vartype value_in_current_slot: str
:ivar value_in_target_slot: Value of the setting in the target slot.
:vartype value_in_target_slot: str
:ivar description: Description of the setting difference.
:vartype description: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'type_properties_type': {'readonly': True},
'setting_type': {'readonly': True},
'diff_rule': {'readonly': True},
'setting_name': {'readonly': True},
'value_in_current_slot': {'readonly': True},
'value_in_target_slot': {'readonly': True},
'description': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'type_properties_type': {'key': 'properties.type', 'type': 'str'},
'setting_type': {'key': 'properties.settingType', 'type': 'str'},
'diff_rule': {'key': 'properties.diffRule', 'type': 'str'},
'setting_name': {'key': 'properties.settingName', 'type': 'str'},
'value_in_current_slot': {'key': 'properties.valueInCurrentSlot', 'type': 'str'},
'value_in_target_slot': {'key': 'properties.valueInTargetSlot', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(SlotDifference, self).__init__(kind=kind, **kwargs)
self.type_properties_type = None
self.setting_type = None
self.diff_rule = None
self.setting_name = None
self.value_in_current_slot = None
self.value_in_target_slot = None
self.description = None
class SlotDifferenceCollection(msrest.serialization.Model):
"""Collection of slot differences.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2016_08_01.models.SlotDifference]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SlotDifference]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["SlotDifference"],
**kwargs
):
super(SlotDifferenceCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class SlotSwapStatus(msrest.serialization.Model):
"""The status of the last successful slot swap operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar timestamp_utc: The time the last successful slot swap completed.
:vartype timestamp_utc: ~datetime.datetime
:ivar source_slot_name: The source slot of the last swap operation.
:vartype source_slot_name: str
:ivar destination_slot_name: The destination slot of the last swap operation.
:vartype destination_slot_name: str
"""
_validation = {
'timestamp_utc': {'readonly': True},
'source_slot_name': {'readonly': True},
'destination_slot_name': {'readonly': True},
}
_attribute_map = {
'timestamp_utc': {'key': 'timestampUtc', 'type': 'iso-8601'},
'source_slot_name': {'key': 'sourceSlotName', 'type': 'str'},
'destination_slot_name': {'key': 'destinationSlotName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SlotSwapStatus, self).__init__(**kwargs)
self.timestamp_utc = None
self.source_slot_name = None
self.destination_slot_name = None
class SlowRequestsBasedTrigger(msrest.serialization.Model):
"""Trigger based on request execution time.
:param time_taken: Time taken.
:type time_taken: str
:param count: Request Count.
:type count: int
:param time_interval: Time interval.
:type time_interval: str
"""
_attribute_map = {
'time_taken': {'key': 'timeTaken', 'type': 'str'},
'count': {'key': 'count', 'type': 'int'},
'time_interval': {'key': 'timeInterval', 'type': 'str'},
}
def __init__(
self,
*,
time_taken: Optional[str] = None,
count: Optional[int] = None,
time_interval: Optional[str] = None,
**kwargs
):
super(SlowRequestsBasedTrigger, self).__init__(**kwargs)
self.time_taken = time_taken
self.count = count
self.time_interval = time_interval
class Snapshot(ProxyOnlyResource):
"""A snapshot of an app.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar time: The time the snapshot was taken.
:vartype time: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'time': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'time': {'key': 'properties.time', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(Snapshot, self).__init__(kind=kind, **kwargs)
self.time = None
class SnapshotCollection(msrest.serialization.Model):
"""Collection of snapshots which can be used to revert an app to a previous time.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2016_08_01.models.Snapshot]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Snapshot]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["Snapshot"],
**kwargs
):
super(SnapshotCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class SnapshotRecoveryRequest(ProxyOnlyResource):
"""Details about app recovery operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
    :param snapshot_time: Point in time at which the app recovery should be attempted, formatted as
a DateTime string.
:type snapshot_time: str
:param recovery_target: Specifies the web app that snapshot contents will be written to.
:type recovery_target: ~azure.mgmt.web.v2016_08_01.models.SnapshotRecoveryTarget
    :param overwrite: If :code:`<code>true</code>`, the recovery operation can overwrite the source app;
otherwise, :code:`<code>false</code>`.
:type overwrite: bool
:param recover_configuration: If true, site configuration, in addition to content, will be
reverted.
:type recover_configuration: bool
:param ignore_conflicting_host_names: If true, custom hostname conflicts will be ignored when
recovering to a target web app.
This setting is only necessary when RecoverConfiguration is enabled.
:type ignore_conflicting_host_names: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'snapshot_time': {'key': 'properties.snapshotTime', 'type': 'str'},
'recovery_target': {'key': 'properties.recoveryTarget', 'type': 'SnapshotRecoveryTarget'},
'overwrite': {'key': 'properties.overwrite', 'type': 'bool'},
'recover_configuration': {'key': 'properties.recoverConfiguration', 'type': 'bool'},
'ignore_conflicting_host_names': {'key': 'properties.ignoreConflictingHostNames', 'type': 'bool'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
snapshot_time: Optional[str] = None,
recovery_target: Optional["SnapshotRecoveryTarget"] = None,
overwrite: Optional[bool] = None,
recover_configuration: Optional[bool] = None,
ignore_conflicting_host_names: Optional[bool] = None,
**kwargs
):
super(SnapshotRecoveryRequest, self).__init__(kind=kind, **kwargs)
self.snapshot_time = snapshot_time
self.recovery_target = recovery_target
self.overwrite = overwrite
self.recover_configuration = recover_configuration
self.ignore_conflicting_host_names = ignore_conflicting_host_names
class SnapshotRecoveryTarget(msrest.serialization.Model):
"""Specifies the web app that snapshot contents will be written to.
:param location: Geographical location of the target web app, e.g. SouthEastAsia,
SouthCentralUS.
:type location: str
:param id: ARM resource ID of the target app.
/subscriptions/{subId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}
for production slots and
/subscriptions/{subId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slotName}
for other slots.
:type id: str
"""
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
*,
location: Optional[str] = None,
id: Optional[str] = None,
**kwargs
):
super(SnapshotRecoveryTarget, self).__init__(**kwargs)
self.location = location
self.id = id
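# A minimal sketch combining the two snapshot-recovery models above. The
# subscription, resource group, and site names are placeholders, and the ISO 8601
# timestamp format is an assumption ("formatted as a DateTime string" above).
def _example_snapshot_recovery_request():
    target = SnapshotRecoveryTarget(
        location="SouthCentralUS",
        id=("/subscriptions/00000000-0000-0000-0000-000000000000"
            "/resourceGroups/my-rg/providers/Microsoft.Web/sites/my-app"),
    )
    return SnapshotRecoveryRequest(
        snapshot_time="2016-08-01T12:00:00Z",
        recovery_target=target,
        overwrite=False,
        recover_configuration=True,
        ignore_conflicting_host_names=True,
    )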
class StatusCodesBasedTrigger(msrest.serialization.Model):
"""Trigger based on status code.
:param status: HTTP status code.
:type status: int
:param sub_status: Request Sub Status.
:type sub_status: int
:param win32_status: Win32 error code.
:type win32_status: int
:param count: Request Count.
:type count: int
:param time_interval: Time interval.
:type time_interval: str
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'int'},
'sub_status': {'key': 'subStatus', 'type': 'int'},
'win32_status': {'key': 'win32Status', 'type': 'int'},
'count': {'key': 'count', 'type': 'int'},
'time_interval': {'key': 'timeInterval', 'type': 'str'},
}
def __init__(
self,
*,
status: Optional[int] = None,
sub_status: Optional[int] = None,
win32_status: Optional[int] = None,
count: Optional[int] = None,
time_interval: Optional[str] = None,
**kwargs
):
super(StatusCodesBasedTrigger, self).__init__(**kwargs)
self.status = status
self.sub_status = sub_status
self.win32_status = win32_status
self.count = count
self.time_interval = time_interval
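# A minimal sketch of the two trigger models above, configured as they might feed
# an auto-heal rule: 50 HTTP 500 responses, or 20 requests slower than 30 seconds,
# within 5 minutes. The "hh:mm:ss" interval format is an assumption, not defined
# in this module.
def _example_autoheal_triggers():
    too_many_errors = StatusCodesBasedTrigger(
        status=500, sub_status=0, win32_status=0,
        count=50, time_interval="00:05:00",
    )
    too_slow = SlowRequestsBasedTrigger(
        time_taken="00:00:30", count=20, time_interval="00:05:00",
    )
    return too_many_errors, too_slow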
class StorageMigrationOptions(ProxyOnlyResource):
"""Options for app content migration.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param azurefiles_connection_string: AzureFiles connection string.
:type azurefiles_connection_string: str
:param azurefiles_share: AzureFiles share.
:type azurefiles_share: str
    :param switch_site_after_migration: :code:`<code>true</code>` if the app should be switched
over; otherwise, :code:`<code>false</code>`.
:type switch_site_after_migration: bool
:param block_write_access_to_site: :code:`<code>true</code>` if the app should be read only
during copy operation; otherwise, :code:`<code>false</code>`.
:type block_write_access_to_site: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'azurefiles_connection_string': {'key': 'properties.azurefilesConnectionString', 'type': 'str'},
'azurefiles_share': {'key': 'properties.azurefilesShare', 'type': 'str'},
'switch_site_after_migration': {'key': 'properties.switchSiteAfterMigration', 'type': 'bool'},
'block_write_access_to_site': {'key': 'properties.blockWriteAccessToSite', 'type': 'bool'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
azurefiles_connection_string: Optional[str] = None,
azurefiles_share: Optional[str] = None,
switch_site_after_migration: Optional[bool] = False,
block_write_access_to_site: Optional[bool] = False,
**kwargs
):
super(StorageMigrationOptions, self).__init__(kind=kind, **kwargs)
self.azurefiles_connection_string = azurefiles_connection_string
self.azurefiles_share = azurefiles_share
self.switch_site_after_migration = switch_site_after_migration
self.block_write_access_to_site = block_write_access_to_site
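# A minimal sketch of a content-migration options payload. The connection string
# and share name are placeholders; a real AzureFiles connection string would come
# from the target storage account.
def _example_storage_migration_options():
    return StorageMigrationOptions(
        azurefiles_connection_string="DefaultEndpointsProtocol=https;AccountName=<account>;AccountKey=<key>",
        azurefiles_share="app-content",
        switch_site_after_migration=True,
        block_write_access_to_site=True,
    )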
class StorageMigrationResponse(ProxyOnlyResource):
"""Response for a migration of app content request.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar operation_id: When server starts the migration process, it will return an operation ID
identifying that particular migration operation.
:vartype operation_id: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'operation_id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'operation_id': {'key': 'properties.operationId', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(StorageMigrationResponse, self).__init__(kind=kind, **kwargs)
self.operation_id = None
class StringDictionary(ProxyOnlyResource):
"""String dictionary resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param properties: Settings.
:type properties: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
**kwargs
):
super(StringDictionary, self).__init__(kind=kind, **kwargs)
self.properties = properties
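# A minimal sketch: StringDictionary wraps a flat name/value map, for example an
# application-settings payload. The key and value are placeholders.
def _example_string_dictionary():
    return StringDictionary(properties={"WEBSITE_TIME_ZONE": "UTC"})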
class TriggeredJobHistory(ProxyOnlyResource):
"""Triggered Web Job History. List of Triggered Web Job Run Information elements.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param triggered_job_runs: List of triggered web job runs.
:type triggered_job_runs: list[~azure.mgmt.web.v2016_08_01.models.TriggeredJobRun]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'triggered_job_runs': {'key': 'properties.triggeredJobRuns', 'type': '[TriggeredJobRun]'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
triggered_job_runs: Optional[List["TriggeredJobRun"]] = None,
**kwargs
):
super(TriggeredJobHistory, self).__init__(kind=kind, **kwargs)
self.triggered_job_runs = triggered_job_runs
class TriggeredJobHistoryCollection(msrest.serialization.Model):
"""Collection of Kudu continuous web job information elements.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2016_08_01.models.TriggeredJobHistory]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[TriggeredJobHistory]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["TriggeredJobHistory"],
**kwargs
):
super(TriggeredJobHistoryCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class TriggeredJobRun(ProxyOnlyResource):
"""Triggered Web Job Run Information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param id_properties_id: Job ID.
:type id_properties_id: str
:ivar name_properties_name: Job name.
:vartype name_properties_name: str
:param status: Job status. Possible values include: "Success", "Failed", "Error".
:type status: str or ~azure.mgmt.web.v2016_08_01.models.TriggeredWebJobStatus
:param start_time: Start time.
:type start_time: ~datetime.datetime
:param end_time: End time.
:type end_time: ~datetime.datetime
:param duration: Job duration.
:type duration: str
:param output_url: Output URL.
:type output_url: str
:param error_url: Error URL.
:type error_url: str
:param url: Job URL.
:type url: str
:param job_name: Job name.
:type job_name: str
:param trigger: Job trigger.
:type trigger: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'name_properties_name': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'id_properties_id': {'key': 'properties.id', 'type': 'str'},
'name_properties_name': {'key': 'properties.name', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'str'},
'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
'end_time': {'key': 'properties.endTime', 'type': 'iso-8601'},
'duration': {'key': 'properties.duration', 'type': 'str'},
'output_url': {'key': 'properties.outputUrl', 'type': 'str'},
'error_url': {'key': 'properties.errorUrl', 'type': 'str'},
'url': {'key': 'properties.url', 'type': 'str'},
'job_name': {'key': 'properties.jobName', 'type': 'str'},
'trigger': {'key': 'properties.trigger', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
id_properties_id: Optional[str] = None,
status: Optional[Union[str, "TriggeredWebJobStatus"]] = None,
start_time: Optional[datetime.datetime] = None,
end_time: Optional[datetime.datetime] = None,
duration: Optional[str] = None,
output_url: Optional[str] = None,
error_url: Optional[str] = None,
url: Optional[str] = None,
job_name: Optional[str] = None,
trigger: Optional[str] = None,
**kwargs
):
super(TriggeredJobRun, self).__init__(kind=kind, **kwargs)
self.id_properties_id = id_properties_id
self.name_properties_name = None
self.status = status
self.start_time = start_time
self.end_time = end_time
self.duration = duration
self.output_url = output_url
self.error_url = error_url
self.url = url
self.job_name = job_name
self.trigger = trigger
class TriggeredWebJob(ProxyOnlyResource):
"""Triggered Web Job Information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param latest_run: Latest job run information.
:type latest_run: ~azure.mgmt.web.v2016_08_01.models.TriggeredJobRun
:param history_url: History URL.
:type history_url: str
:param scheduler_logs_url: Scheduler Logs URL.
:type scheduler_logs_url: str
:ivar name_properties_name: Job name. Used as job identifier in ARM resource URI.
:vartype name_properties_name: str
:param run_command: Run command.
:type run_command: str
:param url: Job URL.
:type url: str
:param extra_info_url: Extra Info URL.
:type extra_info_url: str
:param job_type: Job type. Possible values include: "Continuous", "Triggered".
:type job_type: str or ~azure.mgmt.web.v2016_08_01.models.WebJobType
:param error: Error information.
:type error: str
    :param using_sdk: Using SDK?
:type using_sdk: bool
:param settings: Job settings.
:type settings: dict[str, any]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'name_properties_name': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'latest_run': {'key': 'properties.latestRun', 'type': 'TriggeredJobRun'},
'history_url': {'key': 'properties.historyUrl', 'type': 'str'},
'scheduler_logs_url': {'key': 'properties.schedulerLogsUrl', 'type': 'str'},
'name_properties_name': {'key': 'properties.name', 'type': 'str'},
'run_command': {'key': 'properties.runCommand', 'type': 'str'},
'url': {'key': 'properties.url', 'type': 'str'},
'extra_info_url': {'key': 'properties.extraInfoUrl', 'type': 'str'},
'job_type': {'key': 'properties.jobType', 'type': 'str'},
'error': {'key': 'properties.error', 'type': 'str'},
'using_sdk': {'key': 'properties.usingSdk', 'type': 'bool'},
'settings': {'key': 'properties.settings', 'type': '{object}'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
latest_run: Optional["TriggeredJobRun"] = None,
history_url: Optional[str] = None,
scheduler_logs_url: Optional[str] = None,
run_command: Optional[str] = None,
url: Optional[str] = None,
extra_info_url: Optional[str] = None,
job_type: Optional[Union[str, "WebJobType"]] = None,
error: Optional[str] = None,
using_sdk: Optional[bool] = None,
settings: Optional[Dict[str, Any]] = None,
**kwargs
):
super(TriggeredWebJob, self).__init__(kind=kind, **kwargs)
self.latest_run = latest_run
self.history_url = history_url
self.scheduler_logs_url = scheduler_logs_url
self.name_properties_name = None
self.run_command = run_command
self.url = url
self.extra_info_url = extra_info_url
self.job_type = job_type
self.error = error
self.using_sdk = using_sdk
self.settings = settings
class TriggeredWebJobCollection(msrest.serialization.Model):
"""Collection of Kudu continuous web job information elements.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2016_08_01.models.TriggeredWebJob]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[TriggeredWebJob]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["TriggeredWebJob"],
**kwargs
):
super(TriggeredWebJobCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class User(ProxyOnlyResource):
"""User credentials used for publishing activity.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param user_name: Username.
:type user_name: str
:param publishing_user_name: Username used for publishing.
:type publishing_user_name: str
:param publishing_password: Password used for publishing.
:type publishing_password: str
:param publishing_password_hash: Password hash used for publishing.
:type publishing_password_hash: str
:param publishing_password_hash_salt: Password hash salt used for publishing.
:type publishing_password_hash_salt: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'user_name': {'key': 'properties.name', 'type': 'str'},
'publishing_user_name': {'key': 'properties.publishingUserName', 'type': 'str'},
'publishing_password': {'key': 'properties.publishingPassword', 'type': 'str'},
'publishing_password_hash': {'key': 'properties.publishingPasswordHash', 'type': 'str'},
'publishing_password_hash_salt': {'key': 'properties.publishingPasswordHashSalt', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
user_name: Optional[str] = None,
publishing_user_name: Optional[str] = None,
publishing_password: Optional[str] = None,
publishing_password_hash: Optional[str] = None,
publishing_password_hash_salt: Optional[str] = None,
**kwargs
):
super(User, self).__init__(kind=kind, **kwargs)
self.user_name = user_name
self.publishing_user_name = publishing_user_name
self.publishing_password = publishing_password
self.publishing_password_hash = publishing_password_hash
self.publishing_password_hash_salt = publishing_password_hash_salt
class VirtualApplication(msrest.serialization.Model):
"""Virtual application in an app.
:param virtual_path: Virtual path.
:type virtual_path: str
:param physical_path: Physical path.
:type physical_path: str
:param preload_enabled: :code:`<code>true</code>` if preloading is enabled; otherwise,
:code:`<code>false</code>`.
:type preload_enabled: bool
:param virtual_directories: Virtual directories for virtual application.
:type virtual_directories: list[~azure.mgmt.web.v2016_08_01.models.VirtualDirectory]
"""
_attribute_map = {
'virtual_path': {'key': 'virtualPath', 'type': 'str'},
'physical_path': {'key': 'physicalPath', 'type': 'str'},
'preload_enabled': {'key': 'preloadEnabled', 'type': 'bool'},
'virtual_directories': {'key': 'virtualDirectories', 'type': '[VirtualDirectory]'},
}
def __init__(
self,
*,
virtual_path: Optional[str] = None,
physical_path: Optional[str] = None,
preload_enabled: Optional[bool] = None,
virtual_directories: Optional[List["VirtualDirectory"]] = None,
**kwargs
):
super(VirtualApplication, self).__init__(**kwargs)
self.virtual_path = virtual_path
self.physical_path = physical_path
self.preload_enabled = preload_enabled
self.virtual_directories = virtual_directories
class VirtualDirectory(msrest.serialization.Model):
"""Directory for virtual application.
:param virtual_path: Path to virtual application.
:type virtual_path: str
:param physical_path: Physical path.
:type physical_path: str
"""
_attribute_map = {
'virtual_path': {'key': 'virtualPath', 'type': 'str'},
'physical_path': {'key': 'physicalPath', 'type': 'str'},
}
def __init__(
self,
*,
virtual_path: Optional[str] = None,
physical_path: Optional[str] = None,
**kwargs
):
super(VirtualDirectory, self).__init__(**kwargs)
self.virtual_path = virtual_path
self.physical_path = physical_path
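# A minimal sketch of a virtual application with one nested virtual directory.
# The physical paths follow the conventional App Service layout under site\wwwroot;
# the "/static" mapping is a placeholder.
def _example_virtual_application():
    return VirtualApplication(
        virtual_path="/",
        physical_path="site\\wwwroot",
        preload_enabled=True,
        virtual_directories=[
            VirtualDirectory(virtual_path="/static",
                             physical_path="site\\wwwroot\\static"),
        ],
    )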
class VnetGateway(ProxyOnlyResource):
"""The Virtual Network gateway contract. This is used to give the Virtual Network gateway access to the VPN package.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param vnet_name: The Virtual Network name.
:type vnet_name: str
:param vpn_package_uri: The URI where the VPN package can be downloaded.
:type vpn_package_uri: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'vnet_name': {'key': 'properties.vnetName', 'type': 'str'},
'vpn_package_uri': {'key': 'properties.vpnPackageUri', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
vnet_name: Optional[str] = None,
vpn_package_uri: Optional[str] = None,
**kwargs
):
super(VnetGateway, self).__init__(kind=kind, **kwargs)
self.vnet_name = vnet_name
self.vpn_package_uri = vpn_package_uri
class VnetInfo(ProxyOnlyResource):
"""Virtual Network information contract.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param vnet_resource_id: The Virtual Network's resource ID.
:type vnet_resource_id: str
:ivar cert_thumbprint: The client certificate thumbprint.
:vartype cert_thumbprint: str
:param cert_blob: A certificate file (.cer) blob containing the public key of the private key
used to authenticate a
Point-To-Site VPN connection.
:type cert_blob: bytearray
:ivar routes: The routes that this Virtual Network connection uses.
:vartype routes: list[~azure.mgmt.web.v2016_08_01.models.VnetRoute]
:ivar resync_required: :code:`<code>true</code>` if a resync is required; otherwise,
:code:`<code>false</code>`.
:vartype resync_required: bool
:param dns_servers: DNS servers to be used by this Virtual Network. This should be a
comma-separated list of IP addresses.
:type dns_servers: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'cert_thumbprint': {'readonly': True},
'routes': {'readonly': True},
'resync_required': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'vnet_resource_id': {'key': 'properties.vnetResourceId', 'type': 'str'},
'cert_thumbprint': {'key': 'properties.certThumbprint', 'type': 'str'},
'cert_blob': {'key': 'properties.certBlob', 'type': 'bytearray'},
'routes': {'key': 'properties.routes', 'type': '[VnetRoute]'},
'resync_required': {'key': 'properties.resyncRequired', 'type': 'bool'},
'dns_servers': {'key': 'properties.dnsServers', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
vnet_resource_id: Optional[str] = None,
cert_blob: Optional[bytearray] = None,
dns_servers: Optional[str] = None,
**kwargs
):
super(VnetInfo, self).__init__(kind=kind, **kwargs)
self.vnet_resource_id = vnet_resource_id
self.cert_thumbprint = None
self.cert_blob = cert_blob
self.routes = None
self.resync_required = None
self.dns_servers = dns_servers
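# A minimal sketch: per the docstring above, dns_servers is a single
# comma-separated string rather than a list. The resource ID and addresses are
# placeholders.
def _example_vnet_info():
    return VnetInfo(
        vnet_resource_id=("/subscriptions/00000000-0000-0000-0000-000000000000"
                          "/resourceGroups/my-rg/providers/Microsoft.Network"
                          "/virtualNetworks/my-vnet"),
        dns_servers="10.0.0.4,10.0.0.5",
    )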
class VnetRoute(ProxyOnlyResource):
"""Virtual Network route contract used to pass routing information for a Virtual Network.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param vnet_route_name: The name of this route. This is only returned by the server and does
not need to be set by the client.
:type vnet_route_name: str
:param start_address: The starting address for this route. This may also include a CIDR
notation, in which case the end address must not be specified.
:type start_address: str
:param end_address: The ending address for this route. If the start address is specified in
CIDR notation, this must be omitted.
:type end_address: str
:param route_type: The type of route this is:
DEFAULT - By default, every app has routes to the local address ranges specified by RFC1918
INHERITED - Routes inherited from the real Virtual Network routes
STATIC - Static route set on the app only
These values will be used for syncing an app's routes with those from a Virtual Network.
Possible values include: "DEFAULT", "INHERITED", "STATIC".
:type route_type: str or ~azure.mgmt.web.v2016_08_01.models.RouteType
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'vnet_route_name': {'key': 'properties.name', 'type': 'str'},
'start_address': {'key': 'properties.startAddress', 'type': 'str'},
'end_address': {'key': 'properties.endAddress', 'type': 'str'},
'route_type': {'key': 'properties.routeType', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
vnet_route_name: Optional[str] = None,
start_address: Optional[str] = None,
end_address: Optional[str] = None,
route_type: Optional[Union[str, "RouteType"]] = None,
**kwargs
):
super(VnetRoute, self).__init__(kind=kind, **kwargs)
self.vnet_route_name = vnet_route_name
self.start_address = start_address
self.end_address = end_address
self.route_type = route_type
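# A minimal sketch of a STATIC route expressed in CIDR form. Per the docstring
# above, when start_address carries a CIDR suffix, end_address must be omitted.
def _example_static_vnet_route():
    return VnetRoute(
        vnet_route_name="on-prem-range",
        start_address="10.1.0.0/16",
        route_type="STATIC",
    )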
class WebAppCollection(msrest.serialization.Model):
"""Collection of App Service apps.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2016_08_01.models.Site]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Site]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["Site"],
**kwargs
):
super(WebAppCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class WebAppInstanceCollection(msrest.serialization.Model):
"""Collection of app instances.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2016_08_01.models.SiteInstance]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SiteInstance]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["SiteInstance"],
**kwargs
):
super(WebAppInstanceCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class WebJob(ProxyOnlyResource):
"""Web Job Information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar name_properties_name: Job name. Used as job identifier in ARM resource URI.
:vartype name_properties_name: str
:param run_command: Run command.
:type run_command: str
:param url: Job URL.
:type url: str
:param extra_info_url: Extra Info URL.
:type extra_info_url: str
:param job_type: Job type. Possible values include: "Continuous", "Triggered".
:type job_type: str or ~azure.mgmt.web.v2016_08_01.models.WebJobType
:param error: Error information.
:type error: str
    :param using_sdk: Using SDK?
:type using_sdk: bool
:param settings: Job settings.
:type settings: dict[str, any]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'name_properties_name': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'name_properties_name': {'key': 'properties.name', 'type': 'str'},
'run_command': {'key': 'properties.runCommand', 'type': 'str'},
'url': {'key': 'properties.url', 'type': 'str'},
'extra_info_url': {'key': 'properties.extraInfoUrl', 'type': 'str'},
'job_type': {'key': 'properties.jobType', 'type': 'str'},
'error': {'key': 'properties.error', 'type': 'str'},
'using_sdk': {'key': 'properties.usingSdk', 'type': 'bool'},
'settings': {'key': 'properties.settings', 'type': '{object}'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
run_command: Optional[str] = None,
url: Optional[str] = None,
extra_info_url: Optional[str] = None,
job_type: Optional[Union[str, "WebJobType"]] = None,
error: Optional[str] = None,
using_sdk: Optional[bool] = None,
settings: Optional[Dict[str, Any]] = None,
**kwargs
):
super(WebJob, self).__init__(kind=kind, **kwargs)
self.name_properties_name = None
self.run_command = run_command
self.url = url
self.extra_info_url = extra_info_url
self.job_type = job_type
self.error = error
self.using_sdk = using_sdk
self.settings = settings
class WebJobCollection(msrest.serialization.Model):
"""Collection of Kudu web job information elements.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2016_08_01.models.WebJob]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[WebJob]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["WebJob"],
**kwargs
):
super(WebJobCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
| mit |
robintw/scikit-image | skimage/feature/_daisy.py | 19 | 9777 | import numpy as np
from scipy import sqrt, pi, arctan2, cos, sin, exp
from scipy.ndimage import gaussian_filter
from .. import img_as_float, draw
from ..color import gray2rgb
from .._shared.utils import assert_nD
def daisy(img, step=4, radius=15, rings=3, histograms=8, orientations=8,
normalization='l1', sigmas=None, ring_radii=None, visualize=False):
'''Extract DAISY feature descriptors densely for the given image.
DAISY is a feature descriptor similar to SIFT formulated in a way that
allows for fast dense extraction. Typically, this is practical for
bag-of-features image representations.
    The implementation follows Tola et al. [1]_ but deviates on the following
points:
    * Histogram bin contributions are smoothed with a circular Gaussian
window over the tonal range (the angular range).
* The sigma values of the spatial Gaussian smoothing in this code do not
match the sigma values in the original code by Tola et al. [2]_. In
their code, spatial smoothing is applied to both the input image and
the center histogram. However, this smoothing is not documented in [1]_
and, therefore, it is omitted.
Parameters
----------
img : (M, N) array
Input image (greyscale).
step : int, optional
Distance between descriptor sampling points.
radius : int, optional
Radius (in pixels) of the outermost ring.
rings : int, optional
Number of rings.
histograms : int, optional
Number of histograms sampled per ring.
orientations : int, optional
Number of orientations (bins) per histogram.
normalization : [ 'l1' | 'l2' | 'daisy' | 'off' ], optional
How to normalize the descriptors
* 'l1': L1-normalization of each descriptor.
* 'l2': L2-normalization of each descriptor.
* 'daisy': L2-normalization of individual histograms.
* 'off': Disable normalization.
sigmas : 1D array of float, optional
Standard deviation of spatial Gaussian smoothing for the center
histogram and for each ring of histograms. The array of sigmas should
be sorted from the center and out. I.e. the first sigma value defines
the spatial smoothing of the center histogram and the last sigma value
defines the spatial smoothing of the outermost ring. Specifying sigmas
overrides the following parameter.
``rings = len(sigmas) - 1``
ring_radii : 1D array of int, optional
Radius (in pixels) for each ring. Specifying ring_radii overrides the
following two parameters.
``rings = len(ring_radii)``
``radius = ring_radii[-1]``
If both sigmas and ring_radii are given, they must satisfy the
following predicate since no radius is needed for the center
histogram.
``len(ring_radii) == len(sigmas) + 1``
visualize : bool, optional
Generate a visualization of the DAISY descriptors
Returns
-------
descs : array
Grid of DAISY descriptors for the given image as an array
dimensionality (P, Q, R) where
``P = ceil((M - radius*2) / step)``
``Q = ceil((N - radius*2) / step)``
``R = (rings * histograms + 1) * orientations``
descs_img : (M, N, 3) array (only if visualize==True)
Visualization of the DAISY descriptors.
References
----------
.. [1] Tola et al. "Daisy: An efficient dense descriptor applied to wide-
baseline stereo." Pattern Analysis and Machine Intelligence, IEEE
Transactions on 32.5 (2010): 815-830.
.. [2] http://cvlab.epfl.ch/software/daisy
'''
assert_nD(img, 2, 'img')
img = img_as_float(img)
# Validate parameters.
if sigmas is not None and ring_radii is not None \
and len(sigmas) - 1 != len(ring_radii):
raise ValueError('`len(sigmas)-1 != len(ring_radii)`')
if ring_radii is not None:
rings = len(ring_radii)
radius = ring_radii[-1]
if sigmas is not None:
rings = len(sigmas) - 1
if sigmas is None:
sigmas = [radius * (i + 1) / float(2 * rings) for i in range(rings)]
if ring_radii is None:
ring_radii = [radius * (i + 1) / float(rings) for i in range(rings)]
if normalization not in ['l1', 'l2', 'daisy', 'off']:
raise ValueError('Invalid normalization method.')
# Compute image derivatives.
dx = np.zeros(img.shape)
dy = np.zeros(img.shape)
dx[:, :-1] = np.diff(img, n=1, axis=1)
dy[:-1, :] = np.diff(img, n=1, axis=0)
# Compute gradient orientation and magnitude and their contribution
# to the histograms.
grad_mag = sqrt(dx ** 2 + dy ** 2)
grad_ori = arctan2(dy, dx)
orientation_kappa = orientations / pi
orientation_angles = [2 * o * pi / orientations - pi
for o in range(orientations)]
hist = np.empty((orientations,) + img.shape, dtype=float)
for i, o in enumerate(orientation_angles):
# Weigh bin contribution by the circular normal distribution
hist[i, :, :] = exp(orientation_kappa * cos(grad_ori - o))
# Weigh bin contribution by the gradient magnitude
hist[i, :, :] = np.multiply(hist[i, :, :], grad_mag)
# Smooth orientation histograms for the center and all rings.
sigmas = [sigmas[0]] + sigmas
hist_smooth = np.empty((rings + 1,) + hist.shape, dtype=float)
for i in range(rings + 1):
for j in range(orientations):
hist_smooth[i, j, :, :] = gaussian_filter(hist[j, :, :],
sigma=sigmas[i])
# Assemble descriptor grid.
theta = [2 * pi * j / histograms for j in range(histograms)]
desc_dims = (rings * histograms + 1) * orientations
descs = np.empty((desc_dims, img.shape[0] - 2 * radius,
img.shape[1] - 2 * radius))
descs[:orientations, :, :] = hist_smooth[0, :, radius:-radius,
radius:-radius]
idx = orientations
for i in range(rings):
for j in range(histograms):
y_min = radius + int(round(ring_radii[i] * sin(theta[j])))
y_max = descs.shape[1] + y_min
x_min = radius + int(round(ring_radii[i] * cos(theta[j])))
x_max = descs.shape[2] + x_min
descs[idx:idx + orientations, :, :] = hist_smooth[i + 1, :,
y_min:y_max,
x_min:x_max]
idx += orientations
descs = descs[:, ::step, ::step]
descs = descs.swapaxes(0, 1).swapaxes(1, 2)
# Normalize descriptors.
if normalization != 'off':
descs += 1e-10
if normalization == 'l1':
descs /= np.sum(descs, axis=2)[:, :, np.newaxis]
elif normalization == 'l2':
descs /= sqrt(np.sum(descs ** 2, axis=2))[:, :, np.newaxis]
elif normalization == 'daisy':
for i in range(0, desc_dims, orientations):
norms = sqrt(np.sum(descs[:, :, i:i + orientations] ** 2,
axis=2))
descs[:, :, i:i + orientations] /= norms[:, :, np.newaxis]
if visualize:
descs_img = gray2rgb(img)
for i in range(descs.shape[0]):
for j in range(descs.shape[1]):
# Draw center histogram sigma
color = (1, 0, 0)
desc_y = i * step + radius
desc_x = j * step + radius
coords = draw.circle_perimeter(desc_y, desc_x, int(sigmas[0]))
draw.set_color(descs_img, coords, color)
max_bin = np.max(descs[i, j, :])
for o_num, o in enumerate(orientation_angles):
# Draw center histogram bins
bin_size = descs[i, j, o_num] / max_bin
dy = sigmas[0] * bin_size * sin(o)
dx = sigmas[0] * bin_size * cos(o)
coords = draw.line(desc_y, desc_x, int(desc_y + dy),
int(desc_x + dx))
draw.set_color(descs_img, coords, color)
for r_num, r in enumerate(ring_radii):
color_offset = float(1 + r_num) / rings
color = (1 - color_offset, 1, color_offset)
for t_num, t in enumerate(theta):
# Draw ring histogram sigmas
hist_y = desc_y + int(round(r * sin(t)))
hist_x = desc_x + int(round(r * cos(t)))
coords = draw.circle_perimeter(hist_y, hist_x,
int(sigmas[r_num + 1]))
draw.set_color(descs_img, coords, color)
for o_num, o in enumerate(orientation_angles):
# Draw histogram bins
bin_size = descs[i, j, orientations + r_num *
histograms * orientations +
t_num * orientations + o_num]
bin_size /= max_bin
dy = sigmas[r_num + 1] * bin_size * sin(o)
dx = sigmas[r_num + 1] * bin_size * cos(o)
coords = draw.line(hist_y, hist_x,
int(hist_y + dy),
int(hist_x + dx))
draw.set_color(descs_img, coords, color)
return descs, descs_img
else:
return descs
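def _demo():
    """Minimal usage sketch for ``daisy`` (illustrative parameter values only)."""
    # Assumes ``skimage.data.camera()`` is available; call as
    # ``skimage.feature._daisy._demo()`` after importing scikit-image.
    from .. import data
    image = data.camera()
    descs, descs_img = daisy(image, step=180, radius=58, rings=2,
                             histograms=6, orientations=8, visualize=True)
    # Descriptor grid is (P, Q, R) with R = (rings * histograms + 1) * orientations.
    return descs.shape, descs_img.shape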
| bsd-3-clause |
bottydim/detect-credit-card-fraud | ccfd_dnn/model_weight.py | 1 | 20628 | import os
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
import pandas as pd
import matplotlib
import numpy as np
import math
import matplotlib.pyplot as plt
from sklearn.preprocessing import Imputer
from sklearn.cross_validation import train_test_split
from sklearn import preprocessing
import plotly.tools as tls
import pandas as pd
from sqlalchemy import create_engine # database connection
import datetime as dt
import io
import logging
import plotly.plotly as py # interactive graphing
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
from plotly.graph_objs import Bar, Scatter, Marker, Layout
from heraspy.model import HeraModel
np.random.seed(1337)
import theano
import keras
from keras.preprocessing.sequence import pad_sequences
from keras.models import Model,model_from_yaml
from keras.layers import Input, Dense, GRU, LSTM, TimeDistributed, Masking,merge
from model import *
import argparse
import sys
if __name__ == "__main__":
t_start = dt.datetime.now()
parser = argparse.ArgumentParser(prog='Weighted Model')
parser.add_argument('-t','--table',required=True)
args = parser.parse_args()
####################################DATA SOURCE################################
table = vars(args)['table']
# table = 'data_trim'
# rsl_file = './data/gs_results_trim.csv'
# rsl_file = './data/psql_data_trim.csv'
# table = 'data_little_enc'
# rsl_file = './data/gs_results_little.csv'
# table = 'data_more'
# rsl_file = './data/gs_results_more.csv'
# table = 'auth'
# rsl_file = './data/auth.csv'
events_tbl = 'event'
events_tbl = None
rsl_file = './data/psql_{table}.csv'.format(table=table)
################################################################################
print "Commencing..."
data_dir = './data/'
evt_name = 'Featurespace_events_output.csv'
auth_name = 'Featurespace_auths_output.csv'
db_name = 'c1_agg.db'
address = "postgresql+pg8000://script@localhost:5432/ccfd"
# disk_engine = create_engine('sqlite:///'+data_dir+db_name,convert_unicode=True)
# disk_engine.raw_connection().connection.text_factory = str
disk_engine = create_engine(address)
#######################Settings#############################################
samples_per_epoch = trans_num_table(table,disk_engine,mode='train',trans_mode='train')
# epoch_limit = 10000
# samples_per_epoch = epoch_limit
# user_sample_size = 8000
epoch_limit = samples_per_epoch
user_sample_size = None
nb_epoch = 300
fraud_w_list = [1000.]
##########ENCODERS CONF
tbl_src = 'auth'
# tbl_src = table
tbl_evnt = 'event'
##################################
batch_size = 300
batch_size_val = 1000
print "SAMPLES per epoch:",samples_per_epoch
print "User sample size:",user_sample_size
print 'sequence length size',batch_size
# samples_per_epoch = 1959
# table = 'data_trim'
# samples_per_epoch = 485
lbl_pad_val = 2
pad_val = 0
# dropout_W_list = [0.3]
dropout_W_list = [0.4,0.5,0.6,0.7]
# dropout_W_list = [0.15,0.3,0.4,0.8]
input_dim = 44
hid_dims = [320]
num_l = [7]
lr_s = [2.5e-4]
# lr_s = [1.25e-4,6e-5]
# lr_s = [1e-2,1e-3,1e-4]
# lr_s = [1e-1,1e-2,1e-3]
num_opt = 1
opts = lambda x,lr:[keras.optimizers.RMSprop(lr=lr, rho=0.9, epsilon=1e-08),
# keras.optimizers.Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08),
# keras.optimizers.Nadam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004)
][x]
# add_info = str(int(seq_len_param))+'_class_w_'+str(fraud_w)
print 'Populating encoders'
path_encoders ='./data/encoders/{tbl_src}+{tbl_evnt}'.format(tbl_src=tbl_src,tbl_evnt=tbl_evnt)
if os.path.exists(path_encoders):
encoders = load_encoders(path_encoders)
else:
encoders = populate_encoders_scale(tbl_src,disk_engine,tbl_evnt)
with open(path_encoders, 'wb') as output:
pickle.dump(encoders, output, pickle.HIGHEST_PROTOCOL)
print 'ENCODERS SAVED to {path}!'.format(path=path_encoders)
# sys.exit()
gru_dict = {}
lstm_dict = {}
for fraud_w in fraud_w_list:
add_info = 'Mask=pad_class_w_'+str(fraud_w)+'ES-OFF'
class_weight = {0 : 1.,
1: fraud_w,
2: 0.}
for dropout_W in dropout_W_list:
for hidden_dim in hid_dims:
# gru
for opt_id in range(num_opt):
for lr in lr_s:
optimizer = opts(opt_id,lr)
for num_layers in num_l:
for rnn in ['gru']:
short_title = 'bi_'+rnn.upper()+'_'+str(hidden_dim)+'_'+str(num_layers)+'_DO-'+str(dropout_W)+'_w'+str(class_weight[1])
title = 'Bidirectional_Class'+str(class_weight[1])+'_'+rnn.upper()+'_'+str(hidden_dim)+'_'+str(num_layers)+'_'+str(type(optimizer).__name__)+'_'+str(lr)+'_epochs_'+str(nb_epoch)+'_DO-'+str(dropout_W)
print title
input_layer = Input(shape=(int(seq_len_param), input_dim),name='main_input')
mask = Masking(mask_value=pad_val)(input_layer)
x = mask
for i in range(num_layers):
if rnn == 'gru':
prev_frw = GRU(hidden_dim,#input_length=50,
return_sequences=True,go_backwards=False,stateful=False,
unroll=False,consume_less='gpu',
init='glorot_uniform', inner_init='orthogonal', activation='tanh',
inner_activation='hard_sigmoid', W_regularizer=None, U_regularizer=None,
b_regularizer=None, dropout_W=dropout_W, dropout_U=0.0)(x)
prev_bck = GRU(hidden_dim,#input_length=50,
return_sequences=True,go_backwards=True,stateful=False,
unroll=False,consume_less='gpu',
init='glorot_uniform', inner_init='orthogonal', activation='tanh',
inner_activation='hard_sigmoid', W_regularizer=None, U_regularizer=None,
b_regularizer=None, dropout_W=dropout_W, dropout_U=0.0)(x)
else:
prev_frw = LSTM(hidden_dim, return_sequences=True,go_backwards=False,stateful=False,
init='glorot_uniform', inner_init='orthogonal',
forget_bias_init='one', activation='tanh', inner_activation='hard_sigmoid',
W_regularizer=None, U_regularizer=None, b_regularizer=None, dropout_W=dropout_W, dropout_U=0.0)(x)
prev_bck = LSTM(hidden_dim, return_sequences=True,go_backwards=True,stateful=False,
init='glorot_uniform', inner_init='orthogonal',
forget_bias_init='one', activation='tanh', inner_activation='hard_sigmoid',
W_regularizer=None, U_regularizer=None, b_regularizer=None, dropout_W=dropout_W, dropout_U=0.0)(x)
x = merge([prev_frw, prev_bck], mode='concat')
output_layer = TimeDistributed(Dense(3,activation='softmax'))(x)
model = Model(input=[input_layer],output=[output_layer])
model.compile(optimizer=optimizer,
loss='sparse_categorical_crossentropy',
metrics=['accuracy'],
sample_weight_mode="temporal")
########save architecture ######
arch_dir = './data/models/archs/'+short_title+'.yml'
yaml_string = model.to_yaml()
with open(arch_dir, 'wb') as output:
pickle.dump(yaml_string, output, pickle.HIGHEST_PROTOCOL)
print 'model saved!'
##############
user_mode = 'train'
trans_mode = 'train'
data_gen = data_generator(user_mode,trans_mode,disk_engine,encoders,table=table,
batch_size=batch_size,usr_ratio=80,class_weight=class_weight,lbl_pad_val = lbl_pad_val, pad_val = pad_val,
sub_sample=user_sample_size,epoch_size=epoch_limit,events_tbl=events_tbl)
# sub_sample=user_sample_size,epoch_size=samples_per_epoch)
########validation data
print 'Generating Validation set!'
user_mode = 'test'
trans_mode = 'test'
val_gen = data_generator(user_mode,trans_mode,disk_engine,encoders,table=table,
batch_size=batch_size_val,usr_ratio=80,class_weight=class_weight,lbl_pad_val = lbl_pad_val, pad_val = pad_val,
sub_sample=None,epoch_size=None,events_tbl=events_tbl)
validation_data = next(val_gen)
print '################GENERATED#######################'
###############CALLBACKS
patience = 30
early_Stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience, verbose=0, mode='auto')
save_path = './data/models/'+table+'/'
var_name = '.{epoch:02d}-{val_loss:.5f}.hdf5'
checkpoint = keras.callbacks.ModelCheckpoint(save_path+short_title+var_name, monitor='val_loss', verbose=1, save_best_only=True, mode='auto')
root_url = 'http://localhost:9000'
remote_log = keras.callbacks.RemoteMonitor(root=root_url)
# callbacks = [early_Stop,checkpoint]
callbacks = [early_Stop,checkpoint,remote_log]
callbacks = []
history = model.fit_generator(data_gen, samples_per_epoch, nb_epoch, verbose=1, callbacks=callbacks,validation_data=validation_data, nb_val_samples=None, class_weight=None, max_q_size=10000)
py.sign_in('bottydim', 'o1kuyms9zv')
auc_list = []
print '#########################TRAIN STATS################'
user_mode = 'train'
trans_mode = 'train'
val_samples = trans_num_table(table,disk_engine,mode=user_mode,trans_mode=trans_mode)
print '# samples',val_samples
plt_filename = './figures/GS/'+table+'/'+'ROC_'+user_mode+'_'+trans_mode+'_'+title+'_'+add_info+".png"
data_gen = data_generator(user_mode,trans_mode,disk_engine,encoders,table=table,
batch_size=batch_size,usr_ratio=80,class_weight=None,lbl_pad_val = lbl_pad_val, pad_val = pad_val,events_tbl=events_tbl)
eval_list = eval_auc_generator(model, data_gen, val_samples, max_q_size=10000,plt_filename=plt_filename)
auc_val = eval_list[0]
clc_report = eval_list[1]
acc = eval_list[2]
print "AUC:",auc_val
print 'CLassification report'
print clc_report
print 'Accuracy'
print acc
auc_list.append(str(auc_val))
print '##################EVALUATION USERS#########################'
user_mode = 'test'
trans_mode = 'train'
val_samples = trans_num_table(table,disk_engine,mode=user_mode,trans_mode=trans_mode)
print '# samples',val_samples
plt_filename = './figures/GS/'+table+'/'+'ROC_'+user_mode+'_'+trans_mode+'_'+title+'_'+add_info+".png"
eval_gen = data_generator(user_mode,trans_mode,disk_engine,encoders,table=table,
batch_size=batch_size,usr_ratio=80,class_weight=None,lbl_pad_val = lbl_pad_val, pad_val = pad_val,events_tbl=events_tbl)
eval_list = eval_auc_generator(model, eval_gen, val_samples, max_q_size=10000,plt_filename=plt_filename)
auc_val = eval_list[0]
clc_report = eval_list[1]
acc = eval_list[2]
print "AUC:",auc_val
print 'CLassification report'
print clc_report
print 'Accuracy'
print acc
auc_list.append(str(auc_val))
print '#####################################################'
print '##################EVALUATION Transactions#########################'
user_mode = 'train'
trans_mode = 'test'
val_samples = trans_num_table(table,disk_engine,mode=user_mode,trans_mode=trans_mode)
print '# samples',val_samples
plt_filename = './figures/GS/'+table+'/'+'ROC_'+user_mode+'_'+trans_mode+'_'+title+'_'+add_info+".png"
eval_gen = data_generator(user_mode,trans_mode,disk_engine,encoders,table=table,
batch_size=batch_size,usr_ratio=80,class_weight=None,lbl_pad_val = lbl_pad_val, pad_val = pad_val,events_tbl=events_tbl)
eval_list = eval_auc_generator(model, eval_gen, val_samples, max_q_size=10000,plt_filename=plt_filename)
auc_val = eval_list[0]
clc_report = eval_list[1]
acc = eval_list[2]
print "AUC:",auc_val
print 'CLassification report'
print clc_report
print 'Accuracy'
print acc
auc_list.append(str(auc_val))
print '#####################################################'
print '##################EVALUATION Pure#########################'
user_mode = 'test'
trans_mode = 'test'
val_samples = trans_num_table(table,disk_engine,mode=user_mode,trans_mode=trans_mode)
print '# samples',val_samples
plt_filename = './figures/GS/'+table+'/'+'ROC_'+user_mode+'_'+trans_mode+'_'+title+'_'+add_info+".png"
eval_gen = data_generator(user_mode,trans_mode,disk_engine,encoders,table=table,
batch_size=batch_size,usr_ratio=80,class_weight=None,lbl_pad_val = lbl_pad_val, pad_val = pad_val,events_tbl=events_tbl)
eval_list = eval_auc_generator(model, eval_gen, val_samples, max_q_size=10000,plt_filename=plt_filename)
auc_val = eval_list[0]
clc_report = eval_list[1]
acc = eval_list[2]
print "AUC:",auc_val
print 'CLassification report'
print clc_report
print 'Accuracy'
print acc
auc_list.append(str(auc_val))
print '#####################################################'
with io.open(rsl_file, 'a', encoding='utf-8') as file:
auc_string = ','.join(auc_list)
title_csv = title.replace('_',',')+','+str(history.history['acc'][-1])+','+str(history.history['loss'][-1])+','+str(auc_val)+','+str(acc)+','+auc_string+'\n'
file.write(unicode(title_csv))
print 'logged @ {file}'.format(file=rsl_file)
trim_point = -15
fig = {
'data': [Scatter(
x=history.epoch[trim_point:],
y=history.history['loss'][trim_point:])],
'layout': {'title': title}
}
py.image.save_as(fig,filename='./results/figures/'+table+'/'+short_title+'_'+'LOSS'+'_'+add_info+".png")
trim_point = 0
fig = {
'data': [Scatter(
x=history.epoch[trim_point:],
y=history.history['loss'][trim_point:])],
'layout': {'title': title}
}
py.image.save_as(fig,filename='./results/figures/'+table+'/'+short_title+'_'+'LOSS'+'_'+'FULL'+".png")
# iplot(fig,filename='figures/'+title,image='png')
# title = title.replace('Loss','Acc')
fig = {
'data': [Scatter(
x=history.epoch[trim_point:],
y=history.history['acc'][trim_point:])],
'layout': {'title': title}
}
filename_val='./results/figures/'+table+'/'+short_title+'_'+'ACC'+'_'+add_info+".png"
py.image.save_as(fig,filename=filename_val)
print 'exported @',filename_val
fig = {
'data': [Scatter(
x=history.epoch[trim_point:],
y=history.history['val_loss'][trim_point:])],
'layout': {'title': title}
}
py.image.save_as(fig,filename='./results/figures/'+table+'/'+short_title+'_'+'VAL LOSS'+'_'+add_info+".png")
print 'time taken: {time}'.format(time=days_hours_minutes_seconds(dt.datetime.now()-t_start)) | mit |
jagg81/translate-toolkit | build/lib.linux-x86_64-2.6/translate/lang/kn.py | 4 | 1094 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""This module represents Kannada language.
For more information, see U{http://en.wikipedia.org/wiki/Kannada_language}
"""
from translate.lang import common
class kn(common.Common):
"""This class represents Kannada."""
ignoretests = ["startcaps", "simplecaps"]
| gpl-2.0 |
eiginn/platformio | platformio/builder/scripts/titiva.py | 3 | 1759 | # Copyright 2014-2016 Ivan Kravets <me@ikravets.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Builder for Texas Instruments
Tiva C Series ARM Cortex-M4 microcontrollers.
"""
from os.path import join
from SCons.Script import (COMMAND_LINE_TARGETS, AlwaysBuild, Default,
DefaultEnvironment, SConscript)
env = DefaultEnvironment()
SConscript(env.subst(join("$PIOBUILDER_DIR", "scripts", "basearm.py")))
env.Replace(
UPLOADER=join("$PIOPACKAGES_DIR", "tool-lm4flash", "lm4flash"),
UPLOADCMD='"$UPLOADER" $SOURCES'
)
env.Append(
LINKFLAGS=[
"-nostartfiles",
"-nostdlib"
]
)
#
# Target: Build executable and linkable firmware
#
target_elf = env.BuildProgram()
#
# Target: Build the .bin file
#
if "uploadlazy" in COMMAND_LINE_TARGETS:
target_firm = join("$BUILD_DIR", "firmware.bin")
else:
target_firm = env.ElfToBin(join("$BUILD_DIR", "firmware"), target_elf)
#
# Target: Print binary size
#
target_size = env.Alias("size", target_elf, "$SIZEPRINTCMD")
AlwaysBuild(target_size)
#
# Target: Upload firmware
#
upload = env.Alias(["upload", "uploadlazy"], target_firm, "$UPLOADCMD")
AlwaysBuild(upload)
#
# Target: Define targets
#
Default([target_firm, target_size])
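#
# Hedged usage note, not part of the original script: with this builder
# registered, the targets above are normally driven through the PlatformIO
# CLI, e.g. `platformio run` to build the firmware and
# `platformio run --target upload` to flash it via lm4flash.
#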
| apache-2.0 |
heeraj123/oh-mainline | mysite/customs/management/commands/export_bug_trackers.py | 15 | 1197 | # This file is part of OpenHatch.
# Copyright (C) 2012 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import yaml
from django.core.management.base import BaseCommand
import mysite.customs.models
class Command(BaseCommand):
help = "Print a YAML file with configuration all Trac-based trackers"
def handle(self, *args, **options):
as_dicts = []
for tracker in mysite.customs.models.TracTrackerModel.objects.select_subclasses():
as_dict = tracker.as_dict()
as_dicts.append(as_dict)
print yaml.safe_dump(as_dicts)
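# Hedged usage note, not part of the original command: as a Django management
# command this is typically run as
#   python manage.py export_bug_trackers > trackers.yaml
# (the command name comes from this module's filename).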
| agpl-3.0 |
GbalsaC/bitnamiP | venv/src/edx-submissions/submissions/tests/test_models.py | 1 | 5673 | """
Tests for submission models.
"""
from django.test import TestCase
from submissions.models import Submission, Score, ScoreSummary, StudentItem
class TestScoreSummary(TestCase):
"""
Test selection of options from a rubric.
"""
def test_latest(self):
item = StudentItem.objects.create(
student_id="score_test_student",
course_id="score_test_course",
item_id="i4x://mycourse/class_participation.section_attendance"
)
first_score = Score.objects.create(
student_item=item,
submission=None,
points_earned=8,
points_possible=10,
)
second_score = Score.objects.create(
student_item=item,
submission=None,
points_earned=5,
points_possible=10,
)
latest_score = ScoreSummary.objects.get(student_item=item).latest
self.assertEqual(second_score, latest_score)
def test_highest(self):
item = StudentItem.objects.create(
student_id="score_test_student",
course_id="score_test_course",
item_id="i4x://mycourse/special_presentation"
)
# Low score is higher than no score...
low_score = Score.objects.create(
student_item=item,
points_earned=0,
points_possible=0,
)
self.assertEqual(
low_score,
ScoreSummary.objects.get(student_item=item).highest
)
# Medium score should supplant low score
med_score = Score.objects.create(
student_item=item,
points_earned=8,
points_possible=10,
)
self.assertEqual(
med_score,
ScoreSummary.objects.get(student_item=item).highest
)
# Even though the points_earned is higher in the med_score, high_score
# should win because it's 4/4 as opposed to 8/10.
high_score = Score.objects.create(
student_item=item,
points_earned=4,
points_possible=4,
)
self.assertEqual(
high_score,
ScoreSummary.objects.get(student_item=item).highest
)
# Put another medium score to make sure it doesn't get set back down
med_score2 = Score.objects.create(
student_item=item,
points_earned=5,
points_possible=10,
)
self.assertEqual(
high_score,
ScoreSummary.objects.get(student_item=item).highest
)
self.assertEqual(
med_score2,
ScoreSummary.objects.get(student_item=item).latest
)
def test_reset_score_highest(self):
item = StudentItem.objects.create(
student_id="score_test_student",
course_id="score_test_course",
item_id="i4x://mycourse/special_presentation"
)
# Reset score with no score
Score.create_reset_score(item)
highest = ScoreSummary.objects.get(student_item=item).highest
self.assertEqual(highest.points_earned, 0)
self.assertEqual(highest.points_possible, 0)
# Non-reset score after a reset score
submission = Submission.objects.create(student_item=item, attempt_number=1)
Score.objects.create(
student_item=item,
submission=submission,
points_earned=2,
points_possible=3,
)
highest = ScoreSummary.objects.get(student_item=item).highest
self.assertEqual(highest.points_earned, 2)
self.assertEqual(highest.points_possible, 3)
# Reset score after a non-reset score
Score.create_reset_score(item)
highest = ScoreSummary.objects.get(student_item=item).highest
self.assertEqual(highest.points_earned, 0)
self.assertEqual(highest.points_possible, 0)
def test_highest_score_hidden(self):
item = StudentItem.objects.create(
student_id="score_test_student",
course_id="score_test_course",
item_id="i4x://mycourse/special_presentation"
)
# Score with points possible set to 0
# (by convention a "hidden" score)
submission = Submission.objects.create(student_item=item, attempt_number=1)
Score.objects.create(
student_item=item,
submission=submission,
points_earned=0,
points_possible=0,
)
highest = ScoreSummary.objects.get(student_item=item).highest
self.assertEqual(highest.points_earned, 0)
self.assertEqual(highest.points_possible, 0)
# Score with points
submission = Submission.objects.create(student_item=item, attempt_number=1)
Score.objects.create(
student_item=item,
submission=submission,
points_earned=1,
points_possible=2,
)
highest = ScoreSummary.objects.get(student_item=item).highest
self.assertEqual(highest.points_earned, 1)
self.assertEqual(highest.points_possible, 2)
# Another score with points possible set to 0
# The previous score should remain the highest score.
submission = Submission.objects.create(student_item=item, attempt_number=1)
Score.objects.create(
student_item=item,
submission=submission,
points_earned=0,
points_possible=0,
)
highest = ScoreSummary.objects.get(student_item=item).highest
self.assertEqual(highest.points_earned, 1)
self.assertEqual(highest.points_possible, 2)
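# Hedged summary, not part of the original tests: taken together, the
# assertions in this class suggest that "highest" compares scores by the
# fraction points_earned / points_possible (which is why 4/4 outranks 8/10),
# with "hidden" scores (points_possible == 0) ranking below any real score
# while still counting when they are the only score present.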
| agpl-3.0 |
benoitsteiner/tensorflow-opencl | tensorflow/contrib/quantization/python/array_ops.py | 178 | 1156 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Quantized Array Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.ops import gen_array_ops as quantized_gen_array_ops
from tensorflow.python.ops.gen_array_ops import dequantize
from tensorflow.python.ops.gen_array_ops import quantize_v2
from tensorflow.python.ops.gen_array_ops import quantized_concat
# pylint: enable=unused-import
| apache-2.0 |
brandonPurvis/osf.io | website/files/models/dataverse.py | 39 | 1543 | from framework.auth.core import _get_current_user
from website.files.models.base import File, Folder, FileNode, FileVersion
__all__ = ('DataverseFile', 'DataverseFolder', 'DataverseFileNode')
class DataverseFileNode(FileNode):
provider = 'dataverse'
class DataverseFolder(DataverseFileNode, Folder):
pass
class DataverseFile(DataverseFileNode, File):
version_identifier = 'version'
def update(self, revision, data, user=None):
"""Note: Dataverse only has psuedo versions, don't save them
Dataverse requires a user for the weird check below
and Django dies when _get_current_user is called
"""
self.name = data['name']
self.materialized_path = data['materialized']
self.save()
version = FileVersion(identifier=revision)
version.update_metadata(data, save=False)
user = user or _get_current_user()
if not user or not self.node.can_edit(user=user):
try:
# Users without edit permission can only see published files
if not data['extra']['hasPublishedVersion']:
# Blank out name and path for the render
# Dont save because there's no reason to persist the change
self.name = ''
self.materialized_path = ''
return (version, '<div class="alert alert-info" role="alert">This file does not exist.</div>')
except (KeyError, IndexError):
pass
return version
| apache-2.0 |
TheTypoMaster/my-vim-set-mac | .vim/bundle/YouCompleteMe/third_party/ycmd/third_party/jedi/test/test_evaluate/test_buildout_detection.py | 13 | 2751 | import os
from textwrap import dedent
from jedi._compatibility import u
from jedi.evaluate.sys_path import (_get_parent_dir_with_file,
_get_buildout_scripts,
sys_path_with_modifications,
_check_module)
from jedi.evaluate import Evaluator
from jedi.parser import Parser, load_grammar
from ..helpers import cwd_at
@cwd_at('test/test_evaluate/buildout_project/src/proj_name')
def test_parent_dir_with_file():
parent = _get_parent_dir_with_file(
os.path.abspath(os.curdir), 'buildout.cfg')
assert parent is not None
assert parent.endswith(os.path.join('test', 'test_evaluate', 'buildout_project'))
@cwd_at('test/test_evaluate/buildout_project/src/proj_name')
def test_buildout_detection():
scripts = _get_buildout_scripts(os.path.abspath('./module_name.py'))
assert len(scripts) == 1
curdir = os.path.abspath(os.curdir)
appdir_path = os.path.normpath(os.path.join(curdir, '../../bin/app'))
assert scripts[0] == appdir_path
def test_append_on_non_sys_path():
SRC = dedent(u("""
class Dummy(object):
path = []
d = Dummy()
d.path.append('foo')"""))
grammar = load_grammar()
p = Parser(grammar, SRC)
paths = _check_module(Evaluator(grammar), p.module)
assert len(paths) > 0
assert 'foo' not in paths
def test_path_from_invalid_sys_path_assignment():
SRC = dedent(u("""
import sys
sys.path = 'invalid'"""))
grammar = load_grammar()
p = Parser(grammar, SRC)
paths = _check_module(Evaluator(grammar), p.module)
assert len(paths) > 0
assert 'invalid' not in paths
@cwd_at('test/test_evaluate/buildout_project/src/proj_name/')
def test_sys_path_with_modifications():
SRC = dedent(u("""
import os
"""))
grammar = load_grammar()
p = Parser(grammar, SRC)
p.module.path = os.path.abspath(os.path.join(os.curdir, 'module_name.py'))
paths = sys_path_with_modifications(Evaluator(grammar), p.module)
assert '/tmp/.buildout/eggs/important_package.egg' in paths
def test_path_from_sys_path_assignment():
SRC = dedent(u("""
#!/usr/bin/python
import sys
sys.path[0:0] = [
'/usr/lib/python3.4/site-packages',
'/home/test/.buildout/eggs/important_package.egg'
]
path[0:0] = [1]
import important_package
if __name__ == '__main__':
sys.exit(important_package.main())"""))
grammar = load_grammar()
p = Parser(grammar, SRC)
paths = _check_module(Evaluator(grammar), p.module)
assert 1 not in paths
assert '/home/test/.buildout/eggs/important_package.egg' in paths
| gpl-2.0 |
takeshineshiro/nova | nova/tests/unit/api/openstack/compute/contrib/test_keypairs.py | 3 | 20933 | # Copyright 2011 Eldar Nugaev
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_serialization import jsonutils
import webob
from nova.api.openstack.compute import keypairs as keypairs_v21
from nova.api.openstack.compute.legacy_v2.contrib import keypairs \
as keypairs_v2
from nova.api.openstack import wsgi as os_wsgi
from nova import db
from nova import exception
from nova.openstack.common import policy as common_policy
from nova import policy
from nova import quota
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.objects import test_keypair
QUOTAS = quota.QUOTAS
keypair_data = {
'public_key': 'FAKE_KEY',
'fingerprint': 'FAKE_FINGERPRINT',
}
def fake_keypair(name):
return dict(test_keypair.fake_keypair,
name=name, **keypair_data)
def db_key_pair_get_all_by_user(self, user_id):
return [fake_keypair('FAKE')]
def db_key_pair_create(self, keypair):
return fake_keypair(name=keypair['name'])
def db_key_pair_destroy(context, user_id, name):
if not (user_id and name):
raise Exception()
def db_key_pair_create_duplicate(context, keypair):
raise exception.KeyPairExists(key_name=keypair.get('name', ''))
class KeypairsTestV21(test.TestCase):
base_url = '/v2/fake'
validation_error = exception.ValidationError
wsgi_api_version = os_wsgi.DEFAULT_API_VERSION
def _setup_app_and_controller(self):
self.app_server = fakes.wsgi_app_v21(init_only=('os-keypairs',
'servers'))
self.controller = keypairs_v21.KeypairController()
def setUp(self):
super(KeypairsTestV21, self).setUp()
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
self.stubs.Set(db, "key_pair_get_all_by_user",
db_key_pair_get_all_by_user)
self.stubs.Set(db, "key_pair_create",
db_key_pair_create)
self.stubs.Set(db, "key_pair_destroy",
db_key_pair_destroy)
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Keypairs'])
self._setup_app_and_controller()
self.req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
def test_keypair_list(self):
res_dict = self.controller.index(self.req)
response = {'keypairs': [{'keypair': dict(keypair_data, name='FAKE')}]}
self.assertEqual(res_dict, response)
def test_keypair_create(self):
body = {'keypair': {'name': 'create_test'}}
res_dict = self.controller.create(self.req, body=body)
self.assertTrue(len(res_dict['keypair']['fingerprint']) > 0)
self.assertTrue(len(res_dict['keypair']['private_key']) > 0)
self._assert_keypair_type(res_dict)
def _test_keypair_create_bad_request_case(self,
body,
exception):
self.assertRaises(exception,
self.controller.create, self.req, body=body)
def test_keypair_create_with_empty_name(self):
body = {'keypair': {'name': ''}}
self._test_keypair_create_bad_request_case(body,
self.validation_error)
def test_keypair_create_with_name_too_long(self):
body = {
'keypair': {
'name': 'a' * 256
}
}
self._test_keypair_create_bad_request_case(body,
self.validation_error)
def test_keypair_create_with_non_alphanumeric_name(self):
body = {
'keypair': {
'name': 'test/keypair'
}
}
self._test_keypair_create_bad_request_case(body,
webob.exc.HTTPBadRequest)
def test_keypair_import_bad_key(self):
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-what negative',
},
}
self._test_keypair_create_bad_request_case(body,
webob.exc.HTTPBadRequest)
def test_keypair_create_with_invalid_keypair_body(self):
body = {'alpha': {'name': 'create_test'}}
self._test_keypair_create_bad_request_case(body,
self.validation_error)
def test_keypair_import(self):
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
'bHkXa6OciiJDvkRzJXzf',
},
}
res_dict = self.controller.create(self.req, body=body)
# FIXME(ja): should we check that public_key was sent to create?
self.assertTrue(len(res_dict['keypair']['fingerprint']) > 0)
self.assertNotIn('private_key', res_dict['keypair'])
self._assert_keypair_type(res_dict)
def test_keypair_import_quota_limit(self):
def fake_quotas_count(self, context, resource, *args, **kwargs):
return 100
self.stubs.Set(QUOTAS, "count", fake_quotas_count)
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
'bHkXa6OciiJDvkRzJXzf',
},
}
ex = self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, self.req, body=body)
self.assertIn('Quota exceeded, too many key pairs.', ex.explanation)
def test_keypair_create_quota_limit(self):
def fake_quotas_count(self, context, resource, *args, **kwargs):
return 100
self.stubs.Set(QUOTAS, "count", fake_quotas_count)
body = {
'keypair': {
'name': 'create_test',
},
}
ex = self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, self.req, body=body)
self.assertIn('Quota exceeded, too many key pairs.', ex.explanation)
def test_keypair_create_duplicate(self):
self.stubs.Set(db, "key_pair_create", db_key_pair_create_duplicate)
body = {'keypair': {'name': 'create_duplicate'}}
ex = self.assertRaises(webob.exc.HTTPConflict,
self.controller.create, self.req, body=body)
self.assertIn("Key pair 'create_duplicate' already exists.",
ex.explanation)
def test_keypair_delete(self):
self.controller.delete(self.req, 'FAKE')
def test_keypair_get_keypair_not_found(self):
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, self.req, 'DOESNOTEXIST')
def test_keypair_delete_not_found(self):
def db_key_pair_get_not_found(context, user_id, name):
raise exception.KeypairNotFound(user_id=user_id, name=name)
self.stubs.Set(db, "key_pair_destroy",
db_key_pair_get_not_found)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, self.req, 'FAKE')
def test_keypair_show(self):
def _db_key_pair_get(context, user_id, name):
return dict(test_keypair.fake_keypair,
name='foo', public_key='XXX', fingerprint='YYY',
type='ssh')
self.stubs.Set(db, "key_pair_get", _db_key_pair_get)
res_dict = self.controller.show(self.req, 'FAKE')
self.assertEqual('foo', res_dict['keypair']['name'])
self.assertEqual('XXX', res_dict['keypair']['public_key'])
self.assertEqual('YYY', res_dict['keypair']['fingerprint'])
self._assert_keypair_type(res_dict)
def test_keypair_show_not_found(self):
def _db_key_pair_get(context, user_id, name):
raise exception.KeypairNotFound(user_id=user_id, name=name)
self.stubs.Set(db, "key_pair_get", _db_key_pair_get)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, self.req, 'FAKE')
def test_show_server(self):
self.stubs.Set(db, 'instance_get',
fakes.fake_instance_get())
self.stubs.Set(db, 'instance_get_by_uuid',
fakes.fake_instance_get())
req = webob.Request.blank(self.base_url + '/servers/1')
req.headers['Content-Type'] = 'application/json'
response = req.get_response(self.app_server)
self.assertEqual(response.status_int, 200)
res_dict = jsonutils.loads(response.body)
self.assertIn('key_name', res_dict['server'])
self.assertEqual(res_dict['server']['key_name'], '')
def test_detail_servers(self):
# Sort is disabled in v2 without an extension so stub out
# the non-sorted DB get
self.stubs.Set(db, 'instance_get_all_by_filters',
fakes.fake_instance_get_all_by_filters())
# But it is enabled in v3 so stub out the sorted function
self.stubs.Set(db, 'instance_get_all_by_filters_sort',
fakes.fake_instance_get_all_by_filters())
req = fakes.HTTPRequest.blank(self.base_url + '/servers/detail')
res = req.get_response(self.app_server)
server_dicts = jsonutils.loads(res.body)['servers']
self.assertEqual(len(server_dicts), 5)
for server_dict in server_dicts:
self.assertIn('key_name', server_dict)
self.assertEqual(server_dict['key_name'], '')
def _assert_keypair_type(self, res_dict):
self.assertNotIn('type', res_dict['keypair'])
class KeypairPolicyTestV21(test.TestCase):
KeyPairController = keypairs_v21.KeypairController()
policy_path = 'os_compute_api:os-keypairs'
def setUp(self):
super(KeypairPolicyTestV21, self).setUp()
def _db_key_pair_get(context, user_id, name):
return dict(test_keypair.fake_keypair,
name='foo', public_key='XXX', fingerprint='YYY',
type='ssh')
self.stubs.Set(db, "key_pair_get",
_db_key_pair_get)
self.stubs.Set(db, "key_pair_get_all_by_user",
db_key_pair_get_all_by_user)
self.stubs.Set(db, "key_pair_create",
db_key_pair_create)
self.stubs.Set(db, "key_pair_destroy",
db_key_pair_destroy)
self.req = fakes.HTTPRequest.blank('')
def test_keypair_list_fail_policy(self):
rules = {self.policy_path + ':index':
common_policy.parse_rule('role:admin')}
policy.set_rules(rules)
self.assertRaises(exception.Forbidden,
self.KeyPairController.index,
self.req)
def test_keypair_list_pass_policy(self):
rules = {self.policy_path + ':index':
common_policy.parse_rule('')}
policy.set_rules(rules)
res = self.KeyPairController.index(self.req)
self.assertIn('keypairs', res)
def test_keypair_show_fail_policy(self):
rules = {self.policy_path + ':show':
common_policy.parse_rule('role:admin')}
policy.set_rules(rules)
self.assertRaises(exception.Forbidden,
self.KeyPairController.show,
self.req, 'FAKE')
def test_keypair_show_pass_policy(self):
rules = {self.policy_path + ':show':
common_policy.parse_rule('')}
policy.set_rules(rules)
res = self.KeyPairController.show(self.req, 'FAKE')
self.assertIn('keypair', res)
def test_keypair_create_fail_policy(self):
body = {'keypair': {'name': 'create_test'}}
rules = {self.policy_path + ':create':
common_policy.parse_rule('role:admin')}
policy.set_rules(rules)
self.assertRaises(exception.Forbidden,
self.KeyPairController.create,
self.req, body=body)
def test_keypair_create_pass_policy(self):
body = {'keypair': {'name': 'create_test'}}
rules = {self.policy_path + ':create':
common_policy.parse_rule('')}
policy.set_rules(rules)
res = self.KeyPairController.create(self.req, body=body)
self.assertIn('keypair', res)
def test_keypair_delete_fail_policy(self):
rules = {self.policy_path + ':delete':
common_policy.parse_rule('role:admin')}
policy.set_rules(rules)
self.assertRaises(exception.Forbidden,
self.KeyPairController.delete,
self.req, 'FAKE')
def test_keypair_delete_pass_policy(self):
rules = {self.policy_path + ':delete':
common_policy.parse_rule('')}
policy.set_rules(rules)
self.KeyPairController.delete(self.req, 'FAKE')
class KeypairsTestV2(KeypairsTestV21):
validation_error = webob.exc.HTTPBadRequest
def _setup_app_and_controller(self):
self.app_server = fakes.wsgi_app(init_only=('servers',))
self.controller = keypairs_v2.KeypairController()
class KeypairsTestV22(KeypairsTestV21):
wsgi_api_version = '2.2'
def test_keypair_list(self):
res_dict = self.controller.index(self.req)
expected = {'keypairs': [{'keypair': dict(keypair_data, name='FAKE',
type='ssh')}]}
self.assertEqual(expected, res_dict)
def _assert_keypair_type(self, res_dict):
self.assertEqual('ssh', res_dict['keypair']['type'])
class KeypairsTestV210(KeypairsTestV22):
wsgi_api_version = '2.10'
def test_keypair_list_other_user(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs?user_id=foo',
version=self.wsgi_api_version,
use_admin_context=True)
with mock.patch.object(self.controller.api, 'get_key_pairs') as mock_g:
self.controller.index(req)
userid = mock_g.call_args_list[0][0][1]
self.assertEqual('foo', userid)
def test_keypair_list_other_user_not_admin(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs?user_id=foo',
version=self.wsgi_api_version)
with mock.patch.object(self.controller.api, 'get_key_pairs'):
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.index, req)
def test_keypair_show_other_user(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs/FAKE?user_id=foo',
version=self.wsgi_api_version,
use_admin_context=True)
with mock.patch.object(self.controller.api, 'get_key_pair') as mock_g:
self.controller.show(req, 'FAKE')
userid = mock_g.call_args_list[0][0][1]
self.assertEqual('foo', userid)
def test_keypair_show_other_user_not_admin(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs/FAKE?user_id=foo',
version=self.wsgi_api_version)
with mock.patch.object(self.controller.api, 'get_key_pair'):
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.show, req, 'FAKE')
def test_keypair_delete_other_user(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs/FAKE?user_id=foo',
version=self.wsgi_api_version,
use_admin_context=True)
with mock.patch.object(self.controller.api,
'delete_key_pair') as mock_g:
self.controller.delete(req, 'FAKE')
userid = mock_g.call_args_list[0][0][1]
self.assertEqual('foo', userid)
def test_keypair_delete_other_user_not_admin(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs/FAKE?user_id=foo',
version=self.wsgi_api_version)
with mock.patch.object(self.controller.api, 'delete_key_pair'):
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.delete, req, 'FAKE')
def test_keypair_create_other_user(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs',
version=self.wsgi_api_version,
use_admin_context=True)
body = {'keypair': {'name': 'create_test',
'user_id': '8861f37f-034e-4ca8-8abe-6d13c074574a'}}
with mock.patch.object(self.controller.api,
'create_key_pair',
return_value=(mock.MagicMock(), 1)) as mock_g:
res = self.controller.create(req, body=body)
userid = mock_g.call_args_list[0][0][1]
self.assertEqual('8861f37f-034e-4ca8-8abe-6d13c074574a', userid)
self.assertIn('keypair', res)
def test_keypair_import_other_user(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs',
version=self.wsgi_api_version,
use_admin_context=True)
body = {'keypair': {'name': 'create_test',
'user_id': '8861f37f-034e-4ca8-8abe-6d13c074574a',
'public_key': 'public_key'}}
with mock.patch.object(self.controller.api,
'import_key_pair') as mock_g:
res = self.controller.create(req, body=body)
userid = mock_g.call_args_list[0][0][1]
self.assertEqual('8861f37f-034e-4ca8-8abe-6d13c074574a', userid)
self.assertIn('keypair', res)
def test_keypair_create_other_user_not_admin(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs',
version=self.wsgi_api_version)
body = {'keypair': {'name': 'create_test',
'user_id': '8861f37f-034e-4ca8-8abe-6d13c074574a'}}
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.create,
req, body=body)
class KeypairPolicyTestV2(KeypairPolicyTestV21):
KeyPairController = keypairs_v2.KeypairController()
policy_path = 'compute_extension:keypairs'
| apache-2.0 |
dostavro/dotfiles | sublime2/Packages/SublimeCodeIntel/libs/codeintel2/database/resource.py | 2 | 5541 | #!python
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
# TODO: docstring
import os
from os.path import join, dirname, abspath, isabs
import logging
#---- globals
log = logging.getLogger("codeintel.db")
# log.setLevel(logging.DEBUG)
#---- Resource classes
# For abstraction and canonicalization of paths.
class Resource(object):
"""A reference to a resource for the database.
Typically this is just a path to a file on the local disk. However
the intention is to also support remote file urls (TODO) and unsaved
files (TODO).
This class also provides canonicalization on comparison of resource
paths.
"""
def __init__(self, path):
self.path = path
@property
def canon_path(self):
# normalize os.altsep to os.sep? or even consider normalizing to
# all '/'. This gets more complicated if we have URL resources for
# remote files: subclassing.
XXX
class AreaResource(Resource):
"""A resource that is at a relative path under some area.
For example, at 'template/Perl.pl' under 'the Komodo user data
dir' or at 'catalog/baz.cix' under 'the codeintel2 package dir'.
TODO: change ctor sig to AreaResource([area, ] path). More logical
to have input be in same order as .area_path.
"""
# The known path areas. We only have use for the one right now.
_path_areas = {
"ci-pkg-dir": dirname(dirname(abspath(__file__))),
}
_ordered_area_items = [(d, a) for a, d in _path_areas.items()]
_ordered_area_items.sort(key=lambda i: len(i[0]), reverse=True)
@classmethod
def area_and_subpath_from_path(cls, path):
# XXX Need to worry about canonicalization!
for area_dir, area in cls._ordered_area_items:
if (path.startswith(area_dir)
# Ensure we are matching at a dir boundary. This implies
# a limitation that there *is* a subpath. I'm fine with
# that.
and path[len(area_dir)] in (os.sep, os.altsep)):
return area, path[len(area_dir)+1:]
return None, path
def __init__(self, path, area=None):
"""Create an area-relative resource.
"path" is either the full path to the resource, or a
relative path under the given area name. "area" must be
specified for the latter.
"area" (optional) can be given to specify under which area
this resource resides. If not given, the best-fit of the
known path areas will be used.
"""
if area is not None:
if area not in self._path_areas:
raise ValueError("unknown path area: `%s'" % area)
self.area = area
if isabs(path):
area_base = self._path_areas[area]
if not path.startswith(area_base):
raise ValueError("cannot create AreaResource: `%s' is "
"not under `%s' area (%s)"
% (path, area, area_base))
self.subpath = path[len(area_base)+1:]
else:
self.subpath = path
elif isinstance(path, tuple): # as per AreaResource.area_path
self.area, self.subpath = path
else:
self.area, self.subpath = self.area_and_subpath_from_path(path)
def __str__(self):
if self.area:
return "[%s]%s%s" % (self.area, os.sep, self.subpath)
else:
return self.subpath
def __repr__(self):
return "AreaResource(%r, %r)" % (self.path, self.area)
@property
def area_path(self):
return (self.area, self.subpath)
@property
def path(self):
if self.area is None:
return self.subpath
else:
return join(self._path_areas[self.area], self.subpath)
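# Hedged example, not part of the original module: how AreaResource resolves a
# path that falls under the one registered area, "ci-pkg-dir" (the codeintel2
# package directory from _path_areas above).
if __name__ == "__main__":
    pkg_dir = AreaResource._path_areas["ci-pkg-dir"]
    r = AreaResource(join(pkg_dir, "catalog", "baz.cix"))
    print r.area_path    # ('ci-pkg-dir', 'catalog' + os.sep + 'baz.cix')
    print r.path         # absolute path back under the codeintel2 package dir
    print str(r)         # '[ci-pkg-dir]' + os.sep + the subpath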
| mit |
HugoKuo/keystone-essex3 | keystone/backends/ldap/__init__.py | 2 | 1094 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ldap
import keystone.backends.api as top_api
import keystone.backends.models as top_models
from keystone import utils
from . import api
from . import models
def configure_backend(conf):
api_obj = api.API(conf)
for name in api_obj.apis:
top_api.set_value(name, getattr(api_obj, name))
for model_name in models.__all__:
top_models.set_value(model_name, getattr(models, model_name))
| apache-2.0 |
Alwnikrotikz/los-cocos | cocos/__init__.py | 3 | 4046 | # ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# Copyright (c) 2009-2014 Richard Jones, Claudio Canepa
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''a framework for building 2D games, demos, and other graphical/interactive applications.
Main Features
-------------
* Flow control: Manage the flow control between different scenes in an easy way
* Sprites: Fast and easy sprites
* Actions: Just tell sprites what you want them to do. Composable actions like move, rotate, scale and much more
* Effects: Effects like waves, twirl, lens and much more
* Tiled Maps: Support for rectangular and hexagonal tiled maps
* Collision: Basic pure python support for collisions
* Transitions: Move from scene to scene with style
* Menus: Built in classes to create menus
* Text Rendering: Label and HTMLLabel with action support
* Documentation: Programming Guide + API Reference + Video Tutorials + Lots of simple tests showing how to use it
* Built-in Python Interpreter: For debugging purposes
* BSD License: Just use it
* Pyglet Based: No external dependencies
* OpenGL Based: Hardware Acceleration
http://cocos2d.org
'''
from __future__ import division, print_function, unicode_literals
__docformat__ = 'restructuredtext'
__version__ = "0.6.0"
__author__ = "cocos2d team"
version = __version__
import sys
# add the cocos resources path
import os, pyglet
pyglet.resource.path.append(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "resources")
)
pyglet.resource.reindex()
try:
unittesting = os.environ['cocos_utest']
except KeyError:
unittesting = False
del os, pyglet
# On Windows we use the pygame package to get the SDL DLLs.
# We must get the path here because the inner pygame module will hide the real one.
if sys.platform == 'win32':
import imp
try:
dummy, sdl_lib_path, dummy = imp.find_module('pygame')
del dummy
except ImportError:
sdl_lib_path = None
def import_all():
import cocos.actions
import cocos.director
import cocos.layer
import cocos.menu
import cocos.sprite
import cocos.path
import cocos.scene
import cocos.grid
import cocos.text
import cocos.camera
import cocos.draw
import cocos.skeleton
import cocos.rect
import cocos.tiles
if not unittesting:
import_all()
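# Hedged example, not part of the original package: the canonical cocos2d
# "hello world", showing how the pieces listed in the module docstring fit
# together. Guarded so it never runs on a normal `import cocos`.
if __name__ == "__main__":
    from cocos.director import director
    from cocos.layer import Layer
    from cocos.scene import Scene
    from cocos.text import Label

    class HelloWorld(Layer):
        def __init__(self):
            super(HelloWorld, self).__init__()
            label = Label("Hello, world",
                          font_size=32,
                          anchor_x="center", anchor_y="center")
            label.position = 320, 240
            self.add(label)

    director.init(caption="cocos2d hello world")
    director.run(Scene(HelloWorld()))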
| bsd-3-clause |
double12gzh/nova | nova/api/openstack/compute/contrib/flavor_disabled.py | 79 | 2223 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Flavor Disabled API extension."""
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
authorize = extensions.soft_extension_authorizer('compute', 'flavor_disabled')
class FlavorDisabledController(wsgi.Controller):
def _extend_flavors(self, req, flavors):
for flavor in flavors:
db_flavor = req.get_db_flavor(flavor['id'])
key = "%s:disabled" % Flavor_disabled.alias
flavor[key] = db_flavor['disabled']
def _show(self, req, resp_obj):
if not authorize(req.environ['nova.context']):
return
if 'flavor' in resp_obj.obj:
self._extend_flavors(req, [resp_obj.obj['flavor']])
@wsgi.extends
def show(self, req, resp_obj, id):
return self._show(req, resp_obj)
@wsgi.extends(action='create')
def create(self, req, resp_obj, body):
return self._show(req, resp_obj)
@wsgi.extends
def detail(self, req, resp_obj):
if not authorize(req.environ['nova.context']):
return
self._extend_flavors(req, list(resp_obj.obj['flavors']))
class Flavor_disabled(extensions.ExtensionDescriptor):
"""Support to show the disabled status of a flavor."""
name = "FlavorDisabled"
alias = "OS-FLV-DISABLED"
namespace = ("http://docs.openstack.org/compute/ext/"
"flavor_disabled/api/v1.1")
updated = "2012-08-29T00:00:00Z"
def get_controller_extensions(self):
controller = FlavorDisabledController()
extension = extensions.ControllerExtension(self, 'flavors', controller)
return [extension]
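    # Hedged illustration, not in the original file: with this extension
    # loaded, a flavor returned by show/detail gains one extra field, e.g.
    #   {"flavor": {"id": "1", "name": "m1.tiny", ...,
    #               "OS-FLV-DISABLED:disabled": false}}
    # where the key is the alias above plus ":disabled".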
| apache-2.0 |
benoitsteiner/tensorflow-xsmm | tensorflow/contrib/kfac/python/ops/fisher_factors.py | 13 | 66803 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""FisherFactor definitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import numpy as np
import six
from tensorflow.contrib.kfac.python.ops import linear_operator as lo
from tensorflow.contrib.kfac.python.ops import utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops as tf_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import moving_averages
from tensorflow.python.util import nest
# Whether to initialize covariance estimators at a zero matrix (or the identity
# matrix).
INIT_COVARIANCES_AT_ZERO = True
# Whether to zero-debias the moving averages.
ZERO_DEBIAS = True
# Whether to initialize inverse (and other such matrices computed from the cov
# matrices) to the zero matrix (or the identity matrix).
INIT_INVERSES_AT_ZERO = True
# When the number of inverses requested from a FisherFactor exceeds this value,
# the inverses are computed using an eigenvalue decomposition.
EIGENVALUE_DECOMPOSITION_THRESHOLD = 2
# Numerical eigenvalues computed from covariance matrix estimates are clipped to
# be at least as large as this value before they are used to compute inverses or
# matrix powers. Must be nonnegative.
EIGENVALUE_CLIPPING_THRESHOLD = 0.0
# Used to subsample the flattened extracted image patches. The number of
# outer products per row of the covariance matrix should not exceed this
# value. This parameter is used only if `_SUB_SAMPLE_OUTER_PRODUCTS` is True.
_MAX_NUM_OUTER_PRODUCTS_PER_COV_ROW = 1
# Used to subsample the inputs passed to the extract image patches. The batch
# size of number of inputs to extract image patches is multiplied by this
# factor. This parameter is used only if `_SUB_SAMPLE_INPUTS` is True.
_INPUTS_TO_EXTRACT_PATCHES_FACTOR = 0.5
# If True, then subsamples the tensor passed to compute the covariance matrix.
_SUB_SAMPLE_OUTER_PRODUCTS = False
# If True, then subsamples the inputs passed to extract image patches before
# computing the covariance matrix.
_SUB_SAMPLE_INPUTS = False
# TOWER_STRATEGY can be one of "concat" or "separate". If "concat", the data
# passed to the factors from the blocks will be concatenated across towers
# (lazily via PartitionedTensor objects). Otherwise a tuple of tensors over
# towers will be passed in, and the factors will iterate over this and do the
# cov computations separately for each one, averaging the results together.
TOWER_STRATEGY = "concat"
def set_global_constants(init_covariances_at_zero=None,
zero_debias=None,
init_inverses_at_zero=None,
eigenvalue_decomposition_threshold=None,
eigenvalue_clipping_threshold=None,
max_num_outer_products_per_cov_row=None,
sub_sample_outer_products=None,
inputs_to_extract_patches_factor=None,
sub_sample_inputs=None,
tower_strategy=None):
"""Sets various global constants used by the classes in this module."""
global INIT_COVARIANCES_AT_ZERO
global ZERO_DEBIAS
global INIT_INVERSES_AT_ZERO
global EIGENVALUE_DECOMPOSITION_THRESHOLD
global EIGENVALUE_CLIPPING_THRESHOLD
global _MAX_NUM_OUTER_PRODUCTS_PER_COV_ROW
global _SUB_SAMPLE_OUTER_PRODUCTS
global _INPUTS_TO_EXTRACT_PATCHES_FACTOR
global _SUB_SAMPLE_INPUTS
global TOWER_STRATEGY
if init_covariances_at_zero is not None:
INIT_COVARIANCES_AT_ZERO = init_covariances_at_zero
if zero_debias is not None:
ZERO_DEBIAS = zero_debias
if init_inverses_at_zero is not None:
INIT_INVERSES_AT_ZERO = init_inverses_at_zero
if eigenvalue_decomposition_threshold is not None:
EIGENVALUE_DECOMPOSITION_THRESHOLD = eigenvalue_decomposition_threshold
if eigenvalue_clipping_threshold is not None:
EIGENVALUE_CLIPPING_THRESHOLD = eigenvalue_clipping_threshold
if max_num_outer_products_per_cov_row is not None:
_MAX_NUM_OUTER_PRODUCTS_PER_COV_ROW = max_num_outer_products_per_cov_row
if sub_sample_outer_products is not None:
_SUB_SAMPLE_OUTER_PRODUCTS = sub_sample_outer_products
if inputs_to_extract_patches_factor is not None:
_INPUTS_TO_EXTRACT_PATCHES_FACTOR = inputs_to_extract_patches_factor
if sub_sample_inputs is not None:
_SUB_SAMPLE_INPUTS = sub_sample_inputs
if tower_strategy is not None:
TOWER_STRATEGY = tower_strategy
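# Hedged usage note, not part of the original module: these switches are meant
# to be flipped once, before any FisherFactor is instantiated, e.g.
#   set_global_constants(init_covariances_at_zero=False, zero_debias=False)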
def inverse_initializer(shape, dtype, partition_info=None): # pylint: disable=unused-argument
if INIT_INVERSES_AT_ZERO:
return array_ops.zeros(shape, dtype=dtype)
return linalg_ops.eye(num_rows=shape[0], dtype=dtype)
def covariance_initializer(shape, dtype, partition_info=None): # pylint: disable=unused-argument
if INIT_COVARIANCES_AT_ZERO:
return array_ops.zeros(shape, dtype=dtype)
return linalg_ops.eye(num_rows=shape[0], dtype=dtype)
def diagonal_covariance_initializer(shape, dtype, partition_info=None): # pylint: disable=unused-argument
if INIT_COVARIANCES_AT_ZERO:
return array_ops.zeros(shape, dtype=dtype)
return array_ops.ones(shape, dtype=dtype)
@contextlib.contextmanager
def place_on_device(device):
if device is not None and len(device):
with tf_ops.device(device):
yield
else:
yield
def compute_cov(tensor, tensor_right=None, normalizer=None):
"""Compute the empirical second moment of the rows of a 2D Tensor.
This function is meant to be applied to random matrices for which the true row
mean is zero, so that the true second moment equals the true covariance.
Args:
tensor: A 2D Tensor.
tensor_right: An optional 2D Tensor. If provided, this function computes
the matrix product tensor^T * tensor_right instead of tensor^T * tensor.
normalizer: optional scalar for the estimator (by default, the normalizer is
the number of rows of tensor).
Returns:
A square 2D Tensor with as many rows/cols as the number of input columns.
"""
if normalizer is None:
normalizer = array_ops.shape(tensor)[0]
if tensor_right is None:
cov = (
math_ops.matmul(tensor, tensor, transpose_a=True) / math_ops.cast(
normalizer, tensor.dtype))
return (cov + array_ops.transpose(cov)) / math_ops.cast(2.0, cov.dtype)
else:
return (math_ops.matmul(tensor, tensor_right, transpose_a=True) /
math_ops.cast(normalizer, tensor.dtype))
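# Hedged illustration, not part of the original module: the same estimator in
# plain NumPy, for a matrix `a` of shape [batch_size, dim] with zero-mean rows:
#
#   cov = a.T.dot(a) / a.shape[0]     # empirical second moment
#   cov = (cov + cov.T) / 2.0         # symmetrize, as above
#
# When `tensor_right` is supplied the result is instead a.T.dot(b) / n, which
# need not be symmetric.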
def append_homog(tensor):
"""Appends a homogeneous coordinate to the last dimension of a Tensor.
Args:
tensor: A Tensor.
Returns:
A Tensor identical to the input but one larger in the last dimension. The
new entries are filled with ones.
"""
rank = len(tensor.shape.as_list())
shape = array_ops.concat([array_ops.shape(tensor)[:-1], [1]], axis=0)
ones = array_ops.ones(shape, dtype=tensor.dtype)
return array_ops.concat([tensor, ones], axis=rank - 1)
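# Hedged illustration, not part of the original module: append_homog maps a
# tensor of shape [batch, d] to shape [batch, d + 1] by appending a column of
# ones, e.g.
#   [[1., 2.],          [[1., 2., 1.],
#    [3., 4.]]   --->    [3., 4., 1.]]
# which is the usual trick for folding a bias term into a weight matrix.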
def scope_string_from_params(params):
"""Builds a variable scope string name from the given parameters.
Supported parameters are:
* tensors
* booleans
* ints
* strings
* depth-1 tuples/lists of ints
* any depth tuples/lists of tensors
Other parameter types will throw an error.
Args:
params: A parameter or list of parameters.
Returns:
A string to use for the variable scope.
Raises:
ValueError: if params includes an unsupported type.
"""
params = params if isinstance(params, (tuple, list)) else (params,)
name_parts = []
for param in params:
if param is None:
name_parts.append("None")
elif isinstance(param, (tuple, list)):
if all([isinstance(p, int) for p in param]):
name_parts.append("-".join([str(p) for p in param]))
else:
name_parts.append(scope_string_from_name(param))
elif isinstance(param, (str, int, bool)):
name_parts.append(str(param))
elif isinstance(param, (tf_ops.Tensor, variables.Variable)):
name_parts.append(scope_string_from_name(param))
elif isinstance(param, utils.PartitionedTensor):
name_parts.append(scope_string_from_name(param.tensors))
else:
raise ValueError("Encountered an unsupported param type {}".format(
type(param)))
return "_".join(name_parts)
def scope_string_from_name(tensor):
if isinstance(tensor, (tuple, list)):
return "__".join([scope_string_from_name(t) for t in tensor])
# "gradients/add_4_grad/Reshape:0" -> "gradients_add_4_grad_Reshape"
return tensor.name.split(":")[0].replace("/", "_")
def scalar_or_tensor_to_string(val):
return repr(val) if np.isscalar(val) else scope_string_from_name(val)
def list_to_string(lst):
return "_".join(val if isinstance(val, six.string_types)
else scalar_or_tensor_to_string(val) for val in lst)
def graph_func_to_id(func):
"""Returns a hashable object that represents func's computation."""
# TODO(b/74201126): replace with Topohash of func's output
return func.func_id
def graph_func_to_string(func):
# TODO(b/74201126): replace with Topohash of func's output
return list_to_string(func.func_id)
def _subsample_for_cov_computation(array, name=None):
"""Subsamples the first dimension of the array.
  `array` (A) is a tensor of shape `[batch_size, dim_2]`, so the covariance
  matrix (A^T A) has `dim_2 ** 2` entries. Subsample only if the number of outer
products per row of the covariance matrix is greater than
`_MAX_NUM_OUTER_PRODUCTS_PER_COV_ROW`.
Args:
array: Tensor, of shape `[batch_size, dim_2]`.
name: `string`, Default(None)
Returns:
A tensor of shape `[max_samples, dim_2]`.
Raises:
ValueError: If array's is not matrix-shaped.
ValueError: If array's batch_size cannot be inferred.
"""
with tf_ops.name_scope(name, "subsample", [array]):
array = tf_ops.convert_to_tensor(array)
if len(array.shape) != 2:
raise ValueError("Input param array must be a matrix.")
batch_size = array.shape.as_list()[0]
if batch_size is None:
raise ValueError("Unable to get batch_size from input param array.")
num_cov_rows = array.shape.as_list()[-1]
max_batch_size = int(_MAX_NUM_OUTER_PRODUCTS_PER_COV_ROW * num_cov_rows)
if batch_size <= max_batch_size:
return array
return _random_tensor_gather(array, max_batch_size)
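# Hedged illustration, not part of the original module: with the default
# _MAX_NUM_OUTER_PRODUCTS_PER_COV_ROW = 1, an activation matrix of shape
# [4096, 256] is cut down to 256 randomly chosen rows (shape [256, 256])
# before the 256 x 256 covariance is formed, while a [128, 256] input is
# already small enough and passes through unchanged.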
def _random_tensor_gather(array, max_size):
"""Generates a random set of indices and gathers the value at the indcices.
Args:
array: Tensor, of shape `[batch_size, dim_2]`.
max_size: int, Number of indices to sample.
Returns:
A tensor of shape `[max_size, ...]`.
"""
batch_size = array.shape.as_list()[0]
indices = random_ops.random_shuffle(math_ops.range(0, batch_size))[:max_size]
return array_ops.gather(array, indices)
@six.add_metaclass(abc.ABCMeta)
class FisherFactor(object):
"""Base class for objects modeling factors of approximate Fisher blocks.
A FisherFactor represents part of an approximate Fisher Information matrix.
For example, one approximation to the Fisher uses the Kronecker product of two
FisherFactors A and B, F = kron(A, B). FisherFactors are composed with
FisherBlocks to construct a block-diagonal approximation to the full Fisher.
FisherFactors are backed by a single, non-trainable variable that is updated
by running FisherFactor.make_covariance_update_op(). The shape and type of
this variable is implementation specific.
Note that for blocks that aren't based on approximations, a 'factor' can
be the entire block itself, as is the case for the diagonal and full
representations.
"""
def __init__(self):
self._cov = None
@abc.abstractproperty
def _var_scope(self):
"""Variable scope for this FisherFactor instance.
Returns:
string that unique identifies this FisherFactor instance.
"""
pass
@property
def name(self):
return self._var_scope
@abc.abstractproperty
def _cov_shape(self):
"""The shape of the variable backing this FisherFactor."""
pass
@abc.abstractproperty
def _num_sources(self):
"""The number of things to sum over when updating covariance variable.
The default make_covariance_update_op function will call _compute_new_cov
with indices ranging from 0 to _num_sources-1. The typical situation is
where the factor wants to sum the statistics it computes over multiple
backpropped "gradients" (typically passed in via "tensors" or
"outputs_grads" arguments).
"""
pass
@abc.abstractproperty
def _num_towers(self):
pass
@abc.abstractproperty
def _dtype(self):
"""dtype for variable backing this factor."""
pass
@property
def _cov_initializer(self):
"""Function for initializing covariance variable."""
return covariance_initializer
def instantiate_cov_variables(self):
"""Makes the internal cov variable(s)."""
assert self._cov is None
with variable_scope.variable_scope(self._var_scope):
self._cov = variable_scope.get_variable(
"cov",
initializer=self._cov_initializer,
shape=self._cov_shape,
trainable=False,
dtype=self._dtype)
@abc.abstractmethod
def _compute_new_cov(self, source, tower):
"""Computes minibatch-estimated covariance for a single source.
Args:
source: int in [0, self._num_sources). Which source to use when computing
the cov update.
tower: int in [0, self._num_towers). Which tower to use when computing
the cov update.
Returns:
Tensor of same shape as self.get_cov().
"""
pass
def make_covariance_update_op(self, ema_decay):
"""Constructs and returns the covariance update Op.
Args:
ema_decay: The exponential moving average decay (float or Tensor).
Returns:
An Op for updating the covariance Variable referenced by _cov.
"""
new_cov_contribs = []
for source in range(self._num_sources):
for tower in range(self._num_towers):
device = (self._get_data_device(tower)
if TOWER_STRATEGY == "separate" else None)
with place_on_device(device):
new_cov_contribs.append(self._compute_new_cov(source, tower))
new_cov = math_ops.add_n(new_cov_contribs) / float(self._num_towers)
# Compute average of 'new_cov' across all TPU cores. On a TPU, each
# instance of 'new_cov' will be based on a different minibatch. This ensures
# that by the end of assign_moving_average(), all TPU cores see the same
# value for self._cov.
#
# Other implementations of make_covariance_update_op() that accumulate
# statistics in other variables should mimic this behavior.
if utils.on_tpu():
new_cov = utils.cross_replica_mean(new_cov)
return moving_averages.assign_moving_average(
self._cov, new_cov, ema_decay, zero_debias=ZERO_DEBIAS)
@abc.abstractmethod
def _get_data_device(self, tower):
pass
@abc.abstractmethod
def instantiate_inv_variables(self):
"""Makes the internal "inverse" variable(s)."""
pass
@abc.abstractmethod
def make_inverse_update_ops(self):
"""Create and return update ops corresponding to registered computations."""
pass
def get_cov(self):
return self._cov
@abc.abstractmethod
def get_cov_as_linear_operator(self):
pass
@abc.abstractmethod
def register_matpower(self, exp, damping_func):
pass
@abc.abstractmethod
def register_cholesky(self, damping_func):
pass
@abc.abstractmethod
def register_cholesky_inverse(self, damping_func):
pass
@abc.abstractmethod
def get_matpower(self, exp, damping_func):
pass
@abc.abstractmethod
def get_cholesky(self, damping_func):
pass
@abc.abstractmethod
def get_cholesky_inverse(self, damping_func):
pass
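# Typical lifecycle of a concrete FisherFactor (a sketch based on the methods
# declared above; the exact call order is orchestrated elsewhere by the KFAC
# estimator, so treat this as illustrative rather than normative):
#   factor.instantiate_cov_variables()            # create the cov Variable(s)
#   cov_op = factor.make_covariance_update_op(ema_decay)   # EMA update of cov
#   factor.register_matpower(-1, damping_func)    # declare needed quantities
#   factor.instantiate_inv_variables()            # create variables for them
#   inv_ops = factor.make_inverse_update_ops()    # ops that refresh them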
class DenseSquareMatrixFactor(FisherFactor):
"""Base class for FisherFactors that are stored as dense square matrices.
This class explicitly calculates and stores inverses of their `cov` matrices,
which must be square dense matrices.
Subclasses must implement the _compute_new_cov method, and the _var_scope and
_cov_shape properties.
"""
# TODO(b/69108481): This class (and its subclasses) should be refactored to
# serve the matrix quantities it computes as both (potentially stale)
# variables, updated by the inverse update ops, and fresh values stored in
tensors that are recomputed once every session.run() call. Currently matpower
# and damp_inverse have the former behavior, while eigendecomposition has
# the latter.
def __init__(self):
self._matpower_by_exp_and_damping = {} # { (float, hashable): variable }
self._matpower_registrations = set() # { (float, hashable) }
self._eigendecomp = None
self._damping_funcs_by_id = {} # {hashable: lambda}
self._cholesky_registrations = set() # { hashable }
self._cholesky_inverse_registrations = set() # { hashable }
self._cholesky_by_damping = {} # { hashable: variable }
self._cholesky_inverse_by_damping = {} # { hashable: variable }
super(DenseSquareMatrixFactor, self).__init__()
def get_cov_as_linear_operator(self):
assert self.get_cov().shape.ndims == 2
return lo.LinearOperatorFullMatrix(self.get_cov(),
is_self_adjoint=True,
is_square=True)
def _register_damping(self, damping_func):
damping_id = graph_func_to_id(damping_func)
if damping_id not in self._damping_funcs_by_id:
self._damping_funcs_by_id[damping_id] = damping_func
return damping_id
def register_inverse(self, damping_func):
# Just for backwards compatibility of some old code and tests
self.register_matpower(-1, damping_func)
def register_matpower(self, exp, damping_func):
"""Registers a matrix power to be maintained and served on demand.
This creates a variable and signals make_inverse_update_ops to make the
corresponding update op. The variable can be read via the method
get_matpower.
Args:
exp: float. The exponent to use in the matrix power.
damping_func: A function that computes a 0-D Tensor or a float which will
be the damping value used. i.e. damping = damping_func().
"""
if exp == 1.0:
return
damping_id = self._register_damping(damping_func)
if (exp, damping_id) not in self._matpower_registrations:
self._matpower_registrations.add((exp, damping_id))
def register_cholesky(self, damping_func):
"""Registers a Cholesky factor to be maintained and served on demand.
This creates a variable and signals make_inverse_update_ops to make the
corresponding update op. The variable can be read via the method
get_cholesky.
Args:
damping_func: A function that computes a 0-D Tensor or a float which will
be the damping value used. i.e. damping = damping_func().
"""
damping_id = self._register_damping(damping_func)
if damping_id not in self._cholesky_registrations:
self._cholesky_registrations.add(damping_id)
def register_cholesky_inverse(self, damping_func):
"""Registers an inverse Cholesky factor to be maintained/served on demand.
This creates a variable and signals make_inverse_update_ops to make the
corresponding update op. The variable can be read via the method
get_cholesky_inverse.
Args:
damping_func: A function that computes a 0-D Tensor or a float which will
be the damping value used. i.e. damping = damping_func().
"""
damping_id = self._register_damping(damping_func)
if damping_id not in self._cholesky_inverse_registrations:
self._cholesky_inverse_registrations.add(damping_id)
def instantiate_inv_variables(self):
"""Makes the internal "inverse" variable(s)."""
for (exp, damping_id) in self._matpower_registrations:
exp_string = scalar_or_tensor_to_string(exp)
damping_func = self._damping_funcs_by_id[damping_id]
damping_string = graph_func_to_string(damping_func)
with variable_scope.variable_scope(self._var_scope):
matpower = variable_scope.get_variable(
"matpower_exp{}_damp{}".format(exp_string, damping_string),
initializer=inverse_initializer,
shape=self._cov_shape,
trainable=False,
dtype=self._dtype)
assert (exp, damping_id) not in self._matpower_by_exp_and_damping
self._matpower_by_exp_and_damping[(exp, damping_id)] = matpower
for damping_id in self._cholesky_registrations:
damping_func = self._damping_funcs_by_id[damping_id]
damping_string = graph_func_to_string(damping_func)
with variable_scope.variable_scope(self._var_scope):
chol = variable_scope.get_variable(
"cholesky_damp{}".format(damping_string),
initializer=inverse_initializer,
shape=self._cov_shape,
trainable=False,
dtype=self._dtype)
assert damping_id not in self._cholesky_by_damping
self._cholesky_by_damping[damping_id] = chol
for damping_id in self._cholesky_inverse_registrations:
damping_func = self._damping_funcs_by_id[damping_id]
damping_string = graph_func_to_string(damping_func)
with variable_scope.variable_scope(self._var_scope):
cholinv = variable_scope.get_variable(
"cholesky_inverse_damp{}".format(damping_string),
initializer=inverse_initializer,
shape=self._cov_shape,
trainable=False,
dtype=self._dtype)
assert damping_id not in self._cholesky_inverse_by_damping
self._cholesky_inverse_by_damping[damping_id] = cholinv
def make_inverse_update_ops(self):
"""Create and return update ops corresponding to registered computations."""
ops = []
num_inverses = sum(1 for (exp, _) in self._matpower_by_exp_and_damping
if exp == -1)
num_other_matpower = len(self._matpower_by_exp_and_damping) - num_inverses
other_matrix_power_registered = num_other_matpower >= 1
use_eig = (
self._eigendecomp or other_matrix_power_registered or
num_inverses >= EIGENVALUE_DECOMPOSITION_THRESHOLD)
# We precompute these so we don't need to evaluate them multiple times (for
# each matrix power that uses them)
damping_value_by_id = {damping_id: math_ops.cast(
self._damping_funcs_by_id[damping_id](), self._dtype)
for damping_id in self._damping_funcs_by_id}
if use_eig:
eigenvalues, eigenvectors = self.get_eigendecomp() # pylint: disable=unpacking-non-sequence
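# Each registered matrix power is recomputed from the shared
# eigendecomposition as
#   (cov + damping * I)**exp = V diag((eigenvalues + damping)**exp) V^T.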
for (exp, damping_id), matpower in (
self._matpower_by_exp_and_damping.items()):
damping = damping_value_by_id[damping_id]
ops.append(
matpower.assign(
math_ops.matmul(eigenvectors *
(eigenvalues + damping)**exp,
array_ops.transpose(eigenvectors))))
# These ops share computation and should be run on a single device.
ops = [control_flow_ops.group(*ops)]
else:
for (exp, damping_id), matpower in (
self._matpower_by_exp_and_damping.items()):
assert exp == -1
damping = damping_value_by_id[damping_id]
ops.append(matpower.assign(utils.posdef_inv(self.get_cov(), damping)))
# TODO(b/77902055): If inverses are being computed with Cholesky's
# we can share the work. Instead this code currently just computes the
# Cholesky a second time. It does at least share work between requests for
# Cholesky's and Cholesky inverses with the same damping id.
for damping_id, cholesky_inv in self._cholesky_inverse_by_damping.items():
cholesky_ops = []
damping = damping_value_by_id[damping_id]
cholesky_value = utils.cholesky(self.get_cov(), damping)
if damping_id in self._cholesky_by_damping:
cholesky = self._cholesky_by_damping[damping_id]
cholesky_ops.append(cholesky.assign(cholesky_value))
identity = linalg_ops.eye(cholesky_value.shape.as_list()[0],
dtype=cholesky_value.dtype)
cholesky_inv_value = linalg_ops.matrix_triangular_solve(cholesky_value,
identity)
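# Solving L X = I for the lower-triangular Cholesky factor L gives
# X = L^{-1} (matrix_triangular_solve defaults to lower=True).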
cholesky_ops.append(cholesky_inv.assign(cholesky_inv_value))
ops.append(control_flow_ops.group(*cholesky_ops))
for damping_id, cholesky in self._cholesky_by_damping.items():
if damping_id not in self._cholesky_inverse_by_damping:
damping = damping_value_by_id[damping_id]
cholesky_value = utils.cholesky(self.get_cov(), damping)
ops.append(cholesky.assign(cholesky_value))
self._eigendecomp = False
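# Drop any cached eigendecomposition so the next get_eigendecomp() call
# recomputes it from the current value of the covariance.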
return ops
def get_inverse(self, damping_func):
# Just for backwards compatibility of some old code and tests
return self.get_matpower(-1, damping_func)
def get_matpower(self, exp, damping_func):
# Note that this function returns a variable which gets updated by the
# inverse ops. It may be stale / inconsistent with the latest value of
# get_cov().
if exp != 1:
damping_id = graph_func_to_id(damping_func)
matpower = self._matpower_by_exp_and_damping[(exp, damping_id)]
else:
matpower = self.get_cov()
identity = linalg_ops.eye(matpower.shape.as_list()[0],
dtype=matpower.dtype)
matpower += math_ops.cast(damping_func(), dtype=matpower.dtype)*identity
assert matpower.shape.ndims == 2
return lo.LinearOperatorFullMatrix(matpower,
is_non_singular=True,
is_self_adjoint=True,
is_positive_definite=True,
is_square=True)
def get_cholesky(self, damping_func):
# Note that this function returns a variable which gets updated by the
# inverse ops. It may be stale / inconsistent with the latest value of
# get_cov().
damping_id = graph_func_to_id(damping_func)
cholesky = self._cholesky_by_damping[damping_id]
assert cholesky.shape.ndims == 2
return lo.LinearOperatorFullMatrix(cholesky,
is_non_singular=True,
is_square=True)
def get_cholesky_inverse(self, damping_func):
# Note that this function returns a variable which gets updated by the
# inverse ops. It may be stale / inconsistent with the latest value of
# get_cov().
damping_id = graph_func_to_id(damping_func)
cholesky_inv = self._cholesky_inverse_by_damping[damping_id]
assert cholesky_inv.shape.ndims == 2
return lo.LinearOperatorFullMatrix(cholesky_inv,
is_non_singular=True,
is_square=True)
def get_eigendecomp(self):
"""Creates or retrieves eigendecomposition of self._cov."""
# Unlike get_matpower this doesn't retrieve a stored variable, but instead
# always computes a fresh version from the current value of get_cov().
if not self._eigendecomp:
eigenvalues, eigenvectors = linalg_ops.self_adjoint_eig(self.get_cov())
# The matrix self._cov is positive semidefinite by construction, but the
# numerical eigenvalues could be negative due to numerical errors, so here
# we clip them to be at least FLAGS.eigenvalue_clipping_threshold
clipped_eigenvalues = math_ops.maximum(eigenvalues,
EIGENVALUE_CLIPPING_THRESHOLD)
self._eigendecomp = (clipped_eigenvalues, eigenvectors)
return self._eigendecomp
class FullFactor(DenseSquareMatrixFactor):
"""FisherFactor for a full matrix representation of the Fisher of a parameter.
Note that this uses the naive "square the sum estimator", and so is applicable
to any type of parameter in principle, but has very high variance.
"""
def __init__(self,
params_grads,
batch_size):
self._batch_size = batch_size
self._params_grads = tuple(utils.ensure_sequence(params_grad)
for params_grad in params_grads)
super(FullFactor, self).__init__()
@property
def _var_scope(self):
return "ff_full_" + scope_string_from_params(
[self._params_grads, self._batch_size])
@property
def _cov_shape(self):
size = sum(param_grad.shape.num_elements()
for param_grad in self._params_grads[0])
return (size, size)
@property
def _num_sources(self):
return len(self._params_grads)
@property
def _num_towers(self):
return 1
@property
def _dtype(self):
return self._params_grads[0][0].dtype
def _compute_new_cov(self, source, tower):
assert tower == 0
# This will be a very basic rank 1 estimate
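# i.e. outer(g, g) / batch_size, where g is the column of flattened
# parameter gradients for this source.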
params_grads_flat = utils.tensors_to_column(self._params_grads[source])
return ((params_grads_flat * array_ops.transpose(
params_grads_flat)) / math_ops.cast(self._batch_size,
params_grads_flat.dtype))
def _get_data_device(self, tower):
return None
class DiagonalFactor(FisherFactor):
"""A base class for FisherFactors that use diagonal approximations.
A DiagonalFactor's covariance variable can be of any shape, but must contain
exactly one entry per parameter.
"""
def __init__(self):
super(DiagonalFactor, self).__init__()
def get_cov_as_linear_operator(self):
assert self._matrix_diagonal.shape.ndims == 1
return lo.LinearOperatorDiag(self._matrix_diagonal,
is_self_adjoint=True,
is_square=True)
@property
def _cov_initializer(self):
return diagonal_covariance_initializer
@property
def _matrix_diagonal(self):
return array_ops.reshape(self.get_cov(), [-1])
def make_inverse_update_ops(self):
return []
def instantiate_inv_variables(self):
pass
def register_matpower(self, exp, damping_func):
pass
def register_cholesky(self, damping_func):
pass
def register_cholesky_inverse(self, damping_func):
pass
def get_matpower(self, exp, damping_func):
matpower_diagonal = (self._matrix_diagonal
+ math_ops.cast(damping_func(), self._dtype))**exp
return lo.LinearOperatorDiag(matpower_diagonal,
is_non_singular=True,
is_self_adjoint=True,
is_positive_definite=True,
is_square=True)
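# For a diagonal matrix the (damped) Cholesky factor is just the elementwise
# square root, so the Cholesky and its inverse reduce to the 0.5 and -0.5
# matrix powers computed above.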
def get_cholesky(self, damping_func):
return self.get_matpower(0.5, damping_func)
def get_cholesky_inverse(self, damping_func):
return self.get_matpower(-0.5, damping_func)
class NaiveDiagonalFactor(DiagonalFactor):
"""FisherFactor for a diagonal approximation of any type of param's Fisher.
Note that this uses the naive "square the sum estimator", and so is applicable
to any type of parameter in principle, but has very high variance.
"""
def __init__(self,
params_grads,
batch_size):
"""Initializes NaiveDiagonalFactor instance.
Args:
params_grads: Sequence of Tensors, each with same shape as parameters this
FisherFactor corresponds to. For example, the gradient of the loss with
respect to parameters.
batch_size: int or 0-D Tensor. Size of the minibatch.
"""
self._params_grads = tuple(utils.ensure_sequence(params_grad)
for params_grad in params_grads)
self._batch_size = batch_size
super(NaiveDiagonalFactor, self).__init__()
@property
def _var_scope(self):
return "ff_naivediag_" + scope_string_from_params(
[self._params_grads, self._batch_size])
@property
def _cov_shape(self):
size = sum(param_grad.shape.num_elements()
for param_grad in self._params_grads[0])
return [size, 1]
@property
def _num_sources(self):
return len(self._params_grads)
@property
def _num_towers(self):
return 1
@property
def _dtype(self):
return self._params_grads[0][0].dtype
def _compute_new_cov(self, source, tower):
assert tower == 0
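# Diagonal counterpart of FullFactor._compute_new_cov: the elementwise
# squared gradient divided by the batch size (the diagonal of the rank-1
# estimate).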
params_grads_flat = utils.tensors_to_column(self._params_grads[source])
return (math_ops.square(params_grads_flat) / math_ops.cast(
self._batch_size, params_grads_flat.dtype))
def _get_data_device(self, tower):
return None
class EmbeddingInputKroneckerFactor(DiagonalFactor):
r"""FisherFactor for input to an embedding layer.
Given input_ids = [batch_size, input_size] representing indices into a
[vocab_size, embedding_size] embedding matrix, approximate input covariance by
a diagonal matrix,
Cov(input_ids, input_ids) =
(1/batch_size) sum_{i} diag(n_hot(input[i]) ** 2).
where n_hot() constructs an n-hot binary vector and diag() constructs a
diagonal matrix of size [vocab_size, vocab_size].
"""
def __init__(self, input_ids, vocab_size, dtype=None):
"""Instantiate EmbeddingInputKroneckerFactor.
Args:
input_ids: List of Tensors of shape [batch_size, input_size] and dtype
int32. Indices into embedding matrix. List index is tower.
vocab_size: int or 0-D Tensor. Maximum value for entries in 'input_ids'.
dtype: dtype for covariance statistics. Must be a floating point type.
Defaults to float32.
"""
self._input_ids = input_ids
self._vocab_size = vocab_size
self._cov_dtype = dtype or dtypes.float32
super(EmbeddingInputKroneckerFactor, self).__init__()
@property
def _var_scope(self):
return "ff_diag_embedding_" + scope_string_from_params(self._input_ids)
@property
def _cov_shape(self):
return [self._vocab_size]
@property
def _num_sources(self):
return 1
@property
def _num_towers(self):
return len(self._input_ids)
@property
def _dtype(self):
return self._cov_dtype
def _compute_new_cov(self, source, tower):
assert source == 0
input_ids = self._input_ids[tower]
if len(input_ids.shape) > 2:
raise ValueError(
"Input to embeddings must have rank <= 2. Found rank %d." % len(
input_ids.shape))
batch_size = array_ops.shape(input_ids)[0]
# Transform indices into one-hot vectors.
#
# TODO(b/72714822): There must be a faster way to construct the diagonal
# covariance matrix! This operation is O(batch_size * vocab_size), where
# it should be O(batch_size * input_size).
flat_input_ids = array_ops.reshape(input_ids, [-1])
one_hots = array_ops.one_hot(flat_input_ids,
self._vocab_size) # [?, vocab_size]
# Take average across examples. Note that, because all entries have
# magnitude zero or one, there's no need to square the entries.
#
# TODO(b/72714822): Support for SparseTensor, other kinds of aggregation
# within an example such as average.
#
# TODO(b/72714822): Support for partitioned embeddings.
new_cov = math_ops.reduce_sum(one_hots, axis=0) # [vocab_size]
new_cov /= math_ops.cast(batch_size, new_cov.dtype)
return new_cov
def _get_data_device(self, tower):
return self._input_ids[tower].device
class FullyConnectedDiagonalFactor(DiagonalFactor):
r"""FisherFactor for a diagonal approx of a fully-connected layer's Fisher.
Given in = [batch_size, input_size] and out_grad = [batch_size, output_size],
approximates the covariance as,
Cov(in, out) = (1/batch_size) sum_{i} outer(in[i], out_grad[i]) ** 2.0
where the square is taken element-wise.
"""
def __init__(self,
inputs,
outputs_grads,
has_bias=False):
"""Instantiate FullyConnectedDiagonalFactor.
Args:
inputs: List of Tensors of shape [batch_size, input_size]. Inputs to this
layer. List index is towers.
outputs_grads: List of Tensors, each of shape [batch_size, output_size],
which are the gradients of the loss with respect to the layer's
outputs. First index is source, second is tower.
has_bias: bool. If True, append '1' to each input.
"""
self._inputs = inputs
self._has_bias = has_bias
self._outputs_grads = outputs_grads
self._squared_inputs = None
super(FullyConnectedDiagonalFactor, self).__init__()
@property
def _var_scope(self):
return "ff_diagfc_" + scope_string_from_params(
tuple(self._inputs) + tuple(nest.flatten(self._outputs_grads)))
@property
def _cov_shape(self):
input_size = self._inputs[0].shape[1] + self._has_bias
output_size = self._outputs_grads[0][0].shape[1]
return [input_size, output_size]
@property
def _num_sources(self):
return len(self._outputs_grads)
@property
def _num_towers(self):
return len(self._inputs)
@property
def _dtype(self):
return self._outputs_grads[0][0].dtype
def make_covariance_update_op(self, ema_decay):
self._squared_inputs = []
for tower in range(self._num_towers):
inputs = self._inputs[tower]
with place_on_device(self._get_data_device(tower)):
if self._has_bias:
inputs = append_homog(inputs)
self._squared_inputs.append(math_ops.square(inputs))
return super(FullyConnectedDiagonalFactor, self).make_covariance_update_op(
ema_decay)
def _compute_new_cov(self, source, tower):
batch_size = array_ops.shape(self._squared_inputs[tower])[0]
outputs_grad = self._outputs_grads[source][tower]
# The well-known special formula that uses the fact that the entry-wise
# square of an outer product is the outer-product of the entry-wise squares.
# The gradient is the outer product of the input and the output gradients,
# so we just square both and then take their outer-product.
new_cov = math_ops.matmul(
self._squared_inputs[tower],
math_ops.square(outputs_grad),
transpose_a=True)
new_cov /= math_ops.cast(batch_size, new_cov.dtype)
return new_cov
def _get_data_device(self, tower):
return self._inputs[tower].device
class ConvDiagonalFactor(DiagonalFactor):
"""FisherFactor for a diagonal approx of a convolutional layer's Fisher."""
def __init__(self,
inputs,
outputs_grads,
filter_shape,
strides,
padding,
data_format=None,
dilations=None,
has_bias=False):
"""Creates a ConvDiagonalFactor object.
Args:
inputs: List of Tensors of shape [batch_size, height, width, in_channels].
Input activations to this layer. List index is towers.
outputs_grads: List of Tensors, each of shape [batch_size,
height, width, out_channels], which are the gradients of the loss
with respect to the layer's outputs. First index is source, second
index is tower.
filter_shape: Tuple of 4 ints: (kernel_height, kernel_width, in_channels,
out_channels). Represents shape of kernel used in this layer.
strides: The stride size in this layer (1-D Tensor of length 4).
padding: str. The padding used in this layer (e.g. "SAME" or "VALID").
data_format: None or str. Format of conv2d inputs.
dilations: None or tuple of 4 ints.
has_bias: Python bool. If True, the layer is assumed to have a bias
parameter in addition to its filter parameter.
Raises:
ValueError: If inputs, output_grads, and filter_shape do not agree on
in_channels or out_channels.
ValueError: If strides, dilations are not length-4 lists of ints.
ValueError: If data_format does not put channel last.
"""
if not utils.is_data_format_channel_last(data_format):
raise ValueError("Channel must be last.")
if any(input_.shape.ndims != 4 for input_ in inputs):
raise ValueError("inputs must be a list of 4-D Tensors.")
if any(input_.shape.as_list()[-1] != filter_shape[-2] for input_ in inputs):
raise ValueError("inputs and filter_shape must agree on in_channels.")
for i, outputs_grad in enumerate(outputs_grads):
if any(output_grad.shape.ndims != 4 for output_grad in outputs_grad):
raise ValueError("outputs[%d] must be 4-D Tensor." % i)
if any(output_grad.shape.as_list()[-1] != filter_shape[-1]
for output_grad in outputs_grad):
raise ValueError(
"outputs[%d] and filter_shape must agree on out_channels." % i)
if len(strides) != 4:
raise ValueError("strides must be length-4 list of ints.")
if dilations is not None and len(dilations) != 4:
raise ValueError("dilations must be length-4 list of ints.")
self._inputs = inputs
self._outputs_grads = outputs_grads
self._filter_shape = filter_shape
self._strides = strides
self._padding = padding
self._data_format = data_format
self._dilations = dilations
self._has_bias = has_bias
self._patches = None
super(ConvDiagonalFactor, self).__init__()
@property
def _var_scope(self):
return "ff_convdiag_" + scope_string_from_params(
tuple(self._inputs) + tuple(nest.flatten(self._outputs_grads)))
@property
def _cov_shape(self):
filter_height, filter_width, in_channels, out_channels = self._filter_shape
return [
filter_height * filter_width * in_channels + self._has_bias,
out_channels
]
@property
def _num_sources(self):
return len(self._outputs_grads)
@property
def _num_towers(self):
return len(self._inputs)
@property
def _dtype(self):
return self._inputs[0].dtype
def make_covariance_update_op(self, ema_decay):
filter_height, filter_width, _, _ = self._filter_shape
# TODO(b/64144716): there is potential here for a big savings in terms
# of memory use.
if self._dilations is None:
rates = (1, 1, 1, 1)
else:
rates = tuple(self._dilations)
self._patches = []
for tower in range(self._num_towers):
with place_on_device(self._get_data_device(tower)):
patches = array_ops.extract_image_patches(
self._inputs[tower],
ksizes=[1, filter_height, filter_width, 1],
strides=self._strides,
rates=rates,
padding=self._padding)
if self._has_bias:
patches = append_homog(patches)
self._patches.append(patches)
return super(ConvDiagonalFactor, self).make_covariance_update_op(ema_decay)
def _compute_new_cov(self, source, tower):
patches = self._patches[tower]
batch_size = array_ops.shape(patches)[0]
outputs_grad = self._outputs_grads[source][tower]
new_cov = self._convdiag_sum_of_squares(patches, outputs_grad)
new_cov /= math_ops.cast(batch_size, new_cov.dtype)
return new_cov
def _convdiag_sum_of_squares(self, patches, outputs_grad):
# This computes the sum of the squares of the per-training-case "gradients".
# It does this simply by computing a giant tensor containing all of these,
# doing an entry-wise square, and then summing along the batch dimension.
case_wise_gradients = special_math_ops.einsum("bijk,bijl->bkl", patches,
outputs_grad)
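# case_wise_gradients has shape [batch_size, patch_size, out_channels]: for
# each example it sums the patch / output-gradient outer products over all
# spatial locations.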
return math_ops.reduce_sum(math_ops.square(case_wise_gradients), axis=0)
def _get_data_device(self, tower):
return self._inputs[tower].device
class FullyConnectedKroneckerFactor(DenseSquareMatrixFactor):
"""Kronecker factor for the input or output side of a fully-connected layer.
"""
def __init__(self,
tensors,
has_bias=False):
"""Instantiate FullyConnectedKroneckerFactor.
Args:
tensors: List of list of Tensors, each of shape [batch_size, n]. The
Tensors are typically either a layer's inputs or its output's gradients.
The first list index is source, the second is tower.
has_bias: bool. If True, append '1' to each row.
"""
# The tensor argument is either a tensor of input activations or a tensor of
# output pre-activation gradients.
self._has_bias = has_bias
self._tensors = tensors
super(FullyConnectedKroneckerFactor, self).__init__()
@property
def _var_scope(self):
return "ff_fckron_" + scope_string_from_params(
tuple(nest.flatten(self._tensors)) + (self._has_bias,))
@property
def _cov_shape(self):
size = self._tensors[0][0].shape[1] + self._has_bias
return [size, size]
@property
def _num_sources(self):
return len(self._tensors)
@property
def _num_towers(self):
return len(self._tensors[0])
@property
def _dtype(self):
return self._tensors[0][0].dtype
def _compute_new_cov(self, source, tower):
tensor = self._tensors[source][tower]
if self._has_bias:
tensor = append_homog(tensor)
return compute_cov(tensor)
def _get_data_device(self, tower):
return self._tensors[0][tower].device
class ConvInputKroneckerFactor(DenseSquareMatrixFactor):
r"""Kronecker factor for the input side of a convolutional layer.
Estimates E[ a a^T ] where a is the inputs to a convolutional layer given
example x. Expectation is taken over all examples and locations.
Equivalent to Omega in https://arxiv.org/abs/1602.01407. See Section 3.1,
"Estimating the factors", for details.
"""
def __init__(self,
inputs,
filter_shape,
padding,
strides=None,
dilation_rate=None,
data_format=None,
extract_patches_fn=None,
has_bias=False,
sub_sample_inputs=None,
sub_sample_patches=None):
"""Initializes ConvInputKroneckerFactor.
Args:
inputs: List of Tensors of shape [batch_size, ..spatial_input_size..,
in_channels]. Inputs to layer. List index is tower.
filter_shape: List of ints. Contains [..spatial_filter_size..,
in_channels, out_channels]. Shape of convolution kernel.
padding: str. Padding method for layer. "SAME" or "VALID".
strides: List of ints or None. Contains [..spatial_filter_strides..] if
'extract_patches_fn' is compatible with tf.nn.convolution(), else
[1, ..spatial_filter_strides, 1].
dilation_rate: List of ints or None. Rate for dilation along each spatial
dimension if 'extract_patches_fn' is compatible with
tf.nn.convolution(), else [1, ..spatial_dilation_rates.., 1].
data_format: str or None. Format of input data.
extract_patches_fn: str or None. Name of function that extracts image
patches. One of "extract_convolution_patches", "extract_image_patches",
"extract_pointwise_conv2d_patches".
has_bias: bool. If True, append 1 to in_channel.
sub_sample_inputs: `bool`. If True, then subsample the inputs from which
the image patches are extracted. (Default: None)
sub_sample_patches: `bool`. If `True`, then subsample the extracted
patches. (Default: None)
"""
self._inputs = inputs
self._filter_shape = filter_shape
self._strides = strides
self._padding = padding
self._dilation_rate = dilation_rate
self._data_format = data_format
self._extract_patches_fn = extract_patches_fn
self._has_bias = has_bias
if sub_sample_inputs is None:
self._sub_sample_inputs = _SUB_SAMPLE_INPUTS
else:
self._sub_sample_inputs = sub_sample_inputs
if sub_sample_patches is None:
self._sub_sample_patches = _SUB_SAMPLE_OUTER_PRODUCTS
else:
self._sub_sample_patches = sub_sample_patches
super(ConvInputKroneckerFactor, self).__init__()
@property
def _var_scope(self):
return "ff_convinkron_" + scope_string_from_params(
tuple(self._inputs) +
tuple((self._filter_shape, self._strides, self._padding,
self._dilation_rate, self._data_format, self._has_bias)))
@property
def _cov_shape(self):
spatial_filter_shape = self._filter_shape[0:-2]
in_channels = self._filter_shape[-2]
size = np.prod(spatial_filter_shape) * in_channels + self._has_bias
return [size, size]
@property
def _num_sources(self):
return 1
@property
def _num_towers(self):
return len(self._inputs)
@property
def _dtype(self):
return self._inputs[0].dtype
def _compute_new_cov(self, source, tower):
assert source == 0
inputs = self._inputs[tower]
if self._sub_sample_inputs:
batch_size = inputs.shape.as_list()[0]
max_size = int(batch_size * _INPUTS_TO_EXTRACT_PATCHES_FACTOR)
inputs = _random_tensor_gather(inputs, max_size)
# TODO(b/64144716): there is potential here for a big savings in terms of
# memory use.
if self._extract_patches_fn in [None, "extract_convolution_patches"]:
patches = utils.extract_convolution_patches(
inputs,
self._filter_shape,
padding=self._padding,
strides=self._strides,
dilation_rate=self._dilation_rate,
data_format=self._data_format)
elif self._extract_patches_fn == "extract_image_patches":
assert inputs.shape.ndims == 4
assert len(self._filter_shape) == 4
assert len(self._strides) == 4, self._strides
if self._dilation_rate is None:
rates = [1, 1, 1, 1]
else:
rates = self._dilation_rate
assert len(rates) == 4
assert rates[0] == rates[-1] == 1
patches = array_ops.extract_image_patches(
inputs,
ksizes=[1] + list(self._filter_shape[0:-2]) + [1],
strides=self._strides,
rates=rates,
padding=self._padding)
elif self._extract_patches_fn == "extract_pointwise_conv2d_patches":
assert self._strides in [None, [1, 1, 1, 1], (1, 1, 1, 1)]
assert self._filter_shape[0] == self._filter_shape[1] == 1
patches = utils.extract_pointwise_conv2d_patches(
inputs, self._filter_shape, data_format=None)
else:
raise NotImplementedError(self._extract_patches_fn)
flatten_size = np.prod(self._filter_shape[0:-1])
# patches_flat below is the matrix [[A_l]] from the KFC paper (tilde
# omitted over A for clarity). It has shape M|T| x J|Delta| (eq. 14),
# where M = minibatch size, |T| = number of spatial locations,
# |Delta| = number of spatial offsets, and J = number of input maps
# for convolutional layer l.
patches_flat = array_ops.reshape(patches, [-1, flatten_size])
# We append a homogeneous coordinate to patches_flat if the layer has
# bias parameters. This gives us [[A_l]]_H from the paper.
if self._sub_sample_patches:
patches_flat = _subsample_for_cov_computation(patches_flat)
if self._has_bias:
patches_flat = append_homog(patches_flat)
# We call compute_cov without passing in a normalizer. compute_cov uses
# the first dimension of patches_flat i.e. M|T| as the normalizer by
# default. Hence we end up computing 1/M|T| * [[A_l]]^T [[A_l]], with
# shape J|Delta| x J|Delta|. This is related to hat{Omega}_l from
# the paper but has a different scale here for consistency with
# ConvOutputKroneckerFactor.
# (Tilde omitted over A for clarity.)
return compute_cov(patches_flat)
def _get_data_device(self, tower):
return self._inputs[tower].device
class ConvOutputKroneckerFactor(DenseSquareMatrixFactor):
r"""Kronecker factor for the output side of a convolutional layer.
Estimates E[ ds ds^T ] where s is the preactivations of a convolutional layer
given example x and ds = (d / d s) log(p(y|x, w)). Expectation is taken over
all examples and locations.
Equivalent to Gamma in https://arxiv.org/abs/1602.01407. See Section 3.1,
"Estimating the factors", for details.
"""
def __init__(self, outputs_grads, data_format=None):
"""Initializes ConvOutputKroneckerFactor.
Args:
outputs_grads: List of list of Tensors. Each Tensor is of shape
[batch_size, ..spatial_input_size.., out_channels]. First list index
is source, the second is tower.
data_format: None or str. Format of outputs_grads.
Raises:
ValueError: If channels are not final dimension.
"""
if not utils.is_data_format_channel_last(data_format):
raise ValueError("Channel must be last.")
self._out_channels = outputs_grads[0][0].shape.as_list()[-1]
self._outputs_grads = outputs_grads
super(ConvOutputKroneckerFactor, self).__init__()
@property
def _var_scope(self):
return "ff_convoutkron_" + scope_string_from_params(
nest.flatten(self._outputs_grads))
@property
def _cov_shape(self):
size = self._out_channels
return [size, size]
@property
def _num_sources(self):
return len(self._outputs_grads)
@property
def _num_towers(self):
return len(self._outputs_grads[0])
@property
def _dtype(self):
return self._outputs_grads[0][0].dtype
def _compute_new_cov(self, source, tower):
outputs_grad = self._outputs_grads[source][tower]
# reshaped_tensor below is the matrix DS_l defined in the KFC paper
# (tilde omitted over S for clarity). It has shape M|T| x I, where
# M = minibatch size, |T| = number of spatial locations, and
# I = number of output maps for convolutional layer l.
reshaped_tensor = array_ops.reshape(outputs_grad, [-1, self._out_channels])
# Following the reasoning in ConvInputKroneckerFactor._compute_new_cov,
# compute_cov here returns 1/M|T| * DS_l^T DS_l = hat{Gamma}_l
# as defined in the paper, with shape I x I.
# (Tilde omitted over S for clarity.)
return compute_cov(reshaped_tensor)
def _get_data_device(self, tower):
return self._outputs_grads[0][tower].device
class FullyConnectedMultiKF(FullyConnectedKroneckerFactor):
"""Kronecker factor for a fully connected layer used multiple times."""
def __init__(self,
tensors,
num_uses=None,
has_bias=False):
"""Constructs a new `FullyConnectedMultiKF`.
Args:
tensors: List of lists of Tensors, each of shape
[num_uses * batch_size, n]. Each Tensor is a reshaped version of a
Tensor of shape [num_uses, batch_size, n], and is usually a layer's
inputs or its output's gradients. The first list index is source, the
second is tower.
num_uses: int. The number of time-steps / uses.
has_bias: bool. If True, '1' is appended to each row.
"""
self._num_uses = num_uses
self._cov_dt1 = None
self._make_cov_dt1 = False
self._option1quants_by_damping = {}
self._option2quants_by_damping = {}
self._option1quants_registrations = set()
self._option2quants_registrations = set()
super(FullyConnectedMultiKF, self).__init__(tensors=tensors,
has_bias=has_bias)
@property
def _num_timesteps(self):
return self._num_uses
@property
def _var_scope(self):
return "ff_fc_multi_" + scope_string_from_params(
tuple(nest.flatten(self._tensors))
+ (self._num_timesteps, self._has_bias,))
def make_covariance_update_op(self, ema_decay):
op = super(FullyConnectedMultiKF, self).make_covariance_update_op(ema_decay)
if self._cov_dt1 is not None:
new_cov_dt1_contribs = []
for source in range(self._num_sources):
for tower in range(self._num_towers):
with place_on_device(self._get_data_device(tower)):
new_cov_dt1_contribs.append(self._compute_new_cov_dt1(source,
tower))
new_cov_dt1 = (math_ops.add_n(new_cov_dt1_contribs)
/ float(self._num_towers))
# See comments in FisherFactor.make_covariance_update_op() for details.
if utils.on_tpu():
new_cov_dt1 = utils.cross_replica_mean(new_cov_dt1)
op2 = moving_averages.assign_moving_average(
self._cov_dt1, new_cov_dt1, ema_decay, zero_debias=ZERO_DEBIAS)
# TODO(b/69112164):
# It's important that _cov and _cov_dt1 remain consistent with each
# other while the inverse ops are happening. How can we ensure this?
# We will need to add explicit synchronization for this to
# work with asynchronous training.
op = control_flow_ops.group(op, op2)
return op
def _compute_new_cov_dt1(self, source, tower): # pylint: disable=missing-docstring
tensor = self._tensors[source][tower]
if self._has_bias:
# This appending is technically done twice (the other time is for
# _compute_new_cov())
tensor = append_homog(tensor)
total_len = array_ops.shape(tensor)[0]
batch_size = total_len // self._num_timesteps
tensor_present = tensor[:-batch_size, :]
tensor_future = tensor[batch_size:, :]
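# Since 'tensor' is the reshape of a [num_uses, batch_size, n] Tensor,
# tensor_present spans uses 0..T-2 and tensor_future spans uses 1..T-1;
# their cross-covariance below estimates the lag-1 statistic.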
# We specify a normalizer for this computation to ensure a PSD Fisher
# block estimate. This is equivalent to padding with zeros, as was done
# in Section B.2 of the appendix.
return compute_cov(
tensor_future, tensor_right=tensor_present, normalizer=total_len)
def _get_data_device(self, tower):
return self._tensors[0][tower].device
@property
def _vec_shape(self):
size = self._tensors[0][0].shape[1] + self._has_bias
return [size]
def get_option1quants(self, damping_func):
damping_id = graph_func_to_id(damping_func)
return self._option1quants_by_damping[damping_id]
def get_option2quants(self, damping_func):
damping_id = graph_func_to_id(damping_func)
return self._option2quants_by_damping[damping_id]
def get_cov_dt1(self):
assert self._cov_dt1 is not None
return self._cov_dt1
def register_cov_dt1(self):
self._make_cov_dt1 = True
def instantiate_cov_variables(self):
super(FullyConnectedMultiKF, self).instantiate_cov_variables()
assert self._cov_dt1 is None
if self._make_cov_dt1:
with variable_scope.variable_scope(self._var_scope):
self._cov_dt1 = variable_scope.get_variable(
"cov_dt1",
initializer=init_ops.zeros_initializer,
shape=self._cov_shape,
trainable=False,
dtype=self._dtype)
def register_option1quants(self, damping_func):
damping_id = self._register_damping(damping_func)
if damping_id not in self._option1quants_registrations:
self._option1quants_registrations.add(damping_id)
def register_option2quants(self, damping_func):
damping_id = self._register_damping(damping_func)
if damping_id not in self._option2quants_registrations:
self._option2quants_registrations.add(damping_id)
def instantiate_inv_variables(self):
super(FullyConnectedMultiKF, self).instantiate_inv_variables()
for damping_id in self._option1quants_registrations:
damping_func = self._damping_funcs_by_id[damping_id]
damping_string = graph_func_to_string(damping_func)
# It's questionable as to whether we should initialize with stuff like
# this at all. Ideally these values should never be used until they are
# updated at least once.
with variable_scope.variable_scope(self._var_scope):
Lmat = variable_scope.get_variable( # pylint: disable=invalid-name
"Lmat_damp{}".format(damping_string),
initializer=inverse_initializer,
shape=self._cov_shape,
trainable=False,
dtype=self._dtype)
psi = variable_scope.get_variable(
"psi_damp{}".format(damping_string),
initializer=init_ops.ones_initializer,
shape=self._vec_shape,
trainable=False,
dtype=self._dtype)
assert damping_id not in self._option1quants_by_damping
self._option1quants_by_damping[damping_id] = (Lmat, psi)
for damping_id in self._option2quants_registrations:
damping_func = self._damping_funcs_by_id[damping_id]
damping_string = graph_func_to_string(damping_func)
# It's questionable as to whether we should initialize with stuff like
# this at all. Ideally these values should never be used until they are
# updated at least once.
with variable_scope.variable_scope(self._var_scope):
Pmat = variable_scope.get_variable( # pylint: disable=invalid-name
"Lmat_damp{}".format(damping_string),
initializer=inverse_initializer,
shape=self._cov_shape,
trainable=False,
dtype=self._dtype)
Kmat = variable_scope.get_variable( # pylint: disable=invalid-name
"Kmat_damp{}".format(damping_string),
initializer=inverse_initializer,
shape=self._cov_shape,
trainable=False,
dtype=self._dtype)
mu = variable_scope.get_variable(
"mu_damp{}".format(damping_string),
initializer=init_ops.ones_initializer,
shape=self._vec_shape,
trainable=False,
dtype=self._dtype)
assert damping_id not in self._option2quants_by_damping
self._option2quants_by_damping[damping_id] = (Pmat, Kmat, mu)
def make_inverse_update_ops(self):
"""Create and return update ops corresponding to registered computations."""
# TODO(b/69918258): Add correctness tests for this method.
# pylint: disable=invalid-name
ops = []
if (len(self._option1quants_by_damping) +
len(self._option2quants_by_damping)):
# Note that C0 and C1 are stand-ins for A0 and A1, or G0 and G1, from
# the pseudo-code in the original paper. Because the computations for
# the A and G cases are essentially the same, they can both be performed by
# the same class (this one).
C1 = self.get_cov_dt1()
# Get the eigendecomposition of C0 (= self.get_cov())
eigen_e, eigen_V = self.get_eigendecomp()
# TODO(b/69678661): Note, there is an implicit assumption here that C1
# and C0 (as represented here by its eigen-decomp) are consistent. This
# could fail to be the case if self._cov and self._cov_dt1 are not updated
# consistently, or are somehow read between or during the cov updates.
# Can this possibly happen? Is there a way to prevent it?
for damping_id, (Lmat_var,
psi_var) in self._option1quants_by_damping.items():
damping = self._damping_funcs_by_id[damping_id]()
damping = math_ops.cast(damping, self._dtype)
invsqrtC0 = math_ops.matmul(
eigen_V * (eigen_e + damping)**(-0.5), eigen_V, transpose_b=True)
# Might need to enforce symmetry lost due to numerical issues.
invsqrtC0 = (invsqrtC0 + array_ops.transpose(invsqrtC0)) / 2.0
# The following line imposes the symmetry assumed by "Option 1" on C1.
# Strangely the code can work okay with this line commented out,
# depending on how psd_eig is defined. I'm not sure why.
C1 = (C1 + array_ops.transpose(C1)) / 2.0
# hPsi = C0^(-1/2) * C1 * C0^(-1/2) (hPsi means hat{Psi})
hPsi = math_ops.matmul(math_ops.matmul(invsqrtC0, C1), invsqrtC0)
# Compute the decomposition U*diag(psi)*U^T = hPsi
psi, U = utils.posdef_eig(hPsi)
# L = C0^(-1/2) * U
Lmat = math_ops.matmul(invsqrtC0, U)
ops.append(Lmat_var.assign(Lmat))
ops.append(psi_var.assign(psi))
for damping_id, (Pmat_var, Kmat_var,
mu_var) in self._option2quants_by_damping.items():
damping = self._damping_funcs_by_id[damping_id]()
damping = math_ops.cast(damping, self._dtype)
# compute C0^(-1/2)
invsqrtC0 = math_ops.matmul(
eigen_V * (eigen_e + damping)**(-0.5), eigen_V, transpose_b=True)
# Might need to enforce symmetry lost due to numerical issues.
invsqrtC0 = (invsqrtC0 + array_ops.transpose(invsqrtC0)) / 2.0
# Compute the product C0^(-1/2) * C1
invsqrtC0C1 = math_ops.matmul(invsqrtC0, C1)
# hPsi = C0^(-1/2) * C1 * C0^(-1/2) (hPsi means hat{Psi})
hPsi = math_ops.matmul(invsqrtC0C1, invsqrtC0)
# Compute the decomposition E*diag(mu)*E^T = hPsi^T * hPsi
# Note that we use the notation mu instead of "m" for the eigenvalues.
# Instead of computing the product hPsi^T * hPsi and then doing an
# eigen-decomposition of this we just compute the SVD of hPsi and then
# square the singular values to get the eigenvalues. For a justification
# of this approach, see:
# https://en.wikipedia.org/wiki/Singular-value_decomposition#Relation_to_eigenvalue_decomposition
sqrtmu, _, E = linalg_ops.svd(hPsi)
mu = math_ops.square(sqrtmu)
# Mathematically, the eigenvalues should not exceed 1.0, but
# due to numerical issues, or possible issues with inconsistent
# values of C1 and (the eigen-decomposition of) C0 they might. So
# we enforce this condition.
mu = math_ops.minimum(mu, 1.0)
# P = (C0^(-1/2) * C1)^T * C0^(-1/2) = C_1^T * C_0^(-1)
Pmat = math_ops.matmul(invsqrtC0C1, invsqrtC0, transpose_a=True)
# K = C_0^(-1/2) * E
Kmat = math_ops.matmul(invsqrtC0, E)
ops.append(Pmat_var.assign(Pmat))
ops.append(Kmat_var.assign(Kmat))
ops.append(mu_var.assign(mu))
ops += super(FullyConnectedMultiKF, self).make_inverse_update_ops()
return [control_flow_ops.group(*ops)]
# pylint: enable=invalid-name
| apache-2.0 |