hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acfa0fdf93cba5568c251614dac534765c5249dd | 1,901 | py | Python | splinter/driver/webdriver/cookie_manager.py | alialdakheel/splinter | b4c48dc0af9ef98d7d9268f42f4d31a51e65fd68 | [
"BSD-3-Clause"
] | null | null | null | splinter/driver/webdriver/cookie_manager.py | alialdakheel/splinter | b4c48dc0af9ef98d7d9268f42f4d31a51e65fd68 | [
"BSD-3-Clause"
] | null | null | null | splinter/driver/webdriver/cookie_manager.py | alialdakheel/splinter | b4c48dc0af9ef98d7d9268f42f4d31a51e65fd68 | [
"BSD-3-Clause"
] | 1 | 2022-03-10T15:23:53.000Z | 2022-03-10T15:23:53.000Z | # -*- coding: utf-8 -*-
# Copyright 2012 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import sys
from splinter.cookie_manager import CookieManagerAPI
if sys.version_info[0] > 2:
from urllib.parse import urlparse
else:
from urlparse import urlparse # NOQA
class CookieManager(CookieManagerAPI):
    """Cookie manager backed by a Selenium WebDriver instance.

    ``self.driver`` (supplied by the surrounding driver object) is the
    WebDriver used for every cookie operation.
    """

    def add(self, cookie, **kwargs):
        """Add every key/value pair of ``cookie`` as a browser cookie.

        Extra cookie attributes (``path``, ``domain``, ...) may be passed
        through ``kwargs`` and are forwarded to the driver with each call.
        """
        for name, value in cookie.items():
            kwargs.update({'name': name, 'value': value})
            self.driver.add_cookie(kwargs)

    def delete(self, *cookies):
        """Delete the named cookies, or every cookie when none are given."""
        if not cookies:
            self.delete_all()
            return
        for cookie_name in cookies:
            self.driver.delete_cookie(cookie_name)

    def delete_all(self):
        """Remove every cookie known to the driver."""
        self.driver.delete_all_cookies()

    def all(self, verbose=False):  # NOQA: A003
        """Return the cookies for the current page.

        With ``verbose=False`` (the default) a ``{name: value}`` dict is
        returned, restricted to cookies whose domain matches the host of
        the currently loaded page.  With ``verbose=True`` the raw cookie
        dictionaries from the driver are returned unfiltered.
        """
        if verbose:
            return self.driver.get_cookies()
        current_host = urlparse(self.driver.current_url).netloc
        cleaned_cookies = {}
        for cookie in self.driver.get_cookies():
            domain = cookie["domain"]
            if domain.startswith("."):
                # Strip the single leading dot used for wildcard domains.
                domain = domain[1:]
            if domain in current_host:
                cleaned_cookies[cookie["name"]] = cookie["value"]
        return cleaned_cookies

    def __getitem__(self, item):
        """Return the value of the cookie named ``item``."""
        return self.driver.get_cookie(item)["value"]

    def __contains__(self, key):
        """Return True when a cookie named ``key`` exists."""
        return self.driver.get_cookie(key) is not None

    def __eq__(self, other_object):
        """Compare the current cookies against a plain ``{name: value}`` dict."""
        current = {}
        for cookie in self.driver.get_cookies():
            current[cookie["name"]] = cookie["value"]
        if isinstance(other_object, dict):
            return current == other_object
        return False
| 29.703125 | 77 | 0.601262 |
acfa0fef68b5b1e295acc3f947698a4c1f90060a | 2,784 | py | Python | anima/rig/curve.py | nocturne25/anima | 225f6d92c169dc81694d630c81c90aacfcffc1c7 | [
"BSD-2-Clause"
] | 7 | 2016-03-30T14:43:33.000Z | 2020-11-12T17:56:40.000Z | anima/rig/curve.py | nocturne25/anima | 225f6d92c169dc81694d630c81c90aacfcffc1c7 | [
"BSD-2-Clause"
] | null | null | null | anima/rig/curve.py | nocturne25/anima | 225f6d92c169dc81694d630c81c90aacfcffc1c7 | [
"BSD-2-Clause"
] | 3 | 2017-04-13T04:29:04.000Z | 2019-05-08T00:28:44.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2012-2015, Anima Istanbul
#
# This module is part of anima-tools and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
import pymel.core as pm
class Curve(object):
    """Thin wrapper around a Maya NURBS curve transform (via pymel).

    Caches the world-space CV positions at construction time and keeps a
    ``curveInfo`` node connected to the curve so its arc length can be
    queried at any time.
    """

    def __init__(self, name_in, curve):
        # Rename the incoming curve and keep its transform node.
        self._curveNode = pm.nt.Transform(pm.rename(curve, name_in))
        self._curveInfo = self._create_curveInfo()
        self._degree = None
        self._spans = None
        self._arcLen = None
        self._numCVs = None
        # Cache the world-space position of every CV.
        self._cvPositions = []
        for j in range (0, self.numCVs):
            self._cvPositions.append(pm.pointPosition(self.curveNode.cv[j], w = 1))

    @property
    # Curve Node Getter
    def curveNode(self):
        return self._curveNode

    @property
    # Curve Degree : self._degree Setter - Getter
    def degree(self):
        # Always re-read from the Maya node rather than trusting the cache.
        self._degree = pm.getAttr(self.curveNode.degree)
        return self._degree

    @degree.setter
    def degree(self, degree):
        # NOTE(review): only updates the cached value, not the Maya node.
        self._degree = degree

    @property
    # Curve Spans : self._spans Setter - Getter
    def spans(self):
        self._spans = pm.getAttr(self.curveNode.spans)
        return self._spans

    @spans.setter
    def spans(self, span):
        # NOTE(review): only updates the cached value, not the Maya node.
        self._spans = span

    @property
    # Number of CVs : self._numCvs Getter
    def numCVs(self):
        # For an open NURBS curve: numCVs = degree + spans.
        self._numCVs = self.degree + self.spans
        return self._numCVs

    @property
    # CV Positions : Gets the cached positions of the cvs
    def cvPositions(self):
        # Cached at __init__/rebuildCurve time; not live Maya data.
        return self._cvPositions

    @property
    # CurveInfo Getter - Setter
    def curveInfo(self):
        return self._curveInfo

    @curveInfo.setter
    def curveInfo(self, infoNode):
        # NOTE(review): the previous curveInfo node is not disconnected or
        # deleted when a new one is assigned -- confirm intended.
        self._curveInfo = infoNode
        pm.connectAttr(self.curveNode.worldSpace, infoNode.inputCurve)

    @property
    # ArcLength of the Curve Getter
    def arclen(self):
        self._arcLen = pm.getAttr(self.curveInfo.arcLength)
        return self._arcLen

    def rebuildCurve(self, spans):
        # Rebuild the curveNode in place with the requested span count
        # (cubic, keeping end points).
        pm.rebuildCurve(self.curveNode, rpo = 1, rt = 0, end = 1, kr = 0, kcp = 0,
                        kep = 1, kt = 0, s = spans, d = 3, tol = 0.01)
        # Refresh the cached CV positions after the rebuild.
        del self._cvPositions[:]
        for j in range (0, self.numCVs):
            self._cvPositions.append(pm.pointPosition(self.curveNode.cv[j], w = 1))

    def _create_curveInfo(self):
        # Create a new curveInfo node and connect it for arc-length queries.
        self._curveInfo = pm.createNode("curveInfo", n= self._curveNode +
                                                       "_curveInfo")
        pm.connectAttr(self._curveNode.worldSpace, self._curveInfo.inputCurve)
        return self._curveInfo
| 28.408163 | 84 | 0.595905 |
acfa106582a1b33e12cd3dde76fbed90b9db62da | 911 | py | Python | insta/admin.py | Alchemy17/photogram | f6dc2da623c4b2df69470ae7eb8cc21fca5669a7 | [
"MIT"
] | null | null | null | insta/admin.py | Alchemy17/photogram | f6dc2da623c4b2df69470ae7eb8cc21fca5669a7 | [
"MIT"
] | null | null | null | insta/admin.py | Alchemy17/photogram | f6dc2da623c4b2df69470ae7eb8cc21fca5669a7 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Image, Profile, Comment
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
class ProfileInline(admin.StackedInline):
    """Edit a user's Profile inline on the User admin page."""
    model = Profile
    # The Profile field pointing back at User.
    fk_name = 'user'
class CustomUserAdmin(UserAdmin):
    """User admin extended with the related Profile as an inline."""
    inlines = (ProfileInline, )
    list_display = ('username', 'email', 'first_name', 'last_name', 'is_staff', 'get_location')

    def get_inline_instances(self, request, obj=None):
        # Hide the Profile inline on the "add user" form; show it only when
        # editing an existing user.
        if not obj:
            return list()
        return super(CustomUserAdmin, self).get_inline_instances(request, obj)

    def get_location(self, instance):
        # NOTE(review): despite its name, this returns the profile's user,
        # not a location field -- confirm whether Profile.location was meant.
        return instance.profile.user
    get_location.short_description = 'User'
# Replace the stock User admin with the profile-aware version above, and
# expose the app's models in the admin site.
admin.site.unregister(User)
admin.site.register(User, CustomUserAdmin)
admin.site.register(Image)
admin.site.register(Profile)
admin.site.register(Comment) | 31.413793 | 95 | 0.739846 |
acfa108cb092c44857a2246c013091c57b53b4cc | 81,793 | py | Python | src/main/resources/twisted/test/test_process.py | olamy/autobahntestsuite-maven-plugin | 1d26f21cf7828d80d0e80bb783999d9283f023be | [
"Apache-2.0"
] | 3 | 2016-02-01T02:29:51.000Z | 2020-09-04T17:19:24.000Z | src/main/resources/twisted/test/test_process.py | olamy/autobahntestsuite-maven-plugin | 1d26f21cf7828d80d0e80bb783999d9283f023be | [
"Apache-2.0"
] | 4 | 2017-02-19T23:58:13.000Z | 2019-11-01T15:31:22.000Z | src/main/resources/twisted/test/test_process.py | olamy/autobahntestsuite-maven-plugin | 1d26f21cf7828d80d0e80bb783999d9283f023be | [
"Apache-2.0"
] | 6 | 2017-02-13T09:11:02.000Z | 2021-06-29T11:22:18.000Z | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test running processes.
"""
import gzip
import os
import sys
import signal
import StringIO
import errno
import gc
import stat
import operator
try:
import fcntl
except ImportError:
fcntl = process = None
else:
from twisted.internet import process
from zope.interface.verify import verifyObject
from twisted.python.log import msg
from twisted.internet import reactor, protocol, error, interfaces, defer
from twisted.trial import unittest
from twisted.python import util, runtime, procutils
class StubProcessProtocol(protocol.ProcessProtocol):
    """
    ProcessProtocol counter-implementation: all methods on this class raise an
    exception, so instances of this may be used to verify that only certain
    methods are called.
    """
    # Subclasses override exactly the methods they expect to be invoked;
    # any unexpected dispatch raises NotImplementedError and fails the test.
    def outReceived(self, data):
        raise NotImplementedError()

    def errReceived(self, data):
        raise NotImplementedError()

    def inConnectionLost(self):
        raise NotImplementedError()

    def outConnectionLost(self):
        raise NotImplementedError()

    def errConnectionLost(self):
        raise NotImplementedError()
class ProcessProtocolTests(unittest.TestCase):
    """
    Tests for behavior provided by the process protocol base class,
    L{protocol.ProcessProtocol}.
    """
    def test_interface(self):
        """
        L{ProcessProtocol} implements L{IProcessProtocol}.
        """
        verifyObject(interfaces.IProcessProtocol, protocol.ProcessProtocol())

    def test_outReceived(self):
        """
        Verify that when stdout is delivered to
        L{ProcessProtocol.childDataReceived}, it is forwarded to
        L{ProcessProtocol.outReceived}.
        """
        received = []
        class OutProtocol(StubProcessProtocol):
            def outReceived(self, data):
                received.append(data)

        bytes = "bytes"
        p = OutProtocol()
        # FD 1 is the child's stdout.
        p.childDataReceived(1, bytes)
        self.assertEqual(received, [bytes])

    def test_errReceived(self):
        """
        Similar to L{test_outReceived}, but for stderr.
        """
        received = []
        class ErrProtocol(StubProcessProtocol):
            def errReceived(self, data):
                received.append(data)

        bytes = "bytes"
        p = ErrProtocol()
        # FD 2 is the child's stderr.
        p.childDataReceived(2, bytes)
        self.assertEqual(received, [bytes])

    def test_inConnectionLost(self):
        """
        Verify that when stdin close notification is delivered to
        L{ProcessProtocol.childConnectionLost}, it is forwarded to
        L{ProcessProtocol.inConnectionLost}.
        """
        lost = []
        class InLostProtocol(StubProcessProtocol):
            def inConnectionLost(self):
                lost.append(None)

        p = InLostProtocol()
        # FD 0 is the child's stdin.
        p.childConnectionLost(0)
        self.assertEqual(lost, [None])

    def test_outConnectionLost(self):
        """
        Similar to L{test_inConnectionLost}, but for stdout.
        """
        lost = []
        class OutLostProtocol(StubProcessProtocol):
            def outConnectionLost(self):
                lost.append(None)

        p = OutLostProtocol()
        p.childConnectionLost(1)
        self.assertEqual(lost, [None])

    def test_errConnectionLost(self):
        """
        Similar to L{test_inConnectionLost}, but for stderr.
        """
        lost = []
        class ErrLostProtocol(StubProcessProtocol):
            def errConnectionLost(self):
                lost.append(None)

        p = ErrLostProtocol()
        p.childConnectionLost(2)
        self.assertEqual(lost, [None])
class TrivialProcessProtocol(protocol.ProcessProtocol):
    """
    Simple process protocol for tests purpose.

    @ivar outData: data received from the child's stdout
    @ivar errData: data received from the child's stderr
    """
    def __init__(self, d):
        """
        Create the deferred that will be fired at the end, and initialize
        data structures.
        """
        self.deferred = d
        self.outData = []
        self.errData = []

    def processEnded(self, reason):
        # Record why the child exited and signal completion.
        self.reason = reason
        self.deferred.callback(None)

    def outReceived(self, data):
        self.outData.append(data)

    def errReceived(self, data):
        self.errData.append(data)
class TestProcessProtocol(protocol.ProcessProtocol):
    # Drives process_tester.py: writes to the child's stdin, then verifies
    # the data echoed back and the order in which the child's FDs close.
    # ``stages`` records each checkpoint reached, expected to end as
    # [1, 2, 3, 4, 5].  ``deferred`` is attached externally by the test.

    def connectionMade(self):
        self.stages = [1]
        self.data = ''
        self.err = ''
        self.transport.write("abcd")

    def childDataReceived(self, childFD, data):
        """
        Override and disable the dispatch provided by the base class to ensure
        that it is really this method which is being called, and the transport
        is not going directly to L{outReceived} or L{errReceived}.
        """
        if childFD == 1:
            self.data += data
        elif childFD == 2:
            self.err += data

    def childConnectionLost(self, childFD):
        """
        Similarly to L{childDataReceived}, disable the automatic dispatch
        provided by the base implementation to verify that the transport is
        calling this method directly.
        """
        if childFD == 1:
            # Child closed stdout: everything written so far must have echoed.
            self.stages.append(2)
            if self.data != "abcd":
                raise RuntimeError(
                    "Data was %r instead of 'abcd'" % (self.data,))
            self.transport.write("1234")
        elif childFD == 2:
            # Child closed stderr after echoing "1234" there.
            self.stages.append(3)
            if self.err != "1234":
                raise RuntimeError(
                    "Err was %r instead of '1234'" % (self.err,))
            self.transport.write("abcd")
            self.stages.append(4)
        elif childFD == 0:
            # Child closed its stdin.
            self.stages.append(5)

    def processEnded(self, reason):
        self.reason = reason
        self.deferred.callback(None)
class EchoProtocol(protocol.ProcessProtocol):
    # Feeds a large fixed payload (s * n bytes) to an echoing child and
    # verifies the child sends back exactly the same bytes, in order.

    s = "1234567" * 1001
    n = 10
    finished = 0
    # Set to a description tuple/string on the first mismatch or abnormal exit.
    failure = None

    def __init__(self, onEnded):
        self.onEnded = onEnded
        self.count = 0

    def connectionMade(self):
        assert self.n > 2
        for i in range(self.n - 2):
            self.transport.write(self.s)
        # test writeSequence
        self.transport.writeSequence([self.s, self.s])
        self.buffer = self.s * self.n

    def outReceived(self, data):
        # buffer() compares the incoming chunk against the expected slice
        # of the payload without copying (Python 2 API).
        if buffer(self.buffer, self.count, len(data)) != buffer(data):
            self.failure = ("wrong bytes received", data, self.count)
            self.transport.closeStdin()
        else:
            self.count += len(data)
            if self.count == len(self.buffer):
                # Everything echoed back; closing stdin lets the child exit.
                self.transport.closeStdin()

    def processEnded(self, reason):
        self.finished = 1
        if not reason.check(error.ProcessDone):
            self.failure = "process didn't terminate normally: " + str(reason)
        self.onEnded.callback(self)
class SignalProtocol(protocol.ProcessProtocol):
    """
    A process protocol that sends a signal when data is first received.

    @ivar deferred: deferred firing on C{processEnded}.
    @type deferred: L{defer.Deferred}

    @ivar signal: the signal to send to the process.
    @type signal: C{str}

    @ivar signaled: A flag tracking whether the signal has been sent to the
        child or not yet. C{False} until it is sent, then C{True}.
    @type signaled: C{bool}
    """

    def __init__(self, deferred, sig):
        self.deferred = deferred
        self.signal = sig
        self.signaled = False

    def outReceived(self, data):
        """
        Handle the first output from the child process (which indicates it
        is set up and ready to receive the signal) by sending the signal to
        it.  Also log all output to help with debugging.
        """
        msg("Received %r from child stdout" % (data,))
        if not self.signaled:
            self.signaled = True
            self.transport.signalProcess(self.signal)

    def errReceived(self, data):
        """
        Log all data received from the child's stderr to help with
        debugging.
        """
        msg("Received %r from child stderr" % (data,))

    def processEnded(self, reason):
        """
        Callback C{self.deferred} with C{None} if C{reason} is a
        L{error.ProcessTerminated} failure with C{exitCode} set to C{None},
        C{signal} set to C{self.signal}, and C{status} holding the status code
        of the exited process.  Otherwise, errback with a C{ValueError}
        describing the problem.
        """
        msg("Child exited: %r" % (reason.getTraceback(),))
        if not reason.check(error.ProcessTerminated):
            return self.deferred.errback(
                ValueError("wrong termination: %s" % (reason,)))
        v = reason.value
        # ``signal`` may be given either as a name (e.g. 'TERM') or a number.
        if isinstance(self.signal, str):
            signalValue = getattr(signal, 'SIG' + self.signal)
        else:
            signalValue = self.signal
        if v.exitCode is not None:
            return self.deferred.errback(
                ValueError("SIG%s: exitCode is %s, not None" %
                           (self.signal, v.exitCode)))
        if v.signal != signalValue:
            return self.deferred.errback(
                ValueError("SIG%s: .signal was %s, wanted %s" %
                           (self.signal, v.signal, signalValue)))
        if os.WTERMSIG(v.status) != signalValue:
            return self.deferred.errback(
                ValueError('SIG%s: %s' % (self.signal, os.WTERMSIG(v.status))))
        self.deferred.callback(None)
class TestManyProcessProtocol(TestProcessProtocol):
    """
    Variant of L{TestProcessProtocol} whose deferred distinguishes a clean
    exit from a failure, for use when many children run concurrently.
    """

    def __init__(self):
        # Fires with None on a clean exit, errbacks with the failure otherwise.
        self.deferred = defer.Deferred()

    def processEnded(self, reason):
        """Record the termination reason and fire the deferred accordingly."""
        self.reason = reason
        if not reason.check(error.ProcessDone):
            self.deferred.errback(reason)
        else:
            self.deferred.callback(None)
class UtilityProcessProtocol(protocol.ProcessProtocol):
    """
    Helper class for launching a Python process and getting a result from it.

    @ivar program: A string giving a Python program for the child process to
    run.
    """
    program = None

    def run(cls, reactor, argv, env):
        """
        Run a Python process connected to a new instance of this protocol
        class.  Return the protocol instance.

        The Python process is given C{self.program} on the command line to
        execute, in addition to anything specified by C{argv}.  C{env} is
        the complete environment.
        """
        exe = sys.executable
        self = cls()
        reactor.spawnProcess(
            self, exe, [exe, "-c", self.program] + argv, env=env)
        return self
    run = classmethod(run)

    def __init__(self):
        # Chunks of child stdout, in arrival order.
        self.bytes = []
        # Deferreds handed out by getResult, fired on process exit.
        self.requests = []

    def parseChunks(self, bytes):
        """
        Called with all bytes received on stdout when the process exits.
        """
        raise NotImplementedError()

    def getResult(self):
        """
        Return a Deferred which will fire with the result of L{parseChunks}
        when the child process exits.
        """
        d = defer.Deferred()
        self.requests.append(d)
        return d

    def _fireResultDeferreds(self, result):
        """
        Callback all Deferreds returned up until now by L{getResult}
        with the given result object.
        """
        requests = self.requests
        # Drop the list so a late getResult() after exit fails loudly
        # instead of waiting forever.
        self.requests = None
        for d in requests:
            d.callback(result)

    def outReceived(self, bytes):
        """
        Accumulate output from the child process in a list.
        """
        self.bytes.append(bytes)

    def processEnded(self, reason):
        """
        Handle process termination by parsing all received output and firing
        any waiting Deferreds.
        """
        self._fireResultDeferreds(self.parseChunks(self.bytes))
class GetArgumentVector(UtilityProcessProtocol):
    """
    Protocol which runs a child process that serializes its C{sys.argv} and
    makes the resulting argument list available to interested parties.
    """
    program = (
        "from sys import stdout, argv\n"
        "stdout.write(chr(0).join(argv))\n"
        "stdout.flush()\n")

    def parseChunks(self, chunks):
        """
        Split the child's single unterminated NUL-separated output line back
        into the list of argv strings and return it.
        """
        serialized = ''.join(chunks)
        return serialized.split('\0')
class GetEnvironmentDictionary(UtilityProcessProtocol):
    """
    Protocol which will read a serialized environment dict from a process
    and expose it to interested parties.
    """
    program = (
        "from sys import stdout\n"
        "from os import environ\n"
        "items = environ.iteritems()\n"
        "stdout.write(chr(0).join([k + chr(0) + v for k, v in items]))\n"
        "stdout.flush()\n")

    def parseChunks(self, chunks):
        """
        Parse the output from the process to which this protocol was
        connected, which is a single unterminated line of \\0-separated
        strings giving key value pairs of the environment from that process.
        Return this as a dictionary.
        """
        environString = ''.join(chunks)
        if not environString:
            return {}
        environ = iter(environString.split('\0'))
        d = {}
        # Consume the flat token stream two at a time: key, then value.
        # ``program`` always writes complete pairs, so a value is expected
        # to follow every key.
        while 1:
            try:
                k = environ.next()
            except StopIteration:
                break
            else:
                v = environ.next()
            d[k] = v
        return d
class ProcessTestCase(unittest.TestCase):
    """Test running a process."""
    # Subclasses may flip this to exercise the PTY code path.
    usePTY = False

    def testStdio(self):
        """twisted.internet.stdio test."""
        exe = sys.executable
        scriptPath = util.sibpath(__file__, "process_twisted.py")
        p = Accumulator()
        d = p.endedDeferred = defer.Deferred()
        env = {"PYTHONPATH": os.pathsep.join(sys.path)}
        reactor.spawnProcess(p, exe, [exe, "-u", scriptPath], env=env,
                             path=None, usePTY=self.usePTY)
        p.transport.write("hello, world")
        p.transport.write("abc")
        p.transport.write("123")
        p.transport.closeStdin()

        def processEnded(ign):
            # The child echoes everything written to its stdin.
            self.assertEqual(p.outF.getvalue(), "hello, worldabc123",
                             "Output follows:\n"
                             "%s\n"
                             "Error message from process_twisted follows:\n"
                             "%s\n" % (p.outF.getvalue(), p.errF.getvalue()))
        return d.addCallback(processEnded)

    def test_unsetPid(self):
        """
        Test if pid is None/non-None before/after process termination.  This
        reuses process_echoer.py to get a process that blocks on stdin.
        """
        finished = defer.Deferred()
        p = TrivialProcessProtocol(finished)
        exe = sys.executable
        scriptPath = util.sibpath(__file__, "process_echoer.py")
        procTrans = reactor.spawnProcess(p, exe,
                                         [exe, scriptPath], env=None)
        self.failUnless(procTrans.pid)

        def afterProcessEnd(ignored):
            # The transport forgets the pid once the child has been reaped.
            self.assertEqual(procTrans.pid, None)

        p.transport.closeStdin()
        return finished.addCallback(afterProcessEnd)

    def test_process(self):
        """
        Test running a process: check its output, its exitCode, and some
        properties of signalProcess.
        """
        exe = sys.executable
        scriptPath = util.sibpath(__file__, "process_tester.py")
        d = defer.Deferred()
        p = TestProcessProtocol()
        p.deferred = d
        reactor.spawnProcess(p, exe, [exe, "-u", scriptPath], env=None)
        def check(ignored):
            self.assertEqual(p.stages, [1, 2, 3, 4, 5])
            f = p.reason
            f.trap(error.ProcessTerminated)
            self.assertEqual(f.value.exitCode, 23)
            # would .signal be available on non-posix?
            # self.assertEqual(f.value.signal, None)
            self.assertRaises(
                error.ProcessExitedAlready, p.transport.signalProcess, 'INT')
            # Best-effort cleanup of files the child may have created.
            try:
                import process_tester, glob
                for f in glob.glob(process_tester.test_file_match):
                    os.remove(f)
            except:
                pass
        d.addCallback(check)
        return d

    def testManyProcesses(self):
        # Spawn 50 copies of process_tester.py concurrently and verify each
        # one runs through all five stages and exits with status 23.

        def _check(results, protocols):
            for p in protocols:
                self.assertEqual(p.stages, [1, 2, 3, 4, 5], "[%d] stages = %s" % (id(p.transport), str(p.stages)))
                # test status code
                f = p.reason
                f.trap(error.ProcessTerminated)
                self.assertEqual(f.value.exitCode, 23)
        exe = sys.executable
        scriptPath = util.sibpath(__file__, "process_tester.py")
        args = [exe, "-u", scriptPath]
        protocols = []
        deferreds = []

        for i in xrange(50):
            p = TestManyProcessProtocol()
            protocols.append(p)
            reactor.spawnProcess(p, exe, args, env=None)
            deferreds.append(p.deferred)

        deferredList = defer.DeferredList(deferreds, consumeErrors=True)
        deferredList.addCallback(_check, protocols)
        return deferredList

    def test_echo(self):
        """
        Spawning a subprocess which echoes its stdin to its stdout via
        C{reactor.spawnProcess} will result in that echoed output being
        delivered to outReceived.
        """
        finished = defer.Deferred()
        p = EchoProtocol(finished)

        exe = sys.executable
        scriptPath = util.sibpath(__file__, "process_echoer.py")
        reactor.spawnProcess(p, exe, [exe, scriptPath], env=None)

        def asserts(ignored):
            self.failIf(p.failure, p.failure)
            self.failUnless(hasattr(p, 'buffer'))
            self.assertEqual(len(''.join(p.buffer)), len(p.s * p.n))

        def takedownProcess(err):
            # Close stdin so the child can exit even when an assertion failed.
            p.transport.closeStdin()
            return err

        return finished.addCallback(asserts).addErrback(takedownProcess)

    def testCommandLine(self):
        # Arguments containing quotes, backslashes and whitespace must reach
        # the child process unmangled.
        args = [r'a\"b ', r'a\b ', r' a\\"b', r' a\\b', r'"foo bar" "', '\tab', '"\\', 'a"b', "a'b"]
        pyExe = sys.executable
        scriptPath = util.sibpath(__file__, "process_cmdline.py")
        p = Accumulator()
        d = p.endedDeferred = defer.Deferred()
        reactor.spawnProcess(p, pyExe, [pyExe, "-u", scriptPath]+args, env=None,
                             path=None)

        def processEnded(ign):
            self.assertEqual(p.errF.getvalue(), "")
            recvdArgs = p.outF.getvalue().splitlines()
            self.assertEqual(recvdArgs, args)
        return d.addCallback(processEnded)

    def test_wrongArguments(self):
        """
        Test invalid arguments to spawnProcess: arguments and environment
        must only contain strings or unicode, and no null bytes.
        """
        exe = sys.executable
        p = protocol.ProcessProtocol()

        badEnvs = [
            {"foo": 2},
            {"foo": "egg\0a"},
            {3: "bar"},
            {"bar\0foo": "bar"}]

        badArgs = [
            [exe, 2],
            "spam",
            [exe, "foo\0bar"]]

        # Sanity check - this will fail for people who have mucked with
        # their site configuration in a stupid way, but there's nothing we
        # can do about that.
        badUnicode = u'\N{SNOWMAN}'
        try:
            badUnicode.encode(sys.getdefaultencoding())
        except UnicodeEncodeError:
            # Okay, that unicode doesn't encode, put it in as a bad environment
            # key.
            badEnvs.append({badUnicode: 'value for bad unicode key'})
            badEnvs.append({'key for bad unicode value': badUnicode})
            badArgs.append([exe, badUnicode])
        else:
            # It _did_ encode.  Most likely, Gtk2 is being used and the
            # default system encoding is UTF-8, which can encode anything.
            # In any case, if implicit unicode -> str conversion works for
            # that string, we can't test that TypeError gets raised instead,
            # so just leave it off.
            pass

        for env in badEnvs:
            self.assertRaises(
                TypeError,
                reactor.spawnProcess, p, exe, [exe, "-c", ""], env=env)

        for args in badArgs:
            self.assertRaises(
                TypeError,
                reactor.spawnProcess, p, exe, args, env=None)

    # Use upper-case so that the environment key test uses an upper case
    # name: some versions of Windows only support upper case environment
    # variable names, and I think Python (as of 2.5) doesn't use the right
    # syscall for lowercase or mixed case names to work anyway.
    okayUnicode = u"UNICODE"
    encodedValue = "UNICODE"

    def _deprecatedUnicodeSupportTest(self, processProtocolClass, argv=[], env={}):
        """
        Check that a deprecation warning is emitted when passing unicode to
        spawnProcess for an argv value or an environment key or value.
        Check that the warning is of the right type, has the right message,
        and refers to the correct file.  Unfortunately, don't check that the
        line number is correct, because that is too hard for me to figure
        out.

        @param processProtocolClass: A L{UtilityProcessProtocol} subclass
            which will be instantiated to communicate with the child process.

        @param argv: The argv argument to spawnProcess.

        @param env: The env argument to spawnProcess.

        @return: A Deferred which fires when the test is complete.
        """
        # NOTE(review): mutable default arguments above; harmless here since
        # neither argv nor env is mutated.
        # Sanity to check to make sure we can actually encode this unicode
        # with the default system encoding.  This may be excessively
        # paranoid. -exarkun
        self.assertEqual(
            self.okayUnicode.encode(sys.getdefaultencoding()),
            self.encodedValue)

        p = self.assertWarns(DeprecationWarning,
            "Argument strings and environment keys/values passed to "
            "reactor.spawnProcess should be str, not unicode.", __file__,
            processProtocolClass.run, reactor, argv, env)
        return p.getResult()

    def test_deprecatedUnicodeArgvSupport(self):
        """
        Test that a unicode string passed for an argument value is allowed
        if it can be encoded with the default system encoding, but that a
        deprecation warning is emitted.
        """
        d = self._deprecatedUnicodeSupportTest(GetArgumentVector, argv=[self.okayUnicode])
        def gotArgVector(argv):
            self.assertEqual(argv, ['-c', self.encodedValue])
        d.addCallback(gotArgVector)
        return d

    def test_deprecatedUnicodeEnvKeySupport(self):
        """
        Test that a unicode string passed for the key of the environment
        dictionary is allowed if it can be encoded with the default system
        encoding, but that a deprecation warning is emitted.
        """
        d = self._deprecatedUnicodeSupportTest(
            GetEnvironmentDictionary, env={self.okayUnicode: self.encodedValue})
        def gotEnvironment(environ):
            self.assertEqual(environ[self.encodedValue], self.encodedValue)
        d.addCallback(gotEnvironment)
        return d

    def test_deprecatedUnicodeEnvValueSupport(self):
        """
        Test that a unicode string passed for the value of the environment
        dictionary is allowed if it can be encoded with the default system
        encoding, but that a deprecation warning is emitted.
        """
        d = self._deprecatedUnicodeSupportTest(
            GetEnvironmentDictionary, env={self.encodedValue: self.okayUnicode})
        def gotEnvironment(environ):
            # On Windows, the environment contains more things than we
            # specified, so only make sure that at least the key we wanted
            # is there, rather than testing the dictionary for exact
            # equality.
            self.assertEqual(environ[self.encodedValue], self.encodedValue)
        d.addCallback(gotEnvironment)
        return d
class TwoProcessProtocol(protocol.ProcessProtocol):
    """Track termination of one child of a concurrently-spawned pair."""
    # Index of this child within the pair; assigned by the test.
    num = -1
    finished = 0

    def __init__(self):
        self.deferred = defer.Deferred()

    def outReceived(self, data):
        # Output is irrelevant to these tests; discard it.
        pass

    def processEnded(self, reason):
        self.finished = 1
        self.deferred.callback(None)
class TestTwoProcessesBase:
    # Mixin driving two process_reader.py children at once; subclasses mix
    # in a TestCase and add platform-specific teardown/kill behaviour.

    def setUp(self):
        self.processes = [None, None]
        self.pp = [None, None]
        self.done = 0
        # Set to 1 for debugging chatter (Python 2 print statements below).
        self.verbose = 0

    def createProcesses(self, usePTY=0):
        exe = sys.executable
        scriptPath = util.sibpath(__file__, "process_reader.py")
        for num in (0,1):
            self.pp[num] = TwoProcessProtocol()
            self.pp[num].num = num
            p = reactor.spawnProcess(self.pp[num],
                                     exe, [exe, "-u", scriptPath], env=None,
                                     usePTY=usePTY)
            self.processes[num] = p

    def close(self, num):
        # Close the numbered child's stdin; it must still be running.
        if self.verbose: print "closing stdin [%d]" % num
        p = self.processes[num]
        pp = self.pp[num]
        self.failIf(pp.finished, "Process finished too early")
        p.loseConnection()
        if self.verbose: print self.pp[0].finished, self.pp[1].finished

    def _onClose(self):
        # Fires once both children have reported processEnded.
        return defer.gatherResults([ p.deferred for p in self.pp ])

    def testClose(self):
        if self.verbose: print "starting processes"
        self.createProcesses()
        reactor.callLater(1, self.close, 0)
        reactor.callLater(2, self.close, 1)
        return self._onClose()
class TestTwoProcessesNonPosix(TestTwoProcessesBase, unittest.TestCase):
    # Runs the base close test without any POSIX-specific additions.
    pass
class TestTwoProcessesPosix(TestTwoProcessesBase, unittest.TestCase):
    # Adds signal-based kill tests and PTY variants of the base tests.

    def tearDown(self):
        # Make sure no child outlives the test.
        for pp, pr in zip(self.pp, self.processes):
            if not pp.finished:
                try:
                    os.kill(pr.pid, signal.SIGTERM)
                except OSError:
                    # If the test failed the process may already be dead
                    # The error here is only noise
                    pass
        return self._onClose()

    def kill(self, num):
        if self.verbose: print "kill [%d] with SIGTERM" % num
        p = self.processes[num]
        pp = self.pp[num]
        self.failIf(pp.finished, "Process finished too early")
        os.kill(p.pid, signal.SIGTERM)
        if self.verbose: print self.pp[0].finished, self.pp[1].finished

    def testKill(self):
        if self.verbose: print "starting processes"
        self.createProcesses(usePTY=0)
        reactor.callLater(1, self.kill, 0)
        reactor.callLater(2, self.kill, 1)
        return self._onClose()

    def testClosePty(self):
        # Same as testClose, but with the children attached to PTYs.
        if self.verbose: print "starting processes"
        self.createProcesses(usePTY=1)
        reactor.callLater(1, self.close, 0)
        reactor.callLater(2, self.close, 1)
        return self._onClose()

    def testKillPty(self):
        # Same as testKill, but with the children attached to PTYs.
        if self.verbose: print "starting processes"
        self.createProcesses(usePTY=1)
        reactor.callLater(1, self.kill, 0)
        reactor.callLater(2, self.kill, 1)
        return self._onClose()
class FDChecker(protocol.ProcessProtocol):
    """
    Drive process_fds.py through a scripted conversation over several extra
    child file descriptors, recording the first protocol violation seen.

    States:
      1 - wrote 'abcd' to child fd 0, waiting for 'righto' on child fd 1
      2 - wrote 'efgh' to child fd 3, waiting for child fd 4 to close
      3 - closed child fd 5, waiting for 'closed' on child fd 1
      4 - waiting for the child to exit with status 0
    """
    state = 0
    data = ""
    # Human-readable description of the first failure, or None on success.
    failed = None

    def __init__(self, d):
        # Fired (with None) when the exchange finishes, pass or fail;
        # callers inspect ``failed`` afterwards.
        self.deferred = d

    def fail(self, why):
        """Record a failure description and finish early."""
        self.failed = why
        self.deferred.callback(None)

    def connectionMade(self):
        self.transport.writeToChild(0, "abcd")
        self.state = 1

    def childDataReceived(self, childFD, data):
        if self.state == 1:
            if childFD != 1:
                # Bug fix: the format arguments were reversed, which both
                # garbled the message and made %d fail on the string data.
                self.fail("read '%s' on fd %d (not 1) during state 1" \
                          % (data, childFD))
                return
            self.data += data
            #print "len", len(self.data)
            if len(self.data) == 6:
                if self.data != "righto":
                    self.fail("got '%s' on fd1, expected 'righto'" \
                              % self.data)
                    return
                self.data = ""
                self.state = 2
                #print "state2", self.state
                self.transport.writeToChild(3, "efgh")
            return
        if self.state == 2:
            # No data is expected while waiting for fd 4 to close.
            self.fail("read '%s' on fd %s during state 2" % (data, childFD))
            return
        if self.state == 3:
            if childFD != 1:
                self.fail("read '%s' on fd %s (not 1) during state 3" \
                          % (data, childFD))
                return
            self.data += data
            if len(self.data) == 6:
                if self.data != "closed":
                    self.fail("got '%s' on fd1, expected 'closed'" \
                              % self.data)
                    return
                self.state = 4
            return
        if self.state == 4:
            # No data is expected after 'closed' has been received.
            self.fail("read '%s' on fd %s during state 4" % (data, childFD))
            return

    def childConnectionLost(self, childFD):
        if self.state == 1:
            self.fail("got connectionLost(%d) during state 1" % childFD)
            return
        if self.state == 2:
            if childFD != 4:
                self.fail("got connectionLost(%d) (not 4) during state 2" \
                          % childFD)
                return
            self.state = 3
            self.transport.closeChildFD(5)
            return

    def processEnded(self, status):
        rc = status.value.exitCode
        if self.state != 4:
            self.fail("processEnded early, rc %d" % rc)
            return
        if status.value.signal != None:
            self.fail("processEnded with signal %s" % status.value.signal)
            return
        if rc != 0:
            self.fail("processEnded with rc %d" % rc)
            return
        # Clean exit after a complete conversation: success.
        self.deferred.callback(None)
class FDTest(unittest.TestCase):

    def testFD(self):
        # Run process_fds.py with a custom child FD mapping and let
        # FDChecker drive and verify the conversation over those FDs.
        exe = sys.executable
        scriptPath = util.sibpath(__file__, "process_fds.py")
        d = defer.Deferred()
        p = FDChecker(d)
        reactor.spawnProcess(p, exe, [exe, "-u", scriptPath], env=None,
                             path=None,
                             childFDs={0:"w", 1:"r", 2:2,
                                       3:"w", 4:"r", 5:"w"})
        d.addCallback(lambda x : self.failIf(p.failed, p.failed))
        return d

    def testLinger(self):
        # See what happens when all the pipes close before the process
        # actually stops. This test *requires* SIGCHLD catching to work,
        # as there is no other way to find out the process is done.
        exe = sys.executable
        scriptPath = util.sibpath(__file__, "process_linger.py")
        p = Accumulator()
        d = p.endedDeferred = defer.Deferred()
        reactor.spawnProcess(p, exe, [exe, "-u", scriptPath], env=None,
                             path=None,
                             childFDs={1:"r", 2:2},
                             )
        def processEnded(ign):
            self.assertEqual(p.outF.getvalue(),
                             "here is some text\ngoodbye\n")
        return d.addCallback(processEnded)
class Accumulator(protocol.ProcessProtocol):
    """Accumulate data from a process."""
    closed = 0            # flipped to 1 once processEnded has been called
    endedDeferred = None  # if a test sets this, it is fired (with None) on exit
    def connectionMade(self):
        # Fresh buffers per connection; tests inspect outF/errF afterwards.
        self.outF = StringIO.StringIO()
        self.errF = StringIO.StringIO()
    def outReceived(self, d):
        # Accumulate stdout bytes.
        self.outF.write(d)
    def errReceived(self, d):
        # Accumulate stderr bytes.
        self.errF.write(d)
    def outConnectionLost(self):
        pass
    def errConnectionLost(self):
        pass
    def processEnded(self, reason):
        self.closed = 1
        if self.endedDeferred is not None:
            # Clear before firing so the deferred can only fire once.
            d, self.endedDeferred = self.endedDeferred, None
            d.callback(None)
class PosixProcessBase:
    """
    Test running processes.
    """
    usePTY = False  # subclasses override to exercise the PTY code path
    def getCommand(self, commandName):
        """
        Return the path of the shell command named C{commandName}, looking at
        common locations.
        """
        if os.path.exists('/bin/%s' % (commandName,)):
            cmd = '/bin/%s' % (commandName,)
        elif os.path.exists('/usr/bin/%s' % (commandName,)):
            cmd = '/usr/bin/%s' % (commandName,)
        else:
            raise RuntimeError(
                "%s not found in /bin or /usr/bin" % (commandName,))
        return cmd
    def testNormalTermination(self):
        """
        A process exiting with status 0 reports L{error.ProcessDone} with
        C{exitCode} 0 and no signal.
        """
        cmd = self.getCommand('true')
        d = defer.Deferred()
        p = TrivialProcessProtocol(d)
        reactor.spawnProcess(p, cmd, ['true'], env=None,
                             usePTY=self.usePTY)
        def check(ignored):
            p.reason.trap(error.ProcessDone)
            self.assertEqual(p.reason.value.exitCode, 0)
            self.assertEqual(p.reason.value.signal, None)
        d.addCallback(check)
        return d
    def test_abnormalTermination(self):
        """
        When a process terminates with a system exit code set to 1,
        C{processEnded} is called with a L{error.ProcessTerminated} error,
        the C{exitCode} attribute reflecting the system exit code.
        """
        exe = sys.executable
        d = defer.Deferred()
        p = TrivialProcessProtocol(d)
        reactor.spawnProcess(p, exe, [exe, '-c', 'import sys; sys.exit(1)'],
                             env=None, usePTY=self.usePTY)
        def check(ignored):
            p.reason.trap(error.ProcessTerminated)
            self.assertEqual(p.reason.value.exitCode, 1)
            self.assertEqual(p.reason.value.signal, None)
        d.addCallback(check)
        return d
    def _testSignal(self, sig):
        """
        Start process_signal.py under a L{SignalProtocol} which sends it
        the signal named C{sig}; the returned deferred fires once the
        protocol has verified the resulting termination.
        """
        exe = sys.executable
        scriptPath = util.sibpath(__file__, "process_signal.py")
        d = defer.Deferred()
        p = SignalProtocol(d, sig)
        reactor.spawnProcess(p, exe, [exe, "-u", scriptPath], env=None,
                             usePTY=self.usePTY)
        return d
    def test_signalHUP(self):
        """
        Sending the SIGHUP signal to a running process interrupts it, and
        C{processEnded} is called with a L{error.ProcessTerminated} instance
        with the C{exitCode} set to C{None} and the C{signal} attribute set to
        C{signal.SIGHUP}. C{os.WTERMSIG} can also be used on the C{status}
        attribute to extract the signal value.
        """
        return self._testSignal('HUP')
    def test_signalINT(self):
        """
        Sending the SIGINT signal to a running process interrupts it, and
        C{processEnded} is called with a L{error.ProcessTerminated} instance
        with the C{exitCode} set to C{None} and the C{signal} attribute set to
        C{signal.SIGINT}. C{os.WTERMSIG} can also be used on the C{status}
        attribute to extract the signal value.
        """
        return self._testSignal('INT')
    def test_signalKILL(self):
        """
        Sending the SIGKILL signal to a running process interrupts it, and
        C{processEnded} is called with a L{error.ProcessTerminated} instance
        with the C{exitCode} set to C{None} and the C{signal} attribute set to
        C{signal.SIGKILL}. C{os.WTERMSIG} can also be used on the C{status}
        attribute to extract the signal value.
        """
        return self._testSignal('KILL')
    def test_signalTERM(self):
        """
        Sending the SIGTERM signal to a running process interrupts it, and
        C{processEnded} is called with a L{error.ProcessTerminated} instance
        with the C{exitCode} set to C{None} and the C{signal} attribute set to
        C{signal.SIGTERM}. C{os.WTERMSIG} can also be used on the C{status}
        attribute to extract the signal value.
        """
        return self._testSignal('TERM')
    def test_childSignalHandling(self):
        """
        The disposition of signals which are ignored in the parent
        process is reset to the default behavior for the child
        process.
        """
        # Somewhat arbitrarily select SIGUSR1 here.  It satisfies our
        # requirements that:
        #    - The interpreter not fiddle around with the handler
        #      behind our backs at startup time (this disqualifies
        #      signals like SIGINT and SIGPIPE).
        #    - The default behavior is to exit.
        #
        # This lets us send the signal to the child and then verify
        # that it exits with a status code indicating that it was
        # indeed the signal which caused it to exit.
        which = signal.SIGUSR1
        # Ignore the signal in the parent (and make sure we clean it
        # up).
        handler = signal.signal(which, signal.SIG_IGN)
        self.addCleanup(signal.signal, signal.SIGUSR1, handler)
        # Now do the test.
        return self._testSignal(signal.SIGUSR1)
    def test_executionError(self):
        """
        Raise an error during execvpe to check error management.
        """
        cmd = self.getCommand('false')
        d = defer.Deferred()
        p = TrivialProcessProtocol(d)
        def buggyexecvpe(command, args, environment):
            raise RuntimeError("Ouch")
        # Monkey-patch os.execvpe so the forked child fails to exec; the
        # finally block restores the real implementation either way.
        oldexecvpe = os.execvpe
        os.execvpe = buggyexecvpe
        try:
            reactor.spawnProcess(p, cmd, ['false'], env=None,
                                 usePTY=self.usePTY)
            def check(ignored):
                errData = "".join(p.errData + p.outData)
                self.assertIn("Upon execvpe", errData)
                self.assertIn("Ouch", errData)
            d.addCallback(check)
        finally:
            os.execvpe = oldexecvpe
        return d
    def test_errorInProcessEnded(self):
        """
        The handler which reaps a process is removed when the process is
        reaped, even if the protocol's C{processEnded} method raises an
        exception.
        """
        connected = defer.Deferred()
        ended = defer.Deferred()
        # This script runs until we disconnect its transport.
        pythonExecutable = sys.executable
        scriptPath = util.sibpath(__file__, "process_echoer.py")
        class ErrorInProcessEnded(protocol.ProcessProtocol):
            """
            A protocol that raises an error in C{processEnded}.
            """
            def makeConnection(self, transport):
                connected.callback(transport)
            def processEnded(self, reason):
                reactor.callLater(0, ended.callback, None)
                raise RuntimeError("Deliberate error")
        # Launch the process.
        reactor.spawnProcess(
            ErrorInProcessEnded(), pythonExecutable,
            [pythonExecutable, scriptPath],
            env=None, path=None)
        pid = []
        def cbConnected(transport):
            pid.append(transport.pid)
            # There's now a reap process handler registered.
            self.assertIn(transport.pid, process.reapProcessHandlers)
            # Kill the process cleanly, triggering an error in the protocol.
            transport.loseConnection()
        connected.addCallback(cbConnected)
        def checkTerminated(ignored):
            # The exception was logged.
            excs = self.flushLoggedErrors(RuntimeError)
            self.assertEqual(len(excs), 1)
            # The process is no longer scheduled for reaping.
            self.assertNotIn(pid[0], process.reapProcessHandlers)
        ended.addCallback(checkTerminated)
        return ended
class MockSignal(object):
    """
    Stand-in for the L{signal} module whose C{signal()} is a no-op.

    Installing a handler through this object changes nothing; the call
    simply reports the handler currently registered for the signal.  All
    other attribute lookups are forwarded to the real module.
    """
    def signal(self, sig, action):
        """
        Pretend to install C{action} for C{sig}: return the currently
        registered (real) handler without modifying anything.
        """
        current = signal.getsignal(sig)
        return current
    def __getattr__(self, attr):
        """
        Delegate every other attribute access to the real L{signal} module.
        """
        return getattr(signal, attr)
class MockOS(object):
    """
    The mock OS: overwrite L{os}, L{fcntl} and {sys} functions with fake ones.

    @ivar exited: set to True when C{_exit} is called.
    @type exited: C{bool}

    @ivar O_RDWR: dumb value faking C{os.O_RDWR}.
    @type O_RDWR: C{int}

    @ivar O_NOCTTY: dumb value faking C{os.O_NOCTTY}.
    @type O_NOCTTY: C{int}

    @ivar WNOHANG: dumb value faking C{os.WNOHANG}.
    @type WNOHANG: C{int}

    @ivar raiseFork: if not C{None}, subsequent calls to fork will raise this
        object.
    @type raiseFork: C{NoneType} or C{Exception}

    @ivar raiseExec: if set, subsequent calls to execvpe will raise an error.
    @type raiseExec: C{bool}

    @ivar fdio: fake file object returned by calls to fdopen.
    @type fdio: C{StringIO.StringIO}

    @ivar actions: hold names of some actions executed by the object, in order
        of execution.
    @type actions: C{list} of C{str}

    @ivar closed: keep track of the file descriptor closed.
    @type closed: C{list} of C{int}

    @ivar child: whether fork return for the child or the parent.
    @type child: C{bool}

    @ivar pipeCount: count the number of time that C{os.pipe} has been called.
    @type pipeCount: C{int}

    @ivar raiseWaitPid: if set, subsequent calls to waitpid will raise
        the error specified.
    @type raiseWaitPid: C{None} or a class

    @ivar waitChild: if set, subsequent calls to waitpid will return it.
    @type waitChild: C{None} or a tuple

    @ivar euid: the uid returned by the fake C{os.geteuid}
    @type euid: C{int}

    @ivar egid: the gid returned by the fake C{os.getegid}
    @type egid: C{int}

    @ivar seteuidCalls: stored results of C{os.seteuid} calls.
    @type seteuidCalls: C{list}

    @ivar setegidCalls: stored results of C{os.setegid} calls.
    @type setegidCalls: C{list}

    @ivar path: the path returned by C{os.path.expanduser}.
    @type path: C{str}

    @ivar raiseKill: if set, subsequent call to kill will raise the error
        specified.
    @type raiseKill: C{None} or an exception instance.
    """
    # Class-level defaults; tests flip these per-instance to select the
    # code path (child vs parent, error injection, etc.) being exercised.
    exited = False
    raiseExec = False
    fdio = None
    child = True
    raiseWaitPid = None
    raiseFork = None
    waitChild = None
    euid = 0
    egid = 0
    path = None
    raiseKill = None
    def __init__(self):
        """
        Initialize data structures.
        """
        self.actions = []
        self.closed = []
        self.pipeCount = 0
        # Negative constants so any accidental use as a real fd/flag fails.
        self.O_RDWR = -1
        self.O_NOCTTY = -2
        self.WNOHANG = -4
        self.WEXITSTATUS = lambda x: 0
        self.WIFEXITED = lambda x: 1
        self.seteuidCalls = []
        self.setegidCalls = []
    def open(self, dev, flags):
        """
        Fake C{os.open}. Return a non fd number to be sure it's not used
        elsewhere.
        """
        return -3
    def fstat(self, fd):
        """
        Fake C{os.fstat}. Return a C{os.stat_result} filled with garbage.
        """
        return os.stat_result((0,) * 10)
    def fdopen(self, fd, flag):
        """
        Fake C{os.fdopen}. Return a StringIO object whose content can be tested
        later via C{self.fdio}.
        """
        self.fdio = StringIO.StringIO()
        return self.fdio
    def setsid(self):
        """
        Fake C{os.setsid}. Do nothing.
        """
    def fork(self):
        """
        Fake C{os.fork}. Save the action in C{self.actions}, and return 0 if
        C{self.child} is set, or a dumb number.
        """
        # Record whether the GC was enabled at fork time; the tests assert
        # that spawnProcess disables it around the fork.
        self.actions.append(('fork', gc.isenabled()))
        if self.raiseFork is not None:
            raise self.raiseFork
        elif self.child:
            # Child result is 0
            return 0
        else:
            return 21
    def close(self, fd):
        """
        Fake C{os.close}, saving the closed fd in C{self.closed}.
        """
        self.closed.append(fd)
    def dup2(self, fd1, fd2):
        """
        Fake C{os.dup2}. Do nothing.
        """
    def write(self, fd, data):
        """
        Fake C{os.write}. Do nothing.
        """
    def execvpe(self, command, args, env):
        """
        Fake C{os.execvpe}. Save the action, and raise an error if
        C{self.raiseExec} is set.
        """
        self.actions.append('exec')
        if self.raiseExec:
            raise RuntimeError("Bar")
    def pipe(self):
        """
        Fake C{os.pipe}. Return non fd numbers to be sure it's not used
        elsewhere, and increment C{self.pipeCount}. This is used to uniquify
        the result.
        """
        self.pipeCount += 1
        return - 2 * self.pipeCount + 1, - 2 * self.pipeCount
    def ttyname(self, fd):
        """
        Fake C{os.ttyname}. Return a dumb string.
        """
        return "foo"
    def _exit(self, code):
        """
        Fake C{os._exit}. Save the action, set the C{self.exited} flag, and
        raise C{SystemError}.
        """
        self.actions.append('exit')
        self.exited = True
        # Don't forget to raise an error, or you'll end up in parent
        # code path.
        raise SystemError()
    def ioctl(self, fd, flags, arg):
        """
        Override C{fcntl.ioctl}. Do nothing.
        """
    def setNonBlocking(self, fd):
        """
        Override C{fdesc.setNonBlocking}. Do nothing.
        """
    def waitpid(self, pid, options):
        """
        Override C{os.waitpid}. Return values meaning that the child process
        has exited, save executed action.
        """
        self.actions.append('waitpid')
        if self.raiseWaitPid is not None:
            raise self.raiseWaitPid
        if self.waitChild is not None:
            return self.waitChild
        return 1, 0
    def settrace(self, arg):
        """
        Override C{sys.settrace} to keep coverage working.
        """
    def getgid(self):
        """
        Override C{os.getgid}. Return a dumb number.
        """
        return 1235
    def getuid(self):
        """
        Override C{os.getuid}. Return a dumb number.
        """
        return 1237
    def setuid(self, val):
        """
        Override C{os.setuid}. Do nothing.
        """
        self.actions.append(('setuid', val))
    def setgid(self, val):
        """
        Override C{os.setgid}. Do nothing.
        """
        self.actions.append(('setgid', val))
    def setregid(self, val1, val2):
        """
        Override C{os.setregid}. Do nothing.
        """
        self.actions.append(('setregid', val1, val2))
    def setreuid(self, val1, val2):
        """
        Override C{os.setreuid}.  Save the action.
        """
        self.actions.append(('setreuid', val1, val2))
    def switchUID(self, uid, gid):
        """
        Override C{util.switchuid}. Save the action.
        """
        self.actions.append(('switchuid', uid, gid))
    def openpty(self):
        """
        Override C{pty.openpty}, returning fake file descriptors.
        """
        return -12, -13
    def geteuid(self):
        """
        Mock C{os.geteuid}, returning C{self.euid} instead.
        """
        return self.euid
    def getegid(self):
        """
        Mock C{os.getegid}, returning C{self.egid} instead.
        """
        return self.egid
    def seteuid(self, egid):
        """
        Mock C{os.seteuid}, store result.
        """
        self.seteuidCalls.append(egid)
    def setegid(self, egid):
        """
        Mock C{os.setegid}, store result.
        """
        self.setegidCalls.append(egid)
    def expanduser(self, path):
        """
        Mock C{os.path.expanduser}.
        """
        return self.path
    def getpwnam(self, user):
        """
        Mock C{pwd.getpwnam}.
        """
        # NOTE(review): real getpwnam returns a 7-field struct; only the
        # first fields are consumed by the code under test here.
        return 0, 0, 1, 2
    def listdir(self, path):
        """
        Override C{os.listdir}, returning fake contents of '/dev/fd'
        """
        return "-1", "-2"
    def kill(self, pid, signalID):
        """
        Override C{os.kill}: save the action and raise C{self.raiseKill} if
        specified.
        """
        self.actions.append(('kill', pid, signalID))
        if self.raiseKill is not None:
            raise self.raiseKill
# These stubs subclass the POSIX-only process transports, so they can only
# be defined when the process module imported successfully (it is None on
# Windows).  Each one disables startReading so no real reactor I/O occurs.
if process is not None:
    class DumbProcessWriter(process.ProcessWriter):
        """
        A fake L{process.ProcessWriter} used for tests.
        """
        def startReading(self):
            """
            Here's the faking: don't do anything here.
            """
    class DumbProcessReader(process.ProcessReader):
        """
        A fake L{process.ProcessReader} used for tests.
        """
        def startReading(self):
            """
            Here's the faking: don't do anything here.
            """
    class DumbPTYProcess(process.PTYProcess):
        """
        A fake L{process.PTYProcess} used for tests.
        """
        def startReading(self):
            """
            Here's the faking: don't do anything here.
            """
class MockProcessTestCase(unittest.TestCase):
    """
    Mock a process runner to test forked child code path.
    """
    if process is None:
        skip = "twisted.internet.process is never used on Windows"
    def setUp(self):
        """
        Replace L{process} os, fcntl, sys, switchUID, fdesc and pty modules
        with the mock class L{MockOS}.
        """
        # Restore the garbage collector to its current state after the test.
        if gc.isenabled():
            self.addCleanup(gc.enable)
        else:
            self.addCleanup(gc.disable)
        self.mockos = MockOS()
        self.mockos.euid = 1236
        self.mockos.egid = 1234
        self.patch(process, "os", self.mockos)
        self.patch(process, "fcntl", self.mockos)
        self.patch(process, "sys", self.mockos)
        self.patch(process, "switchUID", self.mockos.switchUID)
        self.patch(process, "fdesc", self.mockos)
        self.patch(process.Process, "processReaderFactory", DumbProcessReader)
        self.patch(process.Process, "processWriterFactory", DumbProcessWriter)
        self.patch(process, "pty", self.mockos)
        self.mocksig = MockSignal()
        self.patch(process, "signal", self.mocksig)
    def tearDown(self):
        """
        Reset processes registered for reap.
        """
        process.reapProcessHandlers = {}
    def test_mockFork(self):
        """
        Test a classic spawnProcess. Check the path of the client code:
        fork, exec, exit.
        """
        gc.enable()
        cmd = '/mock/ouch'
        d = defer.Deferred()
        p = TrivialProcessProtocol(d)
        try:
            reactor.spawnProcess(p, cmd, ['ouch'], env=None,
                                 usePTY=False)
        except SystemError:
            # Consistency: use assertTrue rather than the deprecated
            # assert_ alias used elsewhere in this class.
            self.assertTrue(self.mockos.exited)
            self.assertEqual(
                self.mockos.actions, [("fork", False), "exec", "exit"])
        else:
            self.fail("Should not be here")
        # It should leave the garbage collector disabled.
        self.assertFalse(gc.isenabled())
    def _mockForkInParentTest(self):
        """
        Assert that in the main process, spawnProcess disables the garbage
        collector, calls fork, closes the pipe file descriptors it created for
        the child process, and calls waitpid.
        """
        self.mockos.child = False
        cmd = '/mock/ouch'
        d = defer.Deferred()
        p = TrivialProcessProtocol(d)
        reactor.spawnProcess(p, cmd, ['ouch'], env=None,
                             usePTY=False)
        # It should close the first read pipe, and the 2 last write pipes
        self.assertEqual(set(self.mockos.closed), set([-1, -4, -6]))
        self.assertEqual(self.mockos.actions, [("fork", False), "waitpid"])
    def test_mockForkInParentGarbageCollectorEnabled(self):
        """
        The garbage collector should be enabled when L{reactor.spawnProcess}
        returns if it was initially enabled.

        @see L{_mockForkInParentTest}
        """
        gc.enable()
        self._mockForkInParentTest()
        self.assertTrue(gc.isenabled())
    def test_mockForkInParentGarbageCollectorDisabled(self):
        """
        The garbage collector should be disabled when L{reactor.spawnProcess}
        returns if it was initially disabled.

        @see L{_mockForkInParentTest}
        """
        gc.disable()
        self._mockForkInParentTest()
        self.assertFalse(gc.isenabled())
    def test_mockForkTTY(self):
        """
        Test a TTY spawnProcess: check the path of the client code:
        fork, exec, exit.
        """
        cmd = '/mock/ouch'
        d = defer.Deferred()
        p = TrivialProcessProtocol(d)
        try:
            reactor.spawnProcess(p, cmd, ['ouch'], env=None,
                                 usePTY=True)
        except SystemError:
            self.assertTrue(self.mockos.exited)
            self.assertEqual(
                self.mockos.actions, [("fork", False), "exec", "exit"])
        else:
            self.fail("Should not be here")
    def _mockWithForkError(self):
        """
        Assert that if the fork call fails, no other process setup calls are
        made and that spawnProcess raises the exception fork raised.
        """
        self.mockos.raiseFork = OSError(errno.EAGAIN, None)
        protocol = TrivialProcessProtocol(None)
        self.assertRaises(OSError, reactor.spawnProcess, protocol, None)
        self.assertEqual(self.mockos.actions, [("fork", False)])
    def test_mockWithForkErrorGarbageCollectorEnabled(self):
        """
        The garbage collector should be enabled when L{reactor.spawnProcess}
        raises because L{os.fork} raised, if it was initially enabled.
        """
        gc.enable()
        self._mockWithForkError()
        self.assertTrue(gc.isenabled())
    def test_mockWithForkErrorGarbageCollectorDisabled(self):
        """
        The garbage collector should be disabled when
        L{reactor.spawnProcess} raises because L{os.fork} raised, if it was
        initially disabled.
        """
        gc.disable()
        self._mockWithForkError()
        self.assertFalse(gc.isenabled())
    def test_mockForkErrorCloseFDs(self):
        """
        When C{os.fork} raises an exception, the file descriptors created
        before are closed and don't leak.
        """
        self._mockWithForkError()
        self.assertEqual(set(self.mockos.closed), set([-1, -4, -6, -2, -3, -5]))
    def test_mockForkErrorGivenFDs(self):
        """
        When C{os.forks} raises an exception and that file descriptors have
        been specified with the C{childFDs} arguments of
        L{reactor.spawnProcess}, they are not closed.
        """
        self.mockos.raiseFork = OSError(errno.EAGAIN, None)
        protocol = TrivialProcessProtocol(None)
        self.assertRaises(OSError, reactor.spawnProcess, protocol, None,
            childFDs={0: -10, 1: -11, 2: -13})
        self.assertEqual(self.mockos.actions, [("fork", False)])
        self.assertEqual(self.mockos.closed, [])
        # We can also put "r" or "w" to let twisted create the pipes
        self.assertRaises(OSError, reactor.spawnProcess, protocol, None,
            childFDs={0: "r", 1: -11, 2: -13})
        self.assertEqual(set(self.mockos.closed), set([-1, -2]))
    def test_mockForkErrorClosePTY(self):
        """
        When C{os.fork} raises an exception, the file descriptors created by
        C{pty.openpty} are closed and don't leak, when C{usePTY} is set to
        C{True}.
        """
        self.mockos.raiseFork = OSError(errno.EAGAIN, None)
        protocol = TrivialProcessProtocol(None)
        self.assertRaises(OSError, reactor.spawnProcess, protocol, None,
                          usePTY=True)
        self.assertEqual(self.mockos.actions, [("fork", False)])
        self.assertEqual(set(self.mockos.closed), set([-12, -13]))
    def test_mockForkErrorPTYGivenFDs(self):
        """
        If a tuple is passed to C{usePTY} to specify slave and master file
        descriptors and that C{os.fork} raises an exception, these file
        descriptors aren't closed.
        """
        self.mockos.raiseFork = OSError(errno.EAGAIN, None)
        protocol = TrivialProcessProtocol(None)
        self.assertRaises(OSError, reactor.spawnProcess, protocol, None,
                          usePTY=(-20, -21, 'foo'))
        self.assertEqual(self.mockos.actions, [("fork", False)])
        self.assertEqual(self.mockos.closed, [])
    def test_mockWithExecError(self):
        """
        Spawn a process but simulate an error during execution in the client
        path: C{os.execvpe} raises an error. It should close all the standard
        fds, try to print the error encountered, and exit cleanly.
        """
        cmd = '/mock/ouch'
        d = defer.Deferred()
        p = TrivialProcessProtocol(d)
        self.mockos.raiseExec = True
        try:
            reactor.spawnProcess(p, cmd, ['ouch'], env=None,
                                 usePTY=False)
        except SystemError:
            self.assertTrue(self.mockos.exited)
            self.assertEqual(
                self.mockos.actions, [("fork", False), "exec", "exit"])
            # Check that fd have been closed
            self.assertIn(0, self.mockos.closed)
            self.assertIn(1, self.mockos.closed)
            self.assertIn(2, self.mockos.closed)
            # Check content of traceback
            self.assertIn("RuntimeError: Bar", self.mockos.fdio.getvalue())
        else:
            self.fail("Should not be here")
    def test_mockSetUid(self):
        """
        Try creating a process with setting its uid: it's almost the same path
        as the standard path, but with a C{switchUID} call before the exec.
        """
        cmd = '/mock/ouch'
        d = defer.Deferred()
        p = TrivialProcessProtocol(d)
        try:
            reactor.spawnProcess(p, cmd, ['ouch'], env=None,
                                 usePTY=False, uid=8080)
        except SystemError:
            self.assertTrue(self.mockos.exited)
            self.assertEqual(
                self.mockos.actions,
                [('fork', False), ('setuid', 0), ('setgid', 0),
                 ('switchuid', 8080, 1234), 'exec', 'exit'])
        else:
            self.fail("Should not be here")
    def test_mockSetUidInParent(self):
        """
        When spawning a child process with a UID different from the UID of the
        current process, the current process does not have its UID changed.
        """
        self.mockos.child = False
        cmd = '/mock/ouch'
        d = defer.Deferred()
        p = TrivialProcessProtocol(d)
        reactor.spawnProcess(p, cmd, ['ouch'], env=None,
                             usePTY=False, uid=8080)
        self.assertEqual(self.mockos.actions, [('fork', False), 'waitpid'])
    def test_mockPTYSetUid(self):
        """
        Try creating a PTY process with setting its uid: it's almost the same
        path as the standard path, but with a C{switchUID} call before the
        exec.
        """
        cmd = '/mock/ouch'
        d = defer.Deferred()
        p = TrivialProcessProtocol(d)
        try:
            reactor.spawnProcess(p, cmd, ['ouch'], env=None,
                                 usePTY=True, uid=8081)
        except SystemError:
            self.assertTrue(self.mockos.exited)
            self.assertEqual(
                self.mockos.actions,
                [('fork', False), ('setuid', 0), ('setgid', 0),
                 ('switchuid', 8081, 1234), 'exec', 'exit'])
        else:
            self.fail("Should not be here")
    def test_mockPTYSetUidInParent(self):
        """
        When spawning a child process with PTY and a UID different from the UID
        of the current process, the current process does not have its UID
        changed.
        """
        self.mockos.child = False
        cmd = '/mock/ouch'
        d = defer.Deferred()
        p = TrivialProcessProtocol(d)
        oldPTYProcess = process.PTYProcess
        try:
            process.PTYProcess = DumbPTYProcess
            reactor.spawnProcess(p, cmd, ['ouch'], env=None,
                                 usePTY=True, uid=8080)
        finally:
            process.PTYProcess = oldPTYProcess
        self.assertEqual(self.mockos.actions, [('fork', False), 'waitpid'])
    def test_mockWithWaitError(self):
        """
        Test that reapProcess logs errors raised.
        """
        self.mockos.child = False
        cmd = '/mock/ouch'
        self.mockos.waitChild = (0, 0)
        d = defer.Deferred()
        p = TrivialProcessProtocol(d)
        proc = reactor.spawnProcess(p, cmd, ['ouch'], env=None,
                             usePTY=False)
        self.assertEqual(self.mockos.actions, [("fork", False), "waitpid"])
        self.mockos.raiseWaitPid = OSError()
        proc.reapProcess()
        errors = self.flushLoggedErrors()
        self.assertEqual(len(errors), 1)
        errors[0].trap(OSError)
    def test_mockErrorECHILDInReapProcess(self):
        """
        Test that reapProcess doesn't log anything when waitpid raises a
        C{OSError} with errno C{ECHILD}.
        """
        self.mockos.child = False
        cmd = '/mock/ouch'
        self.mockos.waitChild = (0, 0)
        d = defer.Deferred()
        p = TrivialProcessProtocol(d)
        proc = reactor.spawnProcess(p, cmd, ['ouch'], env=None,
                             usePTY=False)
        self.assertEqual(self.mockos.actions, [("fork", False), "waitpid"])
        self.mockos.raiseWaitPid = OSError()
        self.mockos.raiseWaitPid.errno = errno.ECHILD
        # This should not produce any errors
        proc.reapProcess()
    def test_mockErrorInPipe(self):
        """
        If C{os.pipe} raises an exception after some pipes where created, the
        created pipes are closed and don't leak.
        """
        pipes = [-1, -2, -3, -4]
        def pipe():
            try:
                return pipes.pop(0), pipes.pop(0)
            except IndexError:
                raise OSError()
        self.mockos.pipe = pipe
        protocol = TrivialProcessProtocol(None)
        self.assertRaises(OSError, reactor.spawnProcess, protocol, None)
        self.assertEqual(self.mockos.actions, [])
        self.assertEqual(set(self.mockos.closed), set([-4, -3, -2, -1]))
    def test_kill(self):
        """
        L{process.Process.signalProcess} calls C{os.kill} translating the given
        signal string to the PID.
        """
        self.mockos.child = False
        self.mockos.waitChild = (0, 0)
        cmd = '/mock/ouch'
        p = TrivialProcessProtocol(None)
        proc = reactor.spawnProcess(p, cmd, ['ouch'], env=None, usePTY=False)
        proc.signalProcess("KILL")
        self.assertEqual(self.mockos.actions,
            [('fork', False), 'waitpid', ('kill', 21, signal.SIGKILL)])
    def test_killExited(self):
        """
        L{process.Process.signalProcess} raises L{error.ProcessExitedAlready}
        if the process has exited.
        """
        self.mockos.child = False
        cmd = '/mock/ouch'
        p = TrivialProcessProtocol(None)
        proc = reactor.spawnProcess(p, cmd, ['ouch'], env=None, usePTY=False)
        # We didn't specify a waitpid value, so the waitpid call in
        # registerReapProcessHandler has already reaped the process
        self.assertRaises(error.ProcessExitedAlready,
                          proc.signalProcess, "KILL")
    def test_killExitedButNotDetected(self):
        """
        L{process.Process.signalProcess} raises L{error.ProcessExitedAlready}
        if the process has exited but that twisted hasn't seen it (for example,
        if the process has been waited outside of twisted): C{os.kill} then
        raise C{OSError} with C{errno.ESRCH} as errno.
        """
        self.mockos.child = False
        self.mockos.waitChild = (0, 0)
        cmd = '/mock/ouch'
        p = TrivialProcessProtocol(None)
        proc = reactor.spawnProcess(p, cmd, ['ouch'], env=None, usePTY=False)
        self.mockos.raiseKill = OSError(errno.ESRCH, "Not found")
        self.assertRaises(error.ProcessExitedAlready,
                          proc.signalProcess, "KILL")
    def test_killErrorInKill(self):
        """
        L{process.Process.signalProcess} doesn't mask C{OSError} exceptions if
        the errno is different from C{errno.ESRCH}.
        """
        self.mockos.child = False
        self.mockos.waitChild = (0, 0)
        cmd = '/mock/ouch'
        p = TrivialProcessProtocol(None)
        proc = reactor.spawnProcess(p, cmd, ['ouch'], env=None, usePTY=False)
        self.mockos.raiseKill = OSError(errno.EINVAL, "Invalid signal")
        err = self.assertRaises(OSError,
                                proc.signalProcess, "KILL")
        # Consistency: assertEqual, not the deprecated assertEquals alias.
        self.assertEqual(err.errno, errno.EINVAL)
class PosixProcessTestCase(unittest.TestCase, PosixProcessBase):
    # add two non-pty test cases
    def test_stderr(self):
        """
        Bytes written to stderr by the spawned process are passed to the
        C{errReceived} callback on the C{ProcessProtocol} passed to
        C{spawnProcess}.
        """
        cmd = sys.executable
        value = "42"
        p = Accumulator()
        d = p.endedDeferred = defer.Deferred()
        reactor.spawnProcess(p, cmd,
                             [cmd, "-c",
                              "import sys; sys.stderr.write('%s')" % (value,)],
                             env=None, path="/tmp",
                             usePTY=self.usePTY)
        def processEnded(ign):
            self.assertEqual(value, p.errF.getvalue())
        return d.addCallback(processEnded)
    def testProcess(self):
        # Pipe text through gzip -c and verify that decompressing the
        # accumulated stdout round-trips the original data.
        cmd = self.getCommand('gzip')
        s = "there's no place like home!\n" * 3
        p = Accumulator()
        d = p.endedDeferred = defer.Deferred()
        reactor.spawnProcess(p, cmd, [cmd, "-c"], env=None, path="/tmp",
                             usePTY=self.usePTY)
        p.transport.write(s)
        p.transport.closeStdin()
        def processEnded(ign):
            f = p.outF
            f.seek(0, 0)
            gf = gzip.GzipFile(fileobj=f)
            self.assertEqual(gf.read(), s)
        return d.addCallback(processEnded)
class PosixProcessTestCasePTY(unittest.TestCase, PosixProcessBase):
    """
    Just like PosixProcessTestCase, but use ptys instead of pipes.
    """
    usePTY = True
    # PTYs only offer one input and one output. What still makes sense?
    # testNormalTermination
    # test_abnormalTermination
    # testSignal
    # testProcess, but not without p.transport.closeStdin
    #  might be solveable: TODO: add test if so
    def testOpeningTTY(self):
        # process_tty.py echoes its input; under a PTY the terminal itself
        # also echoes, hence the doubled (and \r\n-terminated) output below.
        exe = sys.executable
        scriptPath = util.sibpath(__file__, "process_tty.py")
        p = Accumulator()
        d = p.endedDeferred = defer.Deferred()
        reactor.spawnProcess(p, exe, [exe, "-u", scriptPath], env=None,
                            path=None, usePTY=self.usePTY)
        p.transport.write("hello world!\n")
        def processEnded(ign):
            self.assertRaises(
                error.ProcessExitedAlready, p.transport.signalProcess, 'HUP')
            self.assertEqual(
                p.outF.getvalue(),
                "hello world!\r\nhello world!\r\n",
                "Error message from process_tty follows:\n\n%s\n\n" % p.outF.getvalue())
        return d.addCallback(processEnded)
    def testBadArgs(self):
        # usePTY is incompatible with an explicit childFDs mapping.
        pyExe = sys.executable
        pyArgs = [pyExe, "-u", "-c", "print 'hello'"]
        p = Accumulator()
        self.assertRaises(ValueError, reactor.spawnProcess, p, pyExe, pyArgs,
            usePTY=1, childFDs={1:'r'})
class Win32SignalProtocol(SignalProtocol):
    """
    A win32-specific process protocol that handles C{processEnded}
    differently: processes should exit with exit code 1.
    """
    def processEnded(self, reason):
        """
        Callback C{self.deferred} with C{None} if C{reason} is a
        L{error.ProcessTerminated} failure with C{exitCode} set to 1.
        Otherwise, errback with a C{ValueError} describing the problem.
        """
        if not reason.check(error.ProcessTerminated):
            return self.deferred.errback(
                ValueError("wrong termination: %s" % (reason,)))
        v = reason.value
        if v.exitCode != 1:
            # Fix: the exit code lives on the ProcessTerminated value (v),
            # not on the Failure itself; reason.exitCode raised
            # AttributeError whenever this errback path was taken.
            return self.deferred.errback(
                ValueError("Wrong exit code: %s" % (v.exitCode,)))
        self.deferred.callback(None)
class Win32ProcessTestCase(unittest.TestCase):
    """
    Test process programs that are packaged with twisted.
    """
    def testStdinReader(self):
        # process_stdinreader.py writes markers around whatever it reads
        # from stdin; verify stdout/stderr accumulate as expected.
        pyExe = sys.executable
        scriptPath = util.sibpath(__file__, "process_stdinreader.py")
        p = Accumulator()
        d = p.endedDeferred = defer.Deferred()
        reactor.spawnProcess(p, pyExe, [pyExe, "-u", scriptPath], env=None,
                             path=None)
        p.transport.write("hello, world")
        p.transport.closeStdin()
        def processEnded(ign):
            self.assertEqual(p.errF.getvalue(), "err\nerr\n")
            self.assertEqual(p.outF.getvalue(), "out\nhello, world\nout\n")
        return d.addCallback(processEnded)
    def testBadArgs(self):
        # uid, gid, usePTY and custom childFDs are all POSIX-only features
        # and must be rejected on Windows.
        pyExe = sys.executable
        pyArgs = [pyExe, "-u", "-c", "print 'hello'"]
        p = Accumulator()
        self.assertRaises(ValueError,
            reactor.spawnProcess, p, pyExe, pyArgs, uid=1)
        self.assertRaises(ValueError,
            reactor.spawnProcess, p, pyExe, pyArgs, gid=1)
        self.assertRaises(ValueError,
            reactor.spawnProcess, p, pyExe, pyArgs, usePTY=1)
        self.assertRaises(ValueError,
            reactor.spawnProcess, p, pyExe, pyArgs, childFDs={1:'r'})
    def _testSignal(self, sig):
        """
        Start process_signal.py under a L{Win32SignalProtocol} which sends
        it the signal named C{sig}; the returned deferred fires once the
        protocol has verified the termination.
        """
        exe = sys.executable
        scriptPath = util.sibpath(__file__, "process_signal.py")
        d = defer.Deferred()
        p = Win32SignalProtocol(d, sig)
        reactor.spawnProcess(p, exe, [exe, "-u", scriptPath], env=None)
        return d
    def test_signalTERM(self):
        """
        Sending the SIGTERM signal terminates a created process, and
        C{processEnded} is called with a L{error.ProcessTerminated} instance
        with the C{exitCode} attribute set to 1.
        """
        return self._testSignal('TERM')
    def test_signalINT(self):
        """
        Sending the SIGINT signal terminates a created process, and
        C{processEnded} is called with a L{error.ProcessTerminated} instance
        with the C{exitCode} attribute set to 1.
        """
        return self._testSignal('INT')
    def test_signalKILL(self):
        """
        Sending the SIGKILL signal terminates a created process, and
        C{processEnded} is called with a L{error.ProcessTerminated} instance
        with the C{exitCode} attribute set to 1.
        """
        return self._testSignal('KILL')
    def test_closeHandles(self):
        """
        The win32 handles should be properly closed when the process exits.
        """
        import win32api
        connected = defer.Deferred()
        ended = defer.Deferred()
        class SimpleProtocol(protocol.ProcessProtocol):
            """
            A protocol that fires deferreds when connected and disconnected.
            """
            def makeConnection(self, transport):
                connected.callback(transport)
            def processEnded(self, reason):
                ended.callback(None)
        p = SimpleProtocol()
        pyExe = sys.executable
        pyArgs = [pyExe, "-u", "-c", "print 'hello'"]
        proc = reactor.spawnProcess(p, pyExe, pyArgs)
        def cbConnected(transport):
            self.assertIdentical(transport, proc)
            # perform a basic validity test on the handles
            win32api.GetHandleInformation(proc.hProcess)
            win32api.GetHandleInformation(proc.hThread)
            # And save their values for later
            self.hProcess = proc.hProcess
            self.hThread = proc.hThread
        connected.addCallback(cbConnected)
        def checkTerminated(ignored):
            # The attributes on the process object must be reset...
            self.assertIdentical(proc.pid, None)
            self.assertIdentical(proc.hProcess, None)
            self.assertIdentical(proc.hThread, None)
            # ...and the handles must be closed.
            self.assertRaises(win32api.error,
                              win32api.GetHandleInformation, self.hProcess)
            self.assertRaises(win32api.error,
                              win32api.GetHandleInformation, self.hThread)
        ended.addCallback(checkTerminated)
        return defer.gatherResults([connected, ended])
class Win32UnicodeEnvironmentTest(unittest.TestCase):
    """
    Tests for Unicode environment on Windows
    """
    # Key/value deliberately ASCII-encodable so the round-trip below works.
    goodKey = u'UNICODE'
    goodValue = u'UNICODE'
    def test_encodableUnicodeEnvironment(self):
        """
        Test C{os.environ} (inherited by every subprocess on Windows) that
        contains an ascii-encodable Unicode string. This is different from
        passing Unicode environment explicitly to spawnProcess (which is not
        supported).
        """
        os.environ[self.goodKey] = self.goodValue
        # Ensure the environment variable is removed again after the test.
        self.addCleanup(operator.delitem, os.environ, self.goodKey)
        p = GetEnvironmentDictionary.run(reactor, [], {})
        def gotEnvironment(environ):
            # The child reports its environment as byte strings; compare
            # against the ascii-encoded key/value.
            self.assertEqual(
                environ[self.goodKey.encode('ascii')],
                self.goodValue.encode('ascii'))
        return p.getResult().addCallback(gotEnvironment)
class Dumbwin32procPidTest(unittest.TestCase):
    """
    Simple test for the pid attribute of Process on win32.
    """
    def test_pid(self):
        """
        Launch process with mock win32process. The only mock aspect of this
        module is that the pid of the process created will always be 42.
        """
        from twisted.internet import _dumbwin32proc
        from twisted.test import mock_win32process
        # Swap the real win32process module for the mock for the duration
        # of this test; self.patch undoes it automatically.
        self.patch(_dumbwin32proc, "win32process", mock_win32process)
        exe = sys.executable
        scriptPath = util.sibpath(__file__, "process_cmdline.py")
        d = defer.Deferred()
        processProto = TrivialProcessProtocol(d)
        comspec = str(os.environ["COMSPEC"])
        cmd = [comspec, "/c", exe, scriptPath]
        p = _dumbwin32proc.Process(reactor,
                                   processProto,
                                   None,
                                   cmd,
                                   {},
                                   None)
        # The mock always reports pid 42; repr must include it as well.
        self.assertEqual(42, p.pid)
        self.assertEqual("<Process pid=42>", repr(p))
        def pidCompleteCb(result):
            # After termination the pid attribute must be reset to None.
            self.assertEqual(None, p.pid)
        return d.addCallback(pidCompleteCb)
class UtilTestCase(unittest.TestCase):
    """
    Tests for process-related helper functions (currently only
    L{procutils.which}.
    """
    def setUp(self):
        """
        Create several directories and files, some of which are executable
        and some of which are not. Save the current PATH setting.
        """
        j = os.path.join
        base = self.mktemp()
        self.foo = j(base, "foo")
        self.baz = j(base, "baz")
        self.foobar = j(self.foo, "bar")
        self.foobaz = j(self.foo, "baz")
        self.bazfoo = j(self.baz, "foo")
        self.bazbar = j(self.baz, "bar")
        for d in self.foobar, self.foobaz, self.bazfoo, self.bazbar:
            os.makedirs(d)
        # NOTE(review): 0700 octal literals and the file() builtin are
        # Python-2-only; this module predates Python 3.
        for name, mode in [(j(self.foobaz, "executable"), 0700),
                           (j(self.foo, "executable"), 0700),
                           (j(self.bazfoo, "executable"), 0700),
                           (j(self.bazfoo, "executable.bin"), 0700),
                           (j(self.bazbar, "executable"), 0)]:
            f = file(name, "w")
            f.close()
            os.chmod(name, mode)
        # Remember the original PATH so tearDown can restore it, then point
        # PATH at the freshly created directories.
        self.oldPath = os.environ.get('PATH', None)
        os.environ['PATH'] = os.pathsep.join((
            self.foobar, self.foobaz, self.bazfoo, self.bazbar))
    def tearDown(self):
        """
        Restore the saved PATH setting, and set all created files readable
        again so that they can be deleted easily.
        """
        os.chmod(os.path.join(self.bazbar, "executable"), stat.S_IWUSR)
        if self.oldPath is None:
            try:
                del os.environ['PATH']
            except KeyError:
                pass
        else:
            os.environ['PATH'] = self.oldPath
    def test_whichWithoutPATH(self):
        """
        Test that if C{os.environ} does not have a C{'PATH'} key,
        L{procutils.which} returns an empty list.
        """
        del os.environ['PATH']
        self.assertEqual(procutils.which("executable"), [])
    def testWhich(self):
        """
        L{procutils.which} returns executables found on PATH in PATH order;
        on Windows the mode bits are ignored, so the non-executable file is
        included as well.
        """
        j = os.path.join
        paths = procutils.which("executable")
        expectedPaths = [j(self.foobaz, "executable"),
                         j(self.bazfoo, "executable")]
        if runtime.platform.isWindows():
            expectedPaths.append(j(self.bazbar, "executable"))
        self.assertEqual(paths, expectedPaths)
    def testWhichPathExt(self):
        """
        With PATHEXT set, L{procutils.which} also matches files carrying one
        of the listed extensions (here: "executable.bin").
        """
        j = os.path.join
        old = os.environ.get('PATHEXT', None)
        os.environ['PATHEXT'] = os.pathsep.join(('.bin', '.exe', '.sh'))
        try:
            paths = procutils.which("executable")
        finally:
            # Always restore PATHEXT, even if which() raised.
            if old is None:
                del os.environ['PATHEXT']
            else:
                os.environ['PATHEXT'] = old
        expectedPaths = [j(self.foobaz, "executable"),
                         j(self.bazfoo, "executable"),
                         j(self.bazfoo, "executable.bin")]
        if runtime.platform.isWindows():
            expectedPaths.append(j(self.bazbar, "executable"))
        self.assertEqual(paths, expectedPaths)
class ClosingPipesProcessProtocol(protocol.ProcessProtocol):
    """
    Process protocol that accumulates the child's stdout and stderr and
    fires a Deferred with the termination reason when the process ends.
    """
    # Class-level defaults; the augmented assignments below rebind these as
    # instance attributes on the first received chunk.
    output = ''
    errput = ''

    def __init__(self, outOrErr):
        self.outOrErr = outOrErr
        self.deferred = defer.Deferred()

    def processEnded(self, reason):
        # Hand the termination Failure/result to whoever is waiting.
        self.deferred.callback(reason)

    def outReceived(self, data):
        # Accumulate everything the child writes to stdout.
        self.output = self.output + data

    def errReceived(self, data):
        # Accumulate everything the child writes to stderr.
        self.errput = self.errput + data
class ClosingPipes(unittest.TestCase):
    """
    Tests that closing a child's stdout/stderr pipe via the process
    transport really closes the underlying descriptor.
    """
    def doit(self, fd):
        """
        Create a child process and close one of its output descriptors using
        L{IProcessTransport.closeStdout} or L{IProcessTransport.closeStderr}.
        Return a L{Deferred} which fires after verifying that the descriptor was
        really closed.
        """
        # NOTE(review): the True argument is stored as outOrErr on the
        # protocol; nothing in this class reads it afterwards.
        p = ClosingPipesProcessProtocol(True)
        self.assertFailure(p.deferred, error.ProcessTerminated)
        p.deferred.addCallback(self._endProcess, p)
        # The child blocks on raw_input() (Python 2) until we write below,
        # then repeatedly writes to the descriptor we are about to close.
        reactor.spawnProcess(
            p, sys.executable, [
                sys.executable, '-u', '-c',
                'raw_input()\n'
                'import sys, os, time\n'
                # Give the system a bit of time to notice the closed
                # descriptor. Another option would be to poll() for HUP
                # instead of relying on an os.write to fail with SIGPIPE.
                # However, that wouldn't work on OS X (or Windows?).
                'for i in range(1000):\n'
                ' os.write(%d, "foo\\n")\n'
                ' time.sleep(0.01)\n'
                'sys.exit(42)\n' % (fd,)
            ],
            env=None)
        if fd == 1:
            p.transport.closeStdout()
        elif fd == 2:
            p.transport.closeStderr()
        else:
            raise RuntimeError
        # Give the close time to propagate
        p.transport.write('go\n')
        # make the buggy case not hang
        p.transport.closeStdin()
        return p.deferred
    def _endProcess(self, reason, p):
        """
        Check that a failed write prevented the process from getting to its
        custom exit code.
        """
        # child must not get past that write without raising
        self.assertNotEquals(
            reason.exitCode, 42, 'process reason was %r' % reason)
        self.assertEqual(p.output, '')
        return p.errput
    def test_stdout(self):
        """
        ProcessProtocol.transport.closeStdout actually closes the pipe.
        """
        d = self.doit(1)
        def _check(errput):
            # The child's failing os.write surfaces as an OSError traceback
            # on its stderr.
            self.assertIn('OSError', errput)
            if runtime.platform.getType() != 'win32':
                self.assertIn('Broken pipe', errput)
        d.addCallback(_check)
        return d
    def test_stderr(self):
        """
        ProcessProtocol.transport.closeStderr actually closes the pipe.
        """
        d = self.doit(2)
        def _check(errput):
            # there should be no stderr open, so nothing for it to
            # write the error to.
            self.assertEqual(errput, '')
        d.addCallback(_check)
        return d
# Mark test classes as skipped when the current platform/reactor cannot run
# them. IReactorProcess(reactor, None) returns None when the reactor does not
# provide process support, making the adapter lookup double as a capability
# check.
skipMessage = "wrong platform or reactor doesn't support IReactorProcess"
# POSIX-only test cases.
if (runtime.platform.getType() != 'posix') or (not interfaces.IReactorProcess(reactor, None)):
    PosixProcessTestCase.skip = skipMessage
    PosixProcessTestCasePTY.skip = skipMessage
    TestTwoProcessesPosix.skip = skipMessage
    FDTest.skip = skipMessage
# Windows-only test cases.
if (runtime.platform.getType() != 'win32') or (not interfaces.IReactorProcess(reactor, None)):
    Win32ProcessTestCase.skip = skipMessage
    TestTwoProcessesNonPosix.skip = skipMessage
    Dumbwin32procPidTest.skip = skipMessage
    Win32UnicodeEnvironmentTest.skip = skipMessage
# Platform-independent cases still need process support in the reactor.
if not interfaces.IReactorProcess(reactor, None):
    ProcessTestCase.skip = skipMessage
    ClosingPipes.skip = skipMessage
| 32.12608 | 114 | 0.582593 |
acfa11a0e5344788b44c320149334494284c7b18 | 2,252 | py | Python | samples/create_marketplace_payment.py | onlined/iyzipay-python | bed8b57995cbfde76091f4bba1d20b7ff2e5c689 | [
"MIT"
] | null | null | null | samples/create_marketplace_payment.py | onlined/iyzipay-python | bed8b57995cbfde76091f4bba1d20b7ff2e5c689 | [
"MIT"
] | null | null | null | samples/create_marketplace_payment.py | onlined/iyzipay-python | bed8b57995cbfde76091f4bba1d20b7ff2e5c689 | [
"MIT"
] | null | null | null | import iyzipay
# Sample: create a marketplace payment via the iyzipay API.
# API credentials and endpoint, taken from the iyzipay package configuration.
options = {
    'api_key': iyzipay.api_key,
    'secret_key': iyzipay.secret_key,
    'base_url': iyzipay.base_url
}
# Test card used for the payment (sandbox card number).
payment_card = {
    'cardHolderName': 'John Doe',
    'cardNumber': '5528790000000008',
    'expireMonth': '12',
    'expireYear': '2030',
    'cvc': '123',
    'registerCard': '0'
}
# Buyer details required by the payment request.
buyer = {
    'id': 'BY789',
    'name': 'John',
    'surname': 'Doe',
    'gsmNumber': '+905350000000',
    'email': 'email@email.com',
    'identityNumber': '74300864791',
    'lastLoginDate': '2015-10-05 12:43:35',
    'registrationDate': '2013-04-21 15:12:09',
    'registrationAddress': 'Nidakule Göztepe, Merdivenköy Mah. Bora Sok. No:1',
    'ip': '85.34.78.112',
    'city': 'Istanbul',
    'country': 'Turkey',
    'zipCode': '34732'
}
# Used both as shipping and billing address below.
address = {
    'contactName': 'Jane Doe',
    'city': 'Istanbul',
    'country': 'Turkey',
    'address': 'Nidakule Göztepe, Merdivenköy Mah. Bora Sok. No:1',
    'zipCode': '34732'
}
# Marketplace basket: each item carries the sub-merchant key and the share
# of the price forwarded to that sub-merchant.
basket_items = [
    {
        'id': 'BI101',
        'name': 'Binocular',
        'category1': 'Collectibles',
        'category2': 'Accessories',
        'itemType': 'PHYSICAL',
        'price': '0.3',
        'subMerchantKey': 'sub merchant key',
        'subMerchantPrice': '0.27'
    },
    {
        'id': 'BI102',
        'name': 'Game code',
        'category1': 'Game',
        'category2': 'Online Game Items',
        'itemType': 'VIRTUAL',
        'price': '0.5',
        'subMerchantKey': 'sub merchant key',
        'subMerchantPrice': '0.42'
    },
    {
        'id': 'BI103',
        'name': 'Usb',
        'category1': 'Electronics',
        'category2': 'Usb / Cable',
        'itemType': 'PHYSICAL',
        'price': '0.2',
        'subMerchantKey': 'sub merchant key',
        'subMerchantPrice': '0.18'
    }
]
# Complete payment request; amounts are passed as strings per the iyzipay API.
request = {
    'locale': 'tr',
    'conversationId': '123456789',
    'price': '1',
    'paidPrice': '1.2',
    'currency': 'TRY',
    'installment': '1',
    'basketId': 'B67832',
    'paymentChannel': 'WEB',
    'paymentGroup': 'PRODUCT',
    'paymentCard': payment_card,
    'buyer': buyer,
    'shippingAddress': address,
    'billingAddress': address,
    'basketItems': basket_items
}
# Submit the payment and print the raw response body.
payment = iyzipay.Payment().create(request, options)
print(payment.body)
| 23.705263 | 79 | 0.547957 |
acfa1303492deceb99c0d329e9b20eece0654584 | 8,965 | py | Python | research/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor_test.py | Santhanalakshmimano/SpeedBump_detection_usingCV | 7b68f260cf1351d757983a48c5a62e063df807c9 | [
"Apache-2.0"
] | null | null | null | research/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor_test.py | Santhanalakshmimano/SpeedBump_detection_usingCV | 7b68f260cf1351d757983a48c5a62e063df807c9 | [
"Apache-2.0"
] | null | null | null | research/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor_test.py | Santhanalakshmimano/SpeedBump_detection_usingCV | 7b68f260cf1351d757983a48c5a62e063df807c9 | [
"Apache-2.0"
] | 1 | 2021-07-13T01:22:08.000Z | 2021-07-13T01:22:08.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd_mobilenet_v2_fpn_feature_extractor."""
import numpy as np
import tensorflow as tf
from models import ssd_feature_extractor_test
from models import ssd_mobilenet_v2_fpn_feature_extractor
slim = tf.contrib.slim
class SsdMobilenetV2FpnFeatureExtractorTest(
    ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
  """Shape and variable-scope tests for SSDMobileNetV2FpnFeatureExtractor."""
  def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
                                is_training=True, use_explicit_padding=False):
    """Constructs a new feature extractor.
    Args:
      depth_multiplier: float depth multiplier for feature extractor
      pad_to_multiple: the nearest multiple to zero pad the input height and
        width dimensions to.
      is_training: whether the network is in training mode.
      use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
        inputs so that the output dimensions are the same as if 'SAME' padding
        were used.
    Returns:
      an ssd_meta_arch.SSDFeatureExtractor object.
    """
    min_depth = 32
    return (ssd_mobilenet_v2_fpn_feature_extractor.
            SSDMobileNetV2FpnFeatureExtractor(
                is_training,
                depth_multiplier,
                min_depth,
                pad_to_multiple,
                self.conv_hyperparams_fn,
                use_explicit_padding=use_explicit_padding))
  def test_extract_features_returns_correct_shapes_256(self):
    """Feature-map shapes for a 256x256 input, implicit and explicit padding."""
    image_height = 256
    image_width = 256
    depth_multiplier = 1.0
    pad_to_multiple = 1
    expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
                                  (2, 8, 8, 256), (2, 4, 4, 256),
                                  (2, 2, 2, 256)]
    self.check_extract_features_returns_correct_shape(
        2, image_height, image_width, depth_multiplier, pad_to_multiple,
        expected_feature_map_shape, use_explicit_padding=False)
    self.check_extract_features_returns_correct_shape(
        2, image_height, image_width, depth_multiplier, pad_to_multiple,
        expected_feature_map_shape, use_explicit_padding=True)
  def test_extract_features_returns_correct_shapes_384(self):
    # NOTE(review): despite the "_384" name, this uses a 320x320 input.
    image_height = 320
    image_width = 320
    depth_multiplier = 1.0
    pad_to_multiple = 1
    expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256),
                                  (2, 10, 10, 256), (2, 5, 5, 256),
                                  (2, 3, 3, 256)]
    self.check_extract_features_returns_correct_shape(
        2, image_height, image_width, depth_multiplier, pad_to_multiple,
        expected_feature_map_shape, use_explicit_padding=False)
    self.check_extract_features_returns_correct_shape(
        2, image_height, image_width, depth_multiplier, pad_to_multiple,
        expected_feature_map_shape, use_explicit_padding=True)
  def test_extract_features_with_dynamic_image_shape(self):
    """Same shape expectations, but with dynamic (placeholder) input sizes."""
    image_height = 256
    image_width = 256
    depth_multiplier = 1.0
    pad_to_multiple = 1
    expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
                                  (2, 8, 8, 256), (2, 4, 4, 256),
                                  (2, 2, 2, 256)]
    self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
        2, image_height, image_width, depth_multiplier, pad_to_multiple,
        expected_feature_map_shape, use_explicit_padding=False)
    self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
        2, image_height, image_width, depth_multiplier, pad_to_multiple,
        expected_feature_map_shape, use_explicit_padding=True)
  def test_extract_features_returns_correct_shapes_with_pad_to_multiple(self):
    # 299 is padded up to the next multiple of 32 (=320) before extraction.
    image_height = 299
    image_width = 299
    depth_multiplier = 1.0
    pad_to_multiple = 32
    expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256),
                                  (2, 10, 10, 256), (2, 5, 5, 256),
                                  (2, 3, 3, 256)]
    self.check_extract_features_returns_correct_shape(
        2, image_height, image_width, depth_multiplier, pad_to_multiple,
        expected_feature_map_shape, use_explicit_padding=False)
    self.check_extract_features_returns_correct_shape(
        2, image_height, image_width, depth_multiplier, pad_to_multiple,
        expected_feature_map_shape, use_explicit_padding=True)
  def test_extract_features_returns_correct_shapes_enforcing_min_depth(self):
    # A tiny depth multiplier forces all channel counts down to min_depth (32).
    image_height = 256
    image_width = 256
    depth_multiplier = 0.5**12
    pad_to_multiple = 1
    expected_feature_map_shape = [(2, 32, 32, 32), (2, 16, 16, 32),
                                  (2, 8, 8, 32), (2, 4, 4, 32),
                                  (2, 2, 2, 32)]
    self.check_extract_features_returns_correct_shape(
        2, image_height, image_width, depth_multiplier, pad_to_multiple,
        expected_feature_map_shape, use_explicit_padding=False)
    self.check_extract_features_returns_correct_shape(
        2, image_height, image_width, depth_multiplier, pad_to_multiple,
        expected_feature_map_shape, use_explicit_padding=True)
  def test_extract_features_raises_error_with_invalid_image_size(self):
    """A 32x32 input is too small for the FPN pyramid and must fail."""
    image_height = 32
    image_width = 32
    depth_multiplier = 1.0
    pad_to_multiple = 1
    self.check_extract_features_raises_error_with_invalid_image_size(
        image_height, image_width, depth_multiplier, pad_to_multiple)
  def test_preprocess_returns_correct_value_range(self):
    """preprocess() must map inputs into [-1, 1]."""
    image_height = 256
    image_width = 256
    depth_multiplier = 1
    pad_to_multiple = 1
    test_image = np.random.rand(2, image_height, image_width, 3)
    feature_extractor = self._create_feature_extractor(depth_multiplier,
                                                       pad_to_multiple)
    preprocessed_image = feature_extractor.preprocess(test_image)
    self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
  def test_variables_only_created_in_scope(self):
    """All variables must live under the 'MobilenetV2' variable scope."""
    depth_multiplier = 1
    pad_to_multiple = 1
    scope_name = 'MobilenetV2'
    self.check_feature_extractor_variables_under_scope(
        depth_multiplier, pad_to_multiple, scope_name)
  def test_fused_batchnorm(self):
    """The graph must use the fused FusedBatchNorm op."""
    image_height = 256
    image_width = 256
    depth_multiplier = 1
    pad_to_multiple = 1
    image_placeholder = tf.placeholder(tf.float32,
                                       [1, image_height, image_width, 3])
    feature_extractor = self._create_feature_extractor(depth_multiplier,
                                                       pad_to_multiple)
    preprocessed_image = feature_extractor.preprocess(image_placeholder)
    _ = feature_extractor.extract_features(preprocessed_image)
    self.assertTrue(
        any(op.type == 'FusedBatchNorm'
            for op in tf.get_default_graph().get_operations()))
  def test_get_expected_feature_map_variable_names(self):
    """Check that the expected backbone and FPN variables are created."""
    depth_multiplier = 1.0
    pad_to_multiple = 1
    expected_feature_maps_variables = set([
        # Mobilenet V2 feature maps
        'MobilenetV2/expanded_conv_4/depthwise/depthwise_weights',
        'MobilenetV2/expanded_conv_7/depthwise/depthwise_weights',
        'MobilenetV2/expanded_conv_14/depthwise/depthwise_weights',
        'MobilenetV2/Conv_1/weights',
        # FPN layers
        'MobilenetV2/fpn/bottom_up_Conv2d_20/weights',
        'MobilenetV2/fpn/bottom_up_Conv2d_21/weights',
        'MobilenetV2/fpn/smoothing_1/weights',
        'MobilenetV2/fpn/smoothing_2/weights',
        'MobilenetV2/fpn/projection_1/weights',
        'MobilenetV2/fpn/projection_2/weights',
        'MobilenetV2/fpn/projection_3/weights',
    ])
    g = tf.Graph()
    with g.as_default():
      preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3))
      feature_extractor = self._create_feature_extractor(
          depth_multiplier, pad_to_multiple)
      feature_extractor.extract_features(preprocessed_inputs)
      actual_variable_set = set([
          var.op.name for var in g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
      ])
      # Every expected variable must be present in the graph.
      variable_intersection = expected_feature_maps_variables.intersection(
          actual_variable_set)
      self.assertSetEqual(expected_feature_maps_variables,
                          variable_intersection)
# Run the test suite when executed directly.
if __name__ == '__main__':
  tf.test.main()
| 43.309179 | 80 | 0.688901 |
acfa13499ab4235f70195cebd741e32b224f2db0 | 3,019 | py | Python | src/OTLMOW/PostenMapping/Model/Post050403223.py | davidvlaminck/OTLClassPython | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | 2 | 2022-02-01T08:58:11.000Z | 2022-02-08T13:35:17.000Z | src/OTLMOW/PostenMapping/Model/Post050403223.py | davidvlaminck/OTLMOW | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | null | null | null | src/OTLMOW/PostenMapping/Model/Post050403223.py | davidvlaminck/OTLMOW | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | null | null | null | # coding=utf-8
from OTLMOW.PostenMapping.StandaardPost import StandaardPost
from OTLMOW.PostenMapping.StandaardPostMapping import StandaardPostMapping
# Generated with PostenCreator. To modify: extend, do not edit
class Post050403223(StandaardPost):
    """
    Standard tender item 0504.03223: crushed-stone foundation (type II),
    23 cm thick, with its OTL attribute mappings.
    Generated code — extend rather than edit (see module header).
    """
    def __init__(self):
        super().__init__(
            nummer='0504.03223',
            beschrijving='Steenslagfundering met continue korrelverdeling zonder toevoegsels, type II volgens 5-4.3, dikte 23 cm',
            meetstaateenheid='M2',
            # Each mapping ties one OTL attribute to this tender item;
            # 'oppervlakte' is the only measured (meetstaat) attribute.
            mappings=[StandaardPostMapping(
                typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw',
                attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw.type',
                dotnotatie='type',
                defaultWaarde='steenslag-met-continue-korrelverdeling-zonder-toevoegsel---type-II',
                range='',
                usagenote='',
                isMeetstaatAttr=0,
                isAltijdInTeVullen=0,
                isBasisMapping=1,
                mappingStatus='gemapt 2.0',
                mappingOpmerking='',
                standaardpostnummer='0504.03223')
            , StandaardPostMapping(
                typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw',
                attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.laagRol',
                dotnotatie='laagRol',
                defaultWaarde='fundering',
                range='',
                usagenote='',
                isMeetstaatAttr=0,
                isAltijdInTeVullen=0,
                isBasisMapping=1,
                mappingStatus='gemapt 2.0',
                mappingOpmerking='',
                standaardpostnummer='0504.03223')
            , StandaardPostMapping(
                typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw',
                attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#LaagDikte.dikte',
                dotnotatie='dikte',
                defaultWaarde='23',
                range='',
                usagenote='cm^^cdt:ucumunit',
                isMeetstaatAttr=0,
                isAltijdInTeVullen=0,
                isBasisMapping=1,
                mappingStatus='gemapt 2.0',
                mappingOpmerking='',
                standaardpostnummer='0504.03223')
            , StandaardPostMapping(
                typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw',
                attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.oppervlakte',
                dotnotatie='oppervlakte',
                defaultWaarde='',
                range='',
                usagenote='m2^^cdt:ucumunit',
                isMeetstaatAttr=1,
                isAltijdInTeVullen=1,
                isBasisMapping=1,
                mappingStatus='gemapt 2.0',
                mappingOpmerking='',
                standaardpostnummer='0504.03223')])
| 46.446154 | 130 | 0.571712 |
acfa137098660edf4515ebfa5e955ef7fa7f063f | 9,704 | py | Python | wendy/__main__.py | kangvcar/Wendy | 0e0a593ebf8df5983904ed3f4da1e19c4df93292 | [
"MIT"
] | 6 | 2021-07-19T08:03:29.000Z | 2022-02-19T08:21:26.000Z | wendy/__main__.py | kangvcar/Wendy | 0e0a593ebf8df5983904ed3f4da1e19c4df93292 | [
"MIT"
] | null | null | null | wendy/__main__.py | kangvcar/Wendy | 0e0a593ebf8df5983904ed3f4da1e19c4df93292 | [
"MIT"
] | 2 | 2021-07-19T06:59:27.000Z | 2022-02-19T08:21:27.000Z | # Launch browser via CEFPython
# Example of embedding CEF Python browser using wxPython library.
# This example has a top menu and a browser widget without navigation bar.
# Tested configurations:
# - wxPython 4.0 on Windows/Mac/Linux
# - wxPython 3.0 on Windows/Mac
# - wxPython 2.8 on Linux
# - CEF Python v55.4+
import os
import platform
import socket
import sys
import threading
import wx
from cefpython3 import cefpython as cef
from wendy import start_gevent, APP_NAME, get_root_path
# CEF helper for host-window integration (focus/size callbacks below).
WindowUtils = cef.WindowUtils()
# Platforms
WINDOWS = (platform.system() == "Windows")
LINUX = (platform.system() == "Linux")
MAC = (platform.system() == "Darwin")
# Configuration: window title and initial size in pixels.
TITLE = APP_NAME
WIDTH = 1400
HEIGHT = 850
# Globals: number of open MainFrame windows; CEF is shut down when it
# reaches zero again (see MainFrame.OnClose).
g_count_windows = 0
def find_port() -> int:
    """
    Find an available TCP port for the embedded Gevent/Flask server.

    Binds an ephemeral socket to localhost so the OS assigns a free port,
    then releases it and returns the number. Retries up to 1000 times and
    exits the process on total failure (matching the original behaviour).

    :return: Available port number.
    """
    for _ in range(1000):
        try:
            # The context manager guarantees the socket is closed even if
            # bind()/getsockname() raise (the original leaked it on error).
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
                sock.bind(('localhost', 0))
                app_port = sock.getsockname()[1]
            print("PORT: " + str(app_port))
            return app_port
        except OSError:
            # Only socket-level errors mean "try again"; anything else
            # (e.g. KeyboardInterrupt) must propagate, unlike the original
            # bare except.
            continue
    print("FAILED AFTER 1000 PORT ATTEMPTS")
    sys.exit(1)
def start_server(app_port: int) -> None:
    """
    Starts Gevent which runs Flask.

    Blocks until the server stops; intended to be run in a daemon thread
    (see MainFrame.embed_browser).

    :param app_port: Port that Gevent will use.
    """
    start_gevent.start_gevent(app_port)
def main() -> None:
    """
    Application entry point: initialize CEF, run the wx main loop, and shut
    CEF down afterwards (except on Mac, where shutdown happens in OnClose).
    """
    check_versions()
    sys.excepthook = cef.ExceptHook # To shutdown all CEF processes on error
    settings = {}
    if WINDOWS:
        # High-DPI support must be enabled before cef.Initialize().
        # noinspection PyUnresolvedReferences, PyArgumentList
        cef.DpiAware.EnableHighDpiSupport()
    cef.Initialize(settings=settings)
    app = CefApp(False)
    app.MainLoop()
    del app # Must destroy before calling Shutdown
    if not MAC:
        # On Mac shutdown is called in OnClose
        cef.Shutdown()
def check_versions() -> None:
    """
    Print the CEF Python, Python and wxPython versions and enforce the
    minimum supported CEF Python version.
    """
    print("[wxpython.py] CEF Python {ver}".format(ver=cef.__version__))
    print("[wxpython.py] Python {ver} {arch}".format(
        ver=platform.python_version(), arch=platform.architecture()[0]))
    print("[wxpython.py] wxPython {ver}".format(ver=wx.version()))
    # CEF Python version requirement
    # NOTE(review): this is a lexicographic string comparison; it would
    # misorder e.g. "100.0" < "55.3" — confirm against supported versions.
    assert cef.__version__ >= "55.3", "CEF Python v55.3+ required to run this"
class MainFrame(wx.Frame):
    """
    Top-level application window embedding a CEF browser widget inside a
    wx.Panel. Starts the local Gevent/Flask server and points the browser
    at it (see embed_browser).
    """
    def __init__(self):
        wx.Frame.__init__(self, parent=None, id=wx.ID_ANY,
                          title=TITLE, size=(WIDTH, HEIGHT))
        # Set once the CEF browser has been created in embed_browser().
        self.browser = None
        # Must ignore X11 errors like 'BadWindow' and others by
        # installing X11 error handlers. This must be done after
        # wx was intialized.
        if LINUX:
            WindowUtils.InstallX11ErrorHandlers()
        global g_count_windows
        g_count_windows += 1
        self.setup_icon()
        self.Bind(wx.EVT_CLOSE, self.OnClose)
        # Set wx.WANTS_CHARS style for the keyboard to work.
        # This style also needs to be set for all parent controls.
        self.browser_panel = wx.Panel(self, style=wx.WANTS_CHARS)
        self.browser_panel.Bind(wx.EVT_SET_FOCUS, self.OnSetFocus)
        self.browser_panel.Bind(wx.EVT_SIZE, self.OnSize)
        if MAC:
            try:
                # noinspection PyUnresolvedReferences
                from AppKit import NSApp
                # Make the content view for the window have a layer.
                # This will make all sub-views have layers. This is
                # necessary to ensure correct layer ordering of all
                # child views and their layers. This fixes Window
                # glitchiness during initial loading on Mac (Issue #371).
                NSApp.windows()[0].contentView().setWantsLayer_(True)
            except ImportError:
                print("[wxpython.py] Warning: PyObjC package is missing, "
                      "cannot fix Issue #371")
                print("[wxpython.py] To install PyObjC type: "
                      "pip install -U pyobjc")
        if LINUX:
            # On Linux must show before embedding browser, so that handle
            # is available (Issue #347).
            self.Show()
            # In wxPython 3.0 and wxPython 4.0 on Linux handle is
            # still not yet available, so must delay embedding browser
            # (Issue #349).
            if wx.version().startswith("3.") or wx.version().startswith("4."):
                wx.CallLater(100, self.embed_browser)
            else:
                # This works fine in wxPython 2.8 on Linux
                self.embed_browser()
        else:
            # a = start_server(5000)
            self.embed_browser()
            self.Show()
    def setup_icon(self):
        """Attach the application icon bundle to the frame."""
        # icon_file = get_root_path('icon.ico')
        # # wx.IconFromBitmap is not available on Linux in wxPython 3.0/4.0
        # if os.path.exists(icon_file) and hasattr(wx, "IconFromBitmap"):
        #     icon = wx.IconFromBitmap(wx.Bitmap(icon_file, wx.BITMAP_TYPE_PNG))
        #     self.SetIcon(icon)
        ib = wx.IconBundle()
        ib.AddIcon(get_root_path('icon.ico'), wx.BITMAP_TYPE_ANY)
        self.SetIcons(ib)
    def embed_browser(self):
        """
        Create the CEF browser as a child of the panel, start the local
        Flask/Gevent server on a free port in a daemon thread, and load it.
        """
        window_info = cef.WindowInfo()
        (width, height) = self.browser_panel.GetClientSize().Get()
        assert self.browser_panel.GetHandle(), "Window handle not available yet"
        window_info.SetAsChild(self.browser_panel.GetHandle(),
                               [0, 0, width, height])
        # Wendy
        app_port = find_port()
        t = threading.Thread(target=start_server, args=(app_port,))
        t.daemon = True
        t.start()
        flask_url = 'http://127.0.0.1:' + str(app_port) + '/index.html'
        # start_server(5000)
        # flask_url = "http://localhost:5000"
        print(flask_url)
        self.browser = cef.CreateBrowserSync(window_info, url=flask_url)
        # self.browser = cef.CreateBrowserSync(window_info, url="http://localhost:5000")
        self.browser.SetClientHandler(FocusHandler())
    def OnSetFocus(self, _):
        # Forward focus events from the wx panel to the CEF browser.
        if not self.browser:
            return
        if WINDOWS:
            WindowUtils.OnSetFocus(self.browser_panel.GetHandle(),
                                   0, 0, 0)
        self.browser.SetFocus(True)
    def OnSize(self, _):
        # Keep the embedded browser sized to the panel.
        if not self.browser:
            return
        if WINDOWS:
            WindowUtils.OnSize(self.browser_panel.GetHandle(),
                               0, 0, 0)
        elif LINUX:
            (x, y) = (0, 0)
            (width, height) = self.browser_panel.GetSize().Get()
            self.browser.SetBounds(x, y, width, height)
        self.browser.NotifyMoveOrResizeStarted()
    def OnClose(self, event):
        """Tear down the browser; on the last window also shut CEF down."""
        print("[wxpython.py] OnClose called")
        if not self.browser:
            # May already be closing, may be called multiple times on Mac
            return
        if MAC:
            # On Mac things work differently, other steps are required
            self.browser.CloseBrowser()
            self.clear_browser_references()
            self.Destroy()
            global g_count_windows
            g_count_windows -= 1
            if g_count_windows == 0:
                cef.Shutdown()
                wx.GetApp().ExitMainLoop()
                # Call _exit otherwise app exits with code 255 (Issue #162).
                # noinspection PyProtectedMember
                os._exit(0)
        else:
            # Calling browser.CloseBrowser() and/or self.Destroy()
            # in OnClose may cause app crash on some paltforms in
            # some use cases, details in Issue #107.
            self.browser.ParentWindowWillClose()
            event.Skip()
            self.clear_browser_references()
    def clear_browser_references(self):
        # Clear browser references that you keep anywhere in your
        # code. All references must be cleared for CEF to shutdown cleanly.
        self.browser = None
class FocusHandler(object):
    """
    CEF client handler that re-asserts keyboard focus on Linux whenever the
    browser gains focus.
    """
    def OnGotFocus(self, browser, **_):
        # Temporary fix for focus issues on Linux (Issue #284).
        if LINUX:
            print("[wxpython.py] FocusHandler.OnGotFocus:"
                  " keyboard focus fix (Issue #284)")
            browser.SetFocus(True)
class CefApp(wx.App):
    """
    wx.App subclass that creates the MainFrame and drives CEF's message
    loop from a 10ms wx.Timer.
    """
    def __init__(self, redirect):
        self.timer = None
        self.timer_id = 1
        # Guards initialize() against being run twice (it can be reached
        # from both OnPreInit and OnInit, see below).
        self.is_initialized = False
        super(CefApp, self).__init__(redirect=redirect)
    def OnPreInit(self):
        super(CefApp, self).OnPreInit()
        # On Mac with wxPython 4.0 the OnInit() event never gets
        # called. Doing wx window creation in OnPreInit() seems to
        # resolve the problem (Issue #350).
        if MAC and wx.version().startswith("4."):
            print("[wxpython.py] OnPreInit: initialize here"
                  " (wxPython 4.0 fix)")
            self.initialize()
    def OnInit(self):
        self.initialize()
        return True
    def initialize(self):
        """Create the timer and the main window exactly once."""
        if self.is_initialized:
            return
        self.is_initialized = True
        self.create_timer()
        frame = MainFrame()
        self.SetTopWindow(frame)
        frame.Show()
    def create_timer(self):
        # See also "Making a render loop":
        # http://wiki.wxwidgets.org/Making_a_render_loop
        # Another way would be to use EVT_IDLE in MainFrame.
        self.timer = wx.Timer(self, self.timer_id)
        self.Bind(wx.EVT_TIMER, self.on_timer, self.timer)
        self.timer.Start(10) # 10ms timer
    def on_timer(self, _):
        # Pump CEF's message loop from the wx event loop.
        cef.MessageLoopWork()
    def OnExit(self):
        self.timer.Stop()
        return 0
# Launch the application when executed as a script/module.
if __name__ == '__main__':
    main()
acfa1566ffae0b94817cc6a02cfec34eeab88a6c | 68,465 | py | Python | core.py | wojsam/viur-html5 | 25483dbf7accde99223ad25c13f9efb8c25c1127 | [
"MIT"
] | 8 | 2017-11-24T10:00:01.000Z | 2019-12-08T09:03:32.000Z | core.py | wojsam/viur-html5 | 25483dbf7accde99223ad25c13f9efb8c25c1127 | [
"MIT"
] | 2 | 2019-06-27T13:10:58.000Z | 2020-01-16T17:11:35.000Z | core.py | wojsam/viur-html5 | 25483dbf7accde99223ad25c13f9efb8c25c1127 | [
"MIT"
] | 3 | 2018-01-21T01:09:49.000Z | 2019-10-21T13:23:28.000Z | import logging, string
########################################################################################################################
# DOM-access functions and variables
########################################################################################################################
# Bind window/document either from the real browser DOM (Pyodide) or from a
# minimal minidom-based emulation for running outside a browser.
try:
    # Pyodide
    from js import window, eval as jseval
    document = window.document
except (ImportError, AttributeError):
    # Narrowed from a bare "except:": only a missing js module or a missing
    # window.document should trigger emulation; SystemExit/KeyboardInterrupt
    # and unrelated errors now propagate instead of being swallowed.
    print("Emulation mode")
    from xml.dom.minidom import parseString
    jseval = None
    window = None
    document = parseString("<html><head /><body /></html>")
def domCreateAttribute(tag, ns=None):
    """
    Creates a new HTML/SVG/... attribute
    :param tag: the attribute name
    :param ns: the namespace. Default: HTML. Possible values: HTML, SVG, XBL, XUL
    """
    uri = None
    if ns == "SVG":
        uri = "http://www.w3.org/2000/svg"
    elif ns == "XBL":
        uri = "http://www.mozilla.org/xbl"
    elif ns == "XUL":
        uri = "http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul"
    if uri:
        # Bugfix: namespaced attributes must be created with
        # createAttributeNS(uri, name); createAttribute() takes only the
        # name, so the old call document.createAttribute(uri, tag) was
        # invalid. This mirrors domCreateElement's use of createElementNS.
        return document.createAttributeNS(uri, tag)
    return document.createAttribute(tag)
def domCreateElement(tag, ns=None):
    """
    Creates a new HTML/SVG/... tag
    :param ns: the namespace. Default: HTML. Possible values: HTML, SVG, XBL, XUL
    """
    # Map the supported namespace shortcuts to their URIs; anything else
    # (including None) falls back to a plain HTML element.
    nsUris = {
        "SVG": "http://www.w3.org/2000/svg",
        "XBL": "http://www.mozilla.org/xbl",
        "XUL": "http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul",
    }
    uri = nsUris.get(ns)
    if uri:
        return document.createElementNS(uri, tag)
    return document.createElement(tag)
def domCreateTextNode(txt=""):
    # Create a DOM text node with the given initial content.
    return document.createTextNode(txt)
def domGetElementById(idTag):
    # Look up a single element by its id attribute (None if not found).
    return document.getElementById(idTag)
def domElementFromPoint(x, y):
    # Return the topmost element at the given viewport coordinates.
    return document.elementFromPoint(x, y)
def domGetElementsByTagName(tag):
    # Return a Python list of all elements with the given tag name.
    items = document.getElementsByTagName(tag)
    return [items.item(i) for i in range(0, int(items.length))] #pyodide interprets items.length as float, so convert to int
########################################################################################################################
# HTML Widgets
########################################################################################################################
# TextNode -------------------------------------------------------------------------------------------------------------
class TextNode(object):
    """
    Represents a piece of text inside the DOM.
    This is the *only* object not deriving from "Widget", as it does
    not support any of its properties.
    """
    def __init__(self, txt=None, *args, **kwargs):
        super().__init__()
        self._parent = None
        self._children = []
        self._isAttached = False
        self.element = domCreateTextNode(txt or "")
    def _setText(self, txt):
        """Replaces the text content of the underlying DOM node."""
        self.element.data = txt
    def _getText(self):
        """Returns the text content of the underlying DOM node."""
        return self.element.data
    def __str__(self):
        return self.element.data
    def onAttach(self):
        """Called when this node becomes part of the live DOM tree."""
        self._isAttached = True
    def onDetach(self):
        """Called when this node is removed from the live DOM tree."""
        self._isAttached = False
    def _setDisabled(self, disabled):
        # Text nodes cannot be disabled; this is accepted but ignored.
        return
    def _getDisabled(self):
        # Text nodes are never disabled.
        return False
    def children(self):
        # Text nodes never have child widgets.
        return []
# _WidgetClassWrapper -------------------------------------------------------------------------------------------------
class _WidgetClassWrapper(list):
def __init__(self, targetWidget):
super().__init__()
self.targetWidget = targetWidget
# Initially read content of element into current wrappper
value = targetWidget.element.getAttribute("class")
if value:
for c in value.split(" "):
list.append(self, c)
def set(self, value):
if value is None:
value = []
elif isinstance(value, str):
value = value.split(" ")
elif not isinstance(value, list):
raise ValueError("Value must be a str, a List or None")
list.clear(self)
list.extend(self, value)
self._updateElem()
def _updateElem(self):
if len(self) == 0:
self.targetWidget.element.removeAttribute("class")
else:
self.targetWidget.element.setAttribute("class", " ".join(self))
def append(self, p_object):
list.append(self, p_object)
self._updateElem()
def clear(self):
list.clear(self)
self._updateElem()
def remove(self, value):
try:
list.remove(self, value)
except:
pass
self._updateElem()
def extend(self, iterable):
list.extend(self, iterable)
self._updateElem()
def insert(self, index, p_object):
list.insert(self, index, p_object)
self._updateElem()
def pop(self, index=None):
list.pop(self, index)
self._updateElem()
# _WidgetDataWrapper ---------------------------------------------------------------------------------------------------
class _WidgetDataWrapper(dict):
def __init__(self, targetWidget):
super().__init__()
self.targetWidget = targetWidget
alldata = targetWidget.element
for data in dir(alldata.dataset):
dict.__setitem__(self, data, getattr(alldata.dataset, data))
def __setitem__(self, key, value):
dict.__setitem__(self, key, value)
self.targetWidget.element.setAttribute(str("data-" + key), value)
def update(self, E=None, **F):
dict.update(self, E, **F)
if E is not None and "keys" in dir(E):
for key in E:
self.targetWidget.element.setAttribute(str("data-" + key), E["data-" + key])
elif E:
for (key, val) in E:
self.targetWidget.element.setAttribute(str("data-" + key), "data-" + val)
for key in F:
self.targetWidget.element.setAttribute(str("data-" + key), F["data-" + key])
# _WidgetStyleWrapper --------------------------------------------------------------------------------------------------
class _WidgetStyleWrapper(dict):
def __init__(self, targetWidget):
super().__init__()
self.targetWidget = targetWidget
style = targetWidget.element.style
for key in dir(style):
# Convert JS-Style-Syntax to CSS Syntax (ie borderTop -> border-top)
realKey = ""
for currChar in key:
if currChar.isupper():
realKey += "-"
realKey += currChar.lower()
val = style.getPropertyValue(realKey)
if val:
dict.__setitem__(self, realKey, val)
def __setitem__(self, key, value):
dict.__setitem__(self, key, value)
self.targetWidget.element.style.setProperty(key, value)
def update(self, E=None, **F):
dict.update(self, E, **F)
if E is not None and "keys" in dir(E):
for key in E:
self.targetWidget.element.style.setProperty(key, E[key])
elif E:
for (key, val) in E:
self.targetWidget.element.style.setProperty(key, val)
for key in F:
self.targetWidget.element.style.setProperty(key, F[key])
# Widget ---------------------------------------------------------------------------------------------------------------
class Widget(object):
    """
    Base class of all renderable HTML widgets.
    Wraps a single DOM element, keeps a parallel Python-side child tree,
    and exposes HTML attributes via the item protocol (widget["attr"]),
    which dispatches to _get<Attr>/_set<Attr> accessor methods.
    """
    _tagName = None  # Defines the tag-name that is used for DOM-Element construction
    _leafTag = False  # Defines whether this Widget may contain other Widgets (default) or is a leaf
    _namespace = None  # Namespace
    _parserTagName = None  # Alternative tag name under which this Widget is registered in HTML parser
    style = []  # CSS-classes to directly assign to this Widget at construction.
    def __init__(self, *args, appendTo=None, style=None, **kwargs):
        """
        :param args: Initial child content, forwarded to appendChild().
        :param appendTo: Optional parent widget to append this widget to.
        :param style: Additional CSS class(es) to assign at construction.
        :param _wrapElem: (keyword only) wrap an already existing DOM element
            instead of creating a new one.
        """
        if "_wrapElem" in kwargs.keys():
            self.element = kwargs["_wrapElem"]
            del kwargs["_wrapElem"]
        else:
            assert self._tagName is not None
            self.element = domCreateElement(self._tagName, ns=self._namespace)
        self._widgetClassWrapper = None
        super().__init__()
        self.addClass(self.style)
        if style:
            self.addClass(style)
        self._children = []
        self._catchedEvents = {}
        self._disabledState = 0
        self._isAttached = False
        self._parent = None
        if args:
            self.appendChild(*args, **kwargs)
        if appendTo:
            appendTo.appendChild(self)
    def sinkEvent(self, *args):
        """
        Subscribes this widget's on<Event> methods as DOM event listeners.
        Each argument is the name of an on<Event> method defined on self.
        """
        for event_attrName in args:
            event = event_attrName.lower()
            if event_attrName in self._catchedEvents or event in ["onattach", "ondetach"]:
                continue
            eventFn = getattr(self, event_attrName, None)
            assert eventFn and callable(eventFn), "{} must provide a {} method".format(str(self), event_attrName)
            self._catchedEvents[event_attrName] = eventFn
            if event.startswith("on"):
                event = event[2:]
            self.element.addEventListener(event, eventFn)
    def unsinkEvent(self, *args):
        """
        Removes DOM event listeners previously registered via sinkEvent().
        """
        for event_attrName in args:
            event = event_attrName.lower()
            if event_attrName not in self._catchedEvents:
                continue
            eventFn = self._catchedEvents[event_attrName]
            del self._catchedEvents[event_attrName]
            if event.startswith("on"):
                event = event[2:]
            self.element.removeEventListener(event, eventFn)
    def disable(self):
        """
        Disables an element, in case it is not already disabled.
        On disabled elements, events are not triggered anymore.
        """
        if not self["disabled"]:
            self["disabled"] = True
    def enable(self):
        """
        Enables an element, in case it is not already enabled.
        """
        if self["disabled"]:
            self["disabled"] = False
    def _getTargetfuncName(self, key, type):
        # Maps an attribute key to its accessor name, e.g. ("id", "get") -> "_getId".
        assert type in ["get", "set"]
        return "_{}{}{}".format(type, key[0].upper(), key[1:])
    def __getitem__(self, key):
        funcName = self._getTargetfuncName(key, "get")
        if funcName in dir(self):
            return getattr(self, funcName)()
        return None
    def __setitem__(self, key, value):
        funcName = self._getTargetfuncName(key, "set")
        if funcName in dir(self):
            return getattr(self, funcName)(value)
        raise ValueError("{} is no valid attribute for {}".format(key, (self._tagName or str(self))))
    def __str__(self):
        return str(self.__class__.__name__)
    def __iter__(self):
        return self._children.__iter__()
    def _getData(self):
        """
        Custom data attributes are intended to store custom data private to the page or application, for which there are no more appropriate attributes or elements.
        :returns: _WidgetDataWrapper
        """
        return _WidgetDataWrapper(self)
    def _getTranslate(self):
        """
        Specifies whether an elements attribute values and contents of its children are to be translated when the page is localized, or whether to leave them unchanged.
        :returns: True | False
        """
        return True if self.element.translate == "yes" else False
    def _setTranslate(self, val):
        """
        Specifies whether an elements attribute values and contents of its children are to be translated when the page is localized, or whether to leave them unchanged.
        :param val: True | False
        """
        self.element.translate = "yes" if val == True else "no"
    def _getTitle(self):
        """
        Advisory information associated with the element.
        :returns: str
        """
        return self.element.title
    def _setTitle(self, val):
        """
        Advisory information associated with the element.
        :param val: str
        """
        self.element.title = val
    def _getTabindex(self):
        """
        Specifies whether the element represents an element that is focusable (that is, an element which is part of the sequence of focusable elements in the document), and the relative order of the element in the sequence of focusable elements in the document.
        :returns: number
        """
        return self.element.getAttribute("tabindex")
    def _setTabindex(self, val):
        """
        Specifies whether the element represents an element that is focusable (that is, an element which is part of the sequence of focusable elements in the document), and the relative order of the element in the sequence of focusable elements in the document.
        :param val: number
        """
        self.element.setAttribute("tabindex", val)
    def _getSpellcheck(self):
        """
        Specifies whether the element represents an element whose contents are subject to spell checking and grammar checking.
        :returns: True | False
        """
        return True if self.element.spellcheck == "true" else False
    def _setSpellcheck(self, val):
        """
        Specifies whether the element represents an element whose contents are subject to spell checking and grammar checking.
        :param val: True | False
        """
        self.element.spellcheck = str(val).lower()
    def _getLang(self):
        """
        Specifies the primary language for the contents of the element and for any of the elements attributes that contain text.
        :returns: language tag e.g. de|en|fr|es|it|ru|
        """
        return self.element.lang
    def _setLang(self, val):
        """
        Specifies the primary language for the contents of the element and for any of the elements attributes that contain text.
        :param val: language tag
        """
        self.element.lang = val
    def _getHidden(self):
        """
        Specifies that the element represents an element that is not yet, or is no longer, relevant.
        :returns: True | False
        """
        return True if self.element.hasAttribute("hidden") else False
    def _setHidden(self, val):
        """
        Specifies that the element represents an element that is not yet, or is no longer, relevant.
        :param val: True | False
        """
        if val:
            self.element.setAttribute("hidden", "")
            self.addClass("is-hidden")
        else:
            self.element.removeAttribute("hidden")
            self.removeClass("is-hidden")
    def _getDisabled(self):
        return bool(self._disabledState)
    def _setDisabled(self, disable):
        # Disabling is reference-counted: nested disable() calls must be matched
        # by the same number of enable() calls before the element re-enables.
        for child in self._children:
            child._setDisabled(disable)
        if disable:
            self._disabledState += 1
            if isinstance(self, _attrDisabled) and self._disabledState == 1:
                self.element.disabled = True
        elif self._disabledState > 0:
            if isinstance(self, _attrDisabled) and self._disabledState == 1:
                self.element.disabled = False
            self._disabledState -= 1
    def _getDropzone(self):
        """
        Specifies what types of content can be dropped on the element, and instructs the UA about which actions to take with content when it is dropped on the element.
        :returns: "copy" | "move" | "link"
        """
        return self.element.dropzone
    def _setDropzone(self, val):
        """
        Specifies what types of content can be dropped on the element, and instructs the UA about which actions to take with content when it is dropped on the element.
        :param val: "copy" | "move" | "link"
        """
        self.element.dropzone = val
    def _getDraggable(self):
        """
        Specifies whether the element is draggable.
        :returns: True | False | "auto"
        """
        return (self.element.draggable if str(self.element.draggable) == "auto" else (
            True if str(self.element.draggable).lower() == "true" else False))
    def _setDraggable(self, val):
        """
        Specifies whether the element is draggable.
        :param val: True | False | "auto"
        """
        self.element.draggable = str(val).lower()
    def _getDir(self):
        """
        Specifies the elements text directionality.
        :returns: ltr | rtl | auto
        """
        return self.element.dir
    def _setDir(self, val):
        """
        Specifies the elements text directionality.
        :param val: ltr | rtl | auto
        """
        self.element.dir = val
    def _getContextmenu(self):
        """
        The value of the id attribute on the menu with which to associate the element as a context menu.
        :returns:
        """
        return self.element.contextmenu
    def _setContextmenu(self, val):
        """
        The value of the id attribute on the menu with which to associate the element as a context menu.
        :param val:
        """
        self.element.contextmenu = val
    def _getContenteditable(self):
        """
        Specifies whether the contents of the element are editable.
        :returns: True | False
        """
        v = self.element.getAttribute("contenteditable")
        return str(v).lower() == "true"
    def _setContenteditable(self, val):
        """
        Specifies whether the contents of the element are editable.
        :param val: True | False
        """
        self.element.setAttribute("contenteditable", str(val).lower())
    def _getAccesskey(self):
        """
        A key label or list of key labels with which to associate the element; each key label represents a keyboard shortcut which UAs can use to activate the element or give focus to the element.
        :returns:
        """
        return self.element.accesskey
    def _setAccesskey(self, val):
        """
        A key label or list of key labels with which to associate the element; each key label represents a keyboard shortcut which UAs can use to activate the element or give focus to the element.
        :param val:
        """
        self.element.accesskey = val
    def _getId(self):
        """
        Specifies a unique id for an element
        :returns:
        """
        return self.element.id
    def _setId(self, val):
        """
        Specifies a unique id for an element
        :param val:
        """
        self.element.id = val
    def _getClass(self):
        """
        The class attribute specifies one or more classnames for an element.
        :returns: _WidgetClassWrapper
        """
        if self._widgetClassWrapper is None:
            self._widgetClassWrapper = _WidgetClassWrapper(self)
        return self._widgetClassWrapper
    def _setClass(self, value):
        """
        The class attribute specifies one or more classnames for an element.
        :param value: str | list | None
        @raise ValueError:
        """
        self._getClass().set(value)
    def _getStyle(self):
        """
        The style attribute specifies an inline style for an element.
        :returns: _WidgetStyleWrapper
        """
        return _WidgetStyleWrapper(self)
    def _getRole(self):
        """
        Specifies a role for an element
        @return:
        """
        return self.element.getAttribute("role")
    def _setRole(self, val):
        """
        Specifies a role for an element
        @param val:
        """
        self.element.setAttribute("role", val)
    def hide(self):
        """
        Hide element, if shown.
        :return:
        """
        if not self["hidden"]:
            self["hidden"] = True
    def show(self):
        """
        Show element, if hidden.
        :return:
        """
        if self["hidden"]:
            self["hidden"] = False
    def isHidden(self):
        """
        Checks if a widget is hidden.
        :return: True if hidden, False otherwise.
        """
        return self["hidden"]
    def isVisible(self):
        """
        Checks if a widget is visible.
        :return: True if visible, False otherwise.
        """
        return not self.isHidden()
    def onBind(self, widget, name):
        """
        Event function that is called on the widget when it is bound to another widget with a name.
        This is only done by the HTML parser, a manual binding by the user is not triggered.
        """
        return
    def onAttach(self):
        """Called when this widget becomes part of the live DOM tree."""
        self._isAttached = True
        for c in self._children:
            c.onAttach()
    def onDetach(self):
        """Called when this widget is removed from the live DOM tree."""
        self._isAttached = False
        for c in self._children:
            c.onDetach()
    def __collectChildren(self, *args, **kwargs):
        # Normalizes arbitrary arguments (HTML strings, lists, widgets, plain
        # values) into a flat list of Widget/TextNode instances.
        if kwargs.get("bindTo") is None:
            kwargs["bindTo"] = self
        widgets = []
        for arg in args:
            if isinstance(arg, (str, HtmlAst)):
                widgets.extend(fromHTML(arg, **kwargs))
            elif isinstance(arg, (list, tuple)):
                for subarg in arg:
                    widgets.extend(self.__collectChildren(subarg, **kwargs))
            elif not isinstance(arg, (Widget, TextNode)):
                widgets.append(TextNode(str(arg)))
            else:
                widgets.append(arg)
        return widgets
    def insertBefore(self, insert, child, **kwargs):
        """
        Inserts new content before an existing child.
        :param insert: Content to insert (same forms as appendChild accepts).
        :param child: The existing child to insert before; falsy appends instead.
        :return: The list of inserted widgets.
        """
        if not child:
            return self.appendChild(insert)
        assert child in self._children, "{} is not a child of {}".format(child, self)
        toInsert = self.__collectChildren(insert, **kwargs)
        for insert in toInsert:
            if insert._parent:
                insert._parent.removeChild(insert)
            self.element.insertBefore(insert.element, child.element)
            self._children.insert(self._children.index(child), insert)
            insert._parent = self
            if self._isAttached:
                insert.onAttach()
        return toInsert
    def prependChild(self, *args, **kwargs):
        """
        Inserts new content at the beginning of this widget's children.
        :param replace: (keyword) When True, removes all children first.
        :return: The list of prepended widgets.
        """
        if kwargs.get("replace", False):
            self.removeAllChildren()
            del kwargs["replace"]
        toPrepend = self.__collectChildren(*args, **kwargs)
        for child in toPrepend:
            if child._parent:
                child._parent._children.remove(child)
                child._parent = None
            if not self._children:
                self.appendChild(child)
            else:
                self.insertBefore(child, self.children(0))
        return toPrepend
    def appendChild(self, *args, **kwargs):
        """
        Appends new content to this widget. Accepts widgets, HTML strings,
        lists/tuples thereof, or any value (coerced to a TextNode).
        :param replace: (keyword) When True, removes all children first.
        :return: The list of appended widgets.
        """
        if kwargs.get("replace", False):
            self.removeAllChildren()
            del kwargs["replace"]
        toAppend = self.__collectChildren(*args, **kwargs)
        for child in toAppend:
            if isinstance(child, Template):
                return self.appendChild(child._children)
            if child._parent:
                child._parent._children.remove(child)
            self._children.append(child)
            self.element.appendChild(child.element)
            child._parent = self
            if self._isAttached:
                child.onAttach()
        return toAppend
    def removeChild(self, child):
        """
        Removes a direct child widget from this widget and the DOM.
        """
        assert child in self._children, "{} is not a child of {}".format(child, self)
        if child._isAttached:
            child.onDetach()
        self.element.removeChild(child.element)
        self._children.remove(child)
        child._parent = None
    def removeAllChildren(self):
        """
        Removes all child widgets of the current widget.
        """
        for child in self._children[:]:
            self.removeChild(child)
    def isParentOf(self, widget):
        """
        Checks if an object is the parent of widget.
        :type widget: Widget
        :param widget: The widget to check for.
        :return: True, if widget is a child of the object, else False.
        """
        # You cannot be your own child!
        if self == widget:
            return False
        for child in self._children:
            if child == widget:
                return True
            if child.isParentOf(widget):
                return True
        return False
    def isChildOf(self, widget):
        """
        Checks if an object is the child of widget.
        :type widget: Widget
        :param widget: The widget to check for.
        :return: True, if object is a child of widget, else False.
        """
        # You cannot be your own parent!
        if self == widget:
            return False
        parent = self.parent()
        while parent:
            if parent == widget:
                return True
            # BUGFIX: advance to the *next* ancestor; the old code re-read
            # widget.parent() here, which returned the same widget forever
            # (wrong results and a potential infinite loop).
            parent = parent.parent()
        return False
    def hasClass(self, name):
        """
        Checks whether the widget has the given class set.
        :param name: The class-name to be checked.
        :type name: str
        """
        # NOTE: a duplicate Python-2-era definition of hasClass() (referencing
        # the undefined name `unicode`) was removed; this is the effective one.
        return name in self["class"]
    def addClass(self, *args):
        """
        Adds a class or a list of classes to the current widget.
        If the widget already has the class, it is ignored.
        :param args: A list of class names. This can also be a list.
        :type args: list of str | list of list of str
        """
        for item in args:
            if isinstance(item, list):
                self.addClass(*item)
            elif isinstance(item, str):
                for sitem in item.split(" "):
                    if not self.hasClass(sitem):
                        self["class"].append(sitem)
            else:
                raise TypeError()
    def removeClass(self, *args):
        """
        Removes a class or a list of classes from the current widget.
        :param args: A list of class names. This can also be a list.
        :type args: list of str | list of list of str
        """
        for item in args:
            if isinstance(item, list):
                self.removeClass(item)
            elif isinstance(item, str):
                for sitem in item.split(" "):
                    if self.hasClass(sitem):
                        self["class"].remove(sitem)
            else:
                raise TypeError()
    def toggleClass(self, on, off=None):
        """
        Toggles the class ``on``.
        If the widget contains a class ``on``, it is toggled by ``off``.
        ``off`` can either be a class name that is substituted, or nothing.
        :param on: Classname to test for. If ``on`` does not exist, but ``off``, ``off`` is replaced by ``on``.
        :type on: str
        :param off: Classname to replace if ``on`` existed.
        :type off: str
        :return: Returns True, if ``on`` was switched, else False.
        :rtype: bool
        """
        if self.hasClass(on):
            self.removeClass(on)
            if off and not self.hasClass(off):
                self.addClass(off)
            return False
        if off and self.hasClass(off):
            self.removeClass(off)
        self.addClass(on)
        return True
    # Default (no-op) DOM event handlers; override and register via sinkEvent().
    def onBlur(self, event):
        pass
    def onChange(self, event):
        pass
    def onContextMenu(self, event):
        pass
    def onFocus(self, event):
        pass
    def onFocusIn(self, event):
        pass
    def onFocusOut(self, event):
        pass
    def onFormChange(self, event):
        pass
    def onFormInput(self, event):
        pass
    def onInput(self, event):
        pass
    def onInvalid(self, event):
        pass
    def onReset(self, event):
        pass
    def onSelect(self, event):
        pass
    def onSubmit(self, event):
        pass
    def onKeyDown(self, event):
        pass
    def onKeyPress(self, event):
        pass
    def onKeyUp(self, event):
        pass
    def onClick(self, event):
        pass
    def onDblClick(self, event):
        pass
    def onDrag(self, event):
        pass
    def onDragEnd(self, event):
        pass
    def onDragEnter(self, event):
        pass
    def onDragLeave(self, event):
        pass
    def onDragOver(self, event):
        pass
    def onDragStart(self, event):
        pass
    def onDrop(self, event):
        pass
    def onMouseDown(self, event):
        pass
    def onMouseMove(self, event):
        pass
    def onMouseOut(self, event):
        pass
    def onMouseOver(self, event):
        pass
    def onMouseUp(self, event):
        pass
    def onMouseWheel(self, event):
        pass
    def onScroll(self, event):
        pass
    def onTouchStart(self, event):
        pass
    def onTouchEnd(self, event):
        pass
    def onTouchMove(self, event):
        pass
    def onTouchCancel(self, event):
        pass
    def focus(self):
        """Gives this widget's DOM element the input focus."""
        self.element.focus()
    def blur(self):
        """Removes the input focus from this widget's DOM element."""
        self.element.blur()
    def parent(self):
        """Returns the parent widget, or None."""
        return self._parent
    def children(self, n=None):
        """
        Access children of widget.
        If ``n`` is omitted, it returns a list of all child-widgets;
        Else, it returns the N'th child, or None if its out of bounds.
        :param n: Optional offset of child widget to return.
        :type n: int
        :return: Returns all children or only the requested one.
        :rtype: list | Widget | None
        """
        if n is None:
            return self._children[:]
        try:
            return self._children[n]
        except IndexError:
            return None
    def sortChildren(self, key):
        """
        Sorts our direct children. They are rearranged on DOM level.
        Key must be a function accepting one widget as parameter and must return
        the key used to sort these widgets.
        """
        self._children.sort(key=key)
        tmpl = self._children[:]
        tmpl.reverse()
        for c in tmpl:
            self.element.removeChild(c.element)
            self.element.insertBefore(c.element, self.element.children.item(0))
    def fromHTML(self, html, appendTo=None, bindTo=None, replace=False, vars=None, **kwargs):
        """
        Parses html and constructs its elements as part of self.
        :param html: HTML code.
        :param appendTo: The entity where the HTML code is constructed below. This defaults to self in usual case.
        :param bindTo: The entity where the named objects are bound to. This defaults to self in usual case.
        :param replace: Clear entire content of appendTo before appending.
        :param vars: Deprecated; Same as kwargs.
        :param **kwargs: Additional variables provided as a dict for {{placeholders}} inside the HTML
        :return:
        """
        if appendTo is None:
            appendTo = self
        if bindTo is None:
            bindTo = self
        if replace:
            appendTo.removeAllChildren()
        # use of vars is deprecated!
        if isinstance(vars, dict):
            kwargs.update(vars)
        return fromHTML(html, appendTo=appendTo, bindTo=bindTo, **kwargs)
########################################################################################################################
# Attribute Collectors
########################################################################################################################
# _attrLabel ---------------------------------------------------------------------------------------------------------------
class _attrLabel(object):
def _getLabel(self):
return self.element.getAttribute("label")
def _setLabel(self, val):
self.element.setAttribute("label", val)
# _attrCharset --------------------------------------------------------------------------------------------------------------
class _attrCharset(object):
def _getCharset(self):
return self.element._attrCharset
def _setCharset(self, val):
self.element._attrCharset = val
# _attrCite -----------------------------------------------------------------------------------------------------------------
class _attrCite(object):
def _getCite(self):
return self.element._attrCite
def _setCite(self, val):
self.element._attrCite = val
class _attrDatetime(object):
def _getDatetime(self):
return self.element.datetime
def _setDatetime(self, val):
self.element.datetime = val
# Form -----------------------------------------------------------------------------------------------------------------
class _attrForm(object):
def _getForm(self):
return self.element.form
def _setForm(self, val):
self.element.form = val
class _attrAlt(object):
def _getAlt(self):
return self.element.alt
def _setAlt(self, val):
self.element.alt = val
class _attrAutofocus(object):
def _getAutofocus(self):
return True if self.element.hasAttribute("autofocus") else False
def _setAutofocus(self, val):
if val:
self.element.setAttribute("autofocus", "")
else:
self.element.removeAttribute("autofocus")
class _attrDisabled(object):
    """
    Marker mixin: Widget._setDisabled() checks for this class via isinstance()
    to decide whether to toggle the underlying element's "disabled" property.
    """
    pass
class _attrChecked(object):
def _getChecked(self):
return self.element.checked
def _setChecked(self, val):
self.element.checked = val
class _attrIndeterminate(object):
def _getIndeterminate(self):
return self.element.indeterminate
def _setIndeterminate(self, val):
self.element.indeterminate = val
class _attrName(object):
def _getName(self):
return self.element.getAttribute("name")
def _setName(self, val):
self.element.setAttribute("name", val)
class _attrValue(object):
def _getValue(self):
return self.element.value
def _setValue(self, val):
self.element.value = val
class _attrAutocomplete(object):
def _getAutocomplete(self):
return True if self.element.autocomplete == "on" else False
def _setAutocomplete(self, val):
self.element.autocomplete = "on" if val == True else "off"
class _attrRequired(object):
def _getRequired(self):
return True if self.element.hasAttribute("required") else False
def _setRequired(self, val):
if val:
self.element.setAttribute("required", "")
else:
self.element.removeAttribute("required")
class _attrMultiple(object):
def _getMultiple(self):
return True if self.element.hasAttribute("multiple") else False
def _setMultiple(self, val):
if val:
self.element.setAttribute("multiple", "")
else:
self.element.removeAttribute("multiple")
class _attrSize(object):
def _getSize(self):
return self.element.size
def _setSize(self, val):
self.element.size = val
class _attrFor(object):
def _getFor(self):
return self.element.getAttribute("for")
def _setFor(self, val):
self.element.setAttribute("for", val)
class _attrInputs(_attrRequired):
    """Mixin bundling common attributes of input-like elements."""
    def _getMaxlength(self):
        return getattr(self.element, "maxlength")
    def _setMaxlength(self, val):
        setattr(self.element, "maxlength", val)
    def _getPlaceholder(self):
        return getattr(self.element, "placeholder")
    def _setPlaceholder(self, val):
        setattr(self.element, "placeholder", val)
    def _getReadonly(self):
        return bool(self.element.hasAttribute("readonly"))
    def _setReadonly(self, val):
        if not val:
            self.element.removeAttribute("readonly")
        else:
            self.element.setAttribute("readonly", "")
class _attrFormhead(object):
def _getFormaction(self):
return self.element.formaction
def _setFormaction(self, val):
self.element.formaction = val
def _getFormenctype(self):
return self.element.formenctype
def _setFormenctype(self, val):
self.element.formenctype = val
def _getFormmethod(self):
return self.element.formmethod
def _setFormmethod(self, val):
self.element.formmethod = val
def _getFormtarget(self):
return self.element.formtarget
def _setFormtarget(self, val):
self.element.formtarget = val
def _getFormnovalidate(self):
return True if self.element.hasAttribute("formnovalidate") else False
def _setFormnovalidate(self, val):
if val:
self.element.setAttribute("formnovalidate", "")
else:
self.element.removeAttribute("formnovalidate")
# _attrHref -----------------------------------------------------------------------------------------------------------------
class _attrHref(object):
def _getHref(self):
"""
Url of a Page
:param self:
"""
return self.element.href
def _setHref(self, val):
"""
Url of a Page
:param val: URL
"""
self.element.href = val
def _getHreflang(self):
return self.element.hreflang
def _setHreflang(self, val):
self.element.hreflang = val
class _attrTarget(object):
def _getTarget(self):
return self.element.target
def _setTarget(self, val):
self.element.target = val
# _attrMedia ----------------------------------------------------------------------------------------------------------------
class _attrType(object):
def _getType(self):
return self.element.type
def _setType(self, val):
self.element.type = val
class _attrMedia(_attrType):
    """Mixin exposing the element's "media" property (plus "type")."""
    def _getMedia(self):
        return getattr(self.element, "media")
    def _setMedia(self, val):
        setattr(self.element, "media", val)
class _attrDimensions(object):
def _getWidth(self):
return self.element.width
def _setWidth(self, val):
self.element.width = val
def _getHeight(self):
return self.element.height
def _setHeight(self, val):
self.element.height = val
class _attrUsemap(object):
def _getUsemap(self):
return self.element.usemap
def _setUsemap(self, val):
self.element.usemap = val
class _attrMultimedia(object):
def _getAutoplay(self):
return True if self.element.hasAttribute("autoplay") else False
def _setAutoplay(self, val):
if val:
self.element.setAttribute("autoplay", "")
else:
self.element.removeAttribute("autoplay")
def _getPlaysinline(self):
return True if self.element.hasAttribute("playsinline") else False
def _setPlaysinline(self, val):
if val:
self.element.setAttribute("playsinline", "")
else:
self.element.removeAttribute("playsinline")
def _getControls(self):
return True if self.element.hasAttribute("controls") else False
def _setControls(self, val):
if val:
self.element.setAttribute("controls", "")
else:
self.element.removeAttribute("controls")
def _getLoop(self):
return True if self.element.hasAttribute("loop") else False
def _setLoop(self, val):
if val:
self.element.setAttribute("loop", "")
else:
self.element.removeAttribute("loop")
def _getMuted(self):
return True if self.element.hasAttribute("muted") else False
def _setMuted(self, val):
if val:
self.element.setAttribute("muted", "")
else:
self.element.removeAttribute("muted")
def _getPreload(self):
return self.element.preload
def _setPreload(self, val):
self.element.preload = val
# _attrRel ------------------------------------------------------------------------------------------------------------------
class _attrRel(object):
def _getRel(self):
return self.element.rel
def _setRel(self, val):
self.element.rel = val
# _attrSrc ------------------------------------------------------------------------------------------------------------------
class _attrSrc(object):
def _getSrc(self):
return self.element.src
def _setSrc(self, val):
self.element.src = val
########################################################################################################################
# HTML Elements
########################################################################################################################
# A --------------------------------------------------------------------------------------------------------------------
class A(Widget, _attrHref, _attrTarget, _attrMedia, _attrRel, _attrName):
    """Widget for the <a> (anchor) element."""
    _tagName = "a"
    def _getDownload(self):
        """
        The download attribute specifies the path to a download
        :returns: filename
        """
        return getattr(self.element, "download")
    def _setDownload(self, val):
        """
        The download attribute specifies the path to a download
        :param val: filename
        """
        setattr(self.element, "download", val)
# Area -----------------------------------------------------------------------------------------------------------------
class Area(A, _attrAlt):
    """Widget for the <area> element (leaf; defines an image-map region)."""
    _tagName = "area"
    _leafTag = True
    def _getCoords(self):
        return getattr(self.element, "coords")
    def _setCoords(self, val):
        setattr(self.element, "coords", val)
    def _getShape(self):
        return getattr(self.element, "shape")
    def _setShape(self, val):
        setattr(self.element, "shape", val)
# Audio ----------------------------------------------------------------------------------------------------------------
class Audio(Widget, _attrSrc, _attrMultimedia):
    """
    Widget for the <audio> element.
    """
    _tagName = "audio"
class Bdo(Widget):
    """
    Widget for the <bdo> (bidirectional text override) element.
    """
    _tagName = "bdo"
# Blockquote -----------------------------------------------------------------------------------------------------------
class Blockquote(Widget):
    """
    Widget for the <blockquote> element.
    """
    _tagName = "blockquote"
    def _getBlockquote(self):
        # NOTE(review): "blockquote" is not a standard DOM element property —
        # presumably "cite" was intended here; confirm against callers.
        return self.element.blockquote
    def _setBlockquote(self, val):
        self.element.blockquote = val
# Body -----------------------------------------------------------------------------------------------------------------
class BodyCls(Widget):
    """
    Widget wrapping the document's already existing <body> element.
    Use the Body() accessor instead of instantiating this class directly.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(_wrapElem=domGetElementsByTagName("body")[0], *args, **kwargs)
        self._isAttached = True
_body = None  # Lazily created BodyCls singleton, managed by Body()
def Body():
    """
    Returns the global BodyCls singleton wrapping the document's <body>
    element, creating it on first access.
    """
    global _body
    if _body is None:
        _body = BodyCls()
    return _body
# Canvas ---------------------------------------------------------------------------------------------------------------
class Canvas(Widget, _attrDimensions):
	"""Widget wrapping the HTML <canvas> element."""
	_tagName = "canvas"
# Command --------------------------------------------------------------------------------------------------------------
class Command(Widget, _attrLabel, _attrType, _attrDisabled, _attrChecked):
	"""Widget wrapping the (obsolete) HTML <command> element."""
	_tagName = "command"
	def _getIcon(self):
		return self.element.icon
	def _setIcon(self, val):
		self.element.icon = val
	def _getRadiogroup(self):
		return self.element.radiogroup
	def _setRadiogroup(self, val):
		self.element.radiogroup = val
# _Del -----------------------------------------------------------------------------------------------------------------
class _Del(Widget, _attrCite, _attrDatetime):
	"""Widget wrapping the HTML <del> element (named _Del because 'del' is a keyword)."""
	_tagName = "_del"
# Dialog --------------------------------------------------------------------------------------------------------------
class Dialog(Widget):
	"""Widget wrapping the HTML <dialog> element with its boolean "open" attribute."""
	_tagName = "dialog"
	def _getOpen(self):
		# Boolean attribute: truthiness is defined by presence, not value.
		return True if self.element.hasAttribute("open") else False
	def _setOpen(self, val):
		if val:
			self.element.setAttribute("open", "")
		else:
			self.element.removeAttribute("open")
# Elements -------------------------------------------------------------------------------------------------------------
# Simple tag wrappers: each class only names the HTML tag it creates;
# all behavior comes from Widget. _leafTag marks void elements the parser
# auto-closes.
class Abbr(Widget):
	_tagName = "abbr"
class Address(Widget):
	_tagName = "address"
class Article(Widget):
	_tagName = "article"
class Aside(Widget):
	_tagName = "aside"
class B(Widget):
	_tagName = "b"
class Bdi(Widget):
	_tagName = "bdi"
class Br(Widget):
	_tagName = "br"
	_leafTag = True
class Caption(Widget):
	_tagName = "caption"
class Cite(Widget):
	_tagName = "cite"
class Code(Widget):
	_tagName = "code"
class Datalist(Widget):
	_tagName = "datalist"
class Dfn(Widget):
	_tagName = "dfn"
class Div(Widget):
	_tagName = "div"
class Em(Widget):
	_tagName = "em"
class Embed(Widget, _attrSrc, _attrType, _attrDimensions):
	_tagName = "embed"
	_leafTag = True
class Figcaption(Widget):
	_tagName = "figcaption"
class Figure(Widget):
	_tagName = "figure"
class Footer(Widget):
	_tagName = "footer"
class Header(Widget):
	_tagName = "header"
class H1(Widget):
	_tagName = "h1"
class H2(Widget):
	_tagName = "h2"
class H3(Widget):
	_tagName = "h3"
class H4(Widget):
	_tagName = "h4"
class H5(Widget):
	_tagName = "h5"
class H6(Widget):
	_tagName = "h6"
class Hr(Widget):
	_tagName = "hr"
	_leafTag = True
class I(Widget):
	_tagName = "i"
# NOTE(review): the HTML element is <kbd>; "kdb" looks like a typo — confirm
# before fixing, since existing markup may rely on the current spelling.
class Kdb(Widget):
	_tagName = "kdb"
class Legend(Widget):
	_tagName = "legend"
class Mark(Widget):
	_tagName = "mark"
class Noscript(Widget):
	_tagName = "noscript"
class P(Widget):
	_tagName = "p"
# NOTE(review): "rq" is not a standard HTML tag (<rp>/<rt> are) — verify intent.
class Rq(Widget):
	_tagName = "rq"
class Rt(Widget):
	_tagName = "rt"
class Ruby(Widget):
	_tagName = "ruby"
class S(Widget):
	_tagName = "s"
class Samp(Widget):
	_tagName = "samp"
class Section(Widget):
	_tagName = "section"
class Small(Widget):
	_tagName = "small"
class Strong(Widget):
	_tagName = "strong"
class Sub(Widget):
	_tagName = "sub"
# NOTE(review): the HTML element is <summary>; "summery" looks like a typo —
# changing it would alter which markup this class matches, so confirm first.
class Summery(Widget):
	_tagName = "summery"
class Sup(Widget):
	_tagName = "sup"
class U(Widget):
	_tagName = "u"
class Var(Widget):
	_tagName = "var"
# NOTE(review): <wbr> is a void element; _leafTag is not set here — confirm.
class Wbr(Widget):
	_tagName = "wbr"
# Form -----------------------------------------------------------------------------------------------------------------
class Button(Widget, _attrDisabled, _attrType, _attrForm, _attrAutofocus, _attrName, _attrValue, _attrFormhead):
	"""Widget wrapping the HTML <button> element."""
	_tagName = "button"
class Fieldset(Widget, _attrDisabled, _attrForm, _attrName):
	"""Widget wrapping the HTML <fieldset> element."""
	_tagName = "fieldset"
class Form(Widget, _attrDisabled, _attrName, _attrTarget, _attrAutocomplete):
	"""Widget wrapping the HTML <form> element."""
	_tagName = "form"
	def _getNovalidate(self):
		# Boolean attribute: presence means "novalidate".
		return True if self.element.hasAttribute("novalidate") else False
	def _setNovalidate(self, val):
		if val:
			self.element.setAttribute("novalidate", "")
		else:
			self.element.removeAttribute("novalidate")
	def _getAction(self):
		return self.element.action
	def _setAction(self, val):
		self.element.action = val
	def _getMethod(self):
		return self.element.method
	def _setMethod(self, val):
		self.element.method = val
	def _getEnctype(self):
		return self.element.enctype
	def _setEnctype(self, val):
		self.element.enctype = val
	def _getAccept_attrCharset(self):
		# "accept-charset" contains a hyphen, so it cannot be a plain attribute access.
		return getattr(self.element, "accept-charset")
	def _setAccept_attrCharset(self, val):
		self.element.setAttribute("accept-charset", val)
class Input(Widget, _attrDisabled, _attrType, _attrForm, _attrAlt, _attrAutofocus, _attrChecked,
            _attrIndeterminate, _attrName, _attrDimensions, _attrValue, _attrFormhead,
            _attrAutocomplete, _attrInputs, _attrMultiple, _attrSize, _attrSrc):
	"""Widget wrapping the HTML <input> element; most behavior comes from the attribute mixins."""
	_tagName = "input"
	_leafTag = True
	def _getAccept(self):
		return self.element.accept
	def _setAccept(self, val):
		self.element.accept = val
	def _getList(self):
		return self.element.list
	def _setList(self, val):
		self.element.list = val
	def _getMax(self):
		return self.element.max
	def _setMax(self, val):
		self.element.max = val
	def _getMin(self):
		return self.element.min
	def _setMin(self, val):
		self.element.min = val
	def _getPattern(self):
		return self.element.pattern
	def _setPattern(self, val):
		self.element.pattern = val
class Label(Widget, _attrForm, _attrFor):
	"""Widget wrapping the HTML <label> element.

	When *forElem* is given, the label is linked to that widget; if the widget
	has no id yet, a unique "label-autoid-for-N" id is generated for it.
	"""
	_tagName = "label"
	# Class-level counter used to generate unique auto-ids across all labels.
	autoIdCounter = 0
	def __init__(self, *args, forElem=None, **kwargs):
		super().__init__(*args, **kwargs)
		if forElem:
			if not forElem["id"]:
				idx = Label.autoIdCounter
				Label.autoIdCounter += 1
				forElem["id"] = "label-autoid-for-{}".format(idx)
			self["for"] = forElem["id"]
class Optgroup(Widget, _attrDisabled, _attrLabel):
	"""Widget wrapping the HTML <optgroup> element."""
	_tagName = "optgroup"
class Option(Widget, _attrDisabled, _attrLabel, _attrValue):
	"""Widget wrapping the HTML <option> element with a strict-bool "selected" accessor."""
	_tagName = "option"
	def _getSelected(self):
		# Normalize the DOM property to a plain True/False.
		return bool(self.element.selected)
	def _setSelected(self, val):
		# Coerce any truthy/falsy value before assigning.
		self.element.selected = bool(val)
class Output(Widget, _attrForm, _attrName, _attrFor):
	"""Widget wrapping the HTML <output> element."""
	_tagName = "output"
class Select(Widget, _attrDisabled, _attrForm, _attrAutofocus, _attrName, _attrRequired, _attrMultiple, _attrSize):
	"""Widget wrapping the HTML <select> element (read-only selection accessors)."""
	_tagName = "select"
	def _getSelectedIndex(self):
		return self.element.selectedIndex
	def _getOptions(self):
		return self.element.options
class Textarea(Widget, _attrDisabled, _attrForm, _attrAutofocus, _attrName, _attrInputs, _attrValue):
	"""Widget wrapping the HTML <textarea> element."""
	_tagName = "textarea"
	def _getCols(self):
		return self.element.cols
	def _setCols(self, val):
		self.element.cols = val
	def _getRows(self):
		return self.element.rows
	def _setRows(self, val):
		self.element.rows = val
	def _getWrap(self):
		return self.element.wrap
	def _setWrap(self, val):
		self.element.wrap = val
# Head -----------------------------------------------------------------------------------------------------------------
class HeadCls(Widget):
	"""Singleton widget bound to the document's existing <head> element (use Head())."""
	def __init__(self, *args, **kwargs):
		# Wrap the already-present DOM <head> instead of creating a new element.
		super().__init__(_wrapElem=domGetElementsByTagName("head")[0], *args, **kwargs)
		self._isAttached = True
# Module-level cache for the Head() singleton.
_head = None
def Head():
	"""Return the lazily-created singleton HeadCls instance."""
	global _head
	if _head is None:
		_head = HeadCls()
	return _head
# Iframe ---------------------------------------------------------------------------------------------------------------
class Iframe(Widget, _attrSrc, _attrName, _attrDimensions):
	"""Widget wrapping the HTML <iframe> element."""
	_tagName = "iframe"
	def _getSandbox(self):
		return self.element.sandbox
	def _setSandbox(self, val):
		self.element.sandbox = val
	def _getSrcdoc(self):
		# Bug fix: read the element's "srcdoc" property; previously this read
		# .src, so the srcdoc accessor aliased (and clobbered) the src attribute.
		return self.element.srcdoc
	def _setSrcdoc(self, val):
		# Bug fix: write "srcdoc", not "src" (see _getSrcdoc).
		self.element.srcdoc = val
	def _getSeamless(self):
		# Boolean attribute: presence means "seamless".
		return True if self.element.hasAttribute("seamless") else False
	def _setSeamless(self, val):
		if val:
			self.element.setAttribute("seamless", "")
		else:
			self.element.removeAttribute("seamless")
# Img ------------------------------------------------------------------------------------------------------------------
class Img(Widget, _attrSrc, _attrDimensions, _attrUsemap, _attrAlt):
	"""Widget wrapping the HTML <img> element.

	:param src: Optional image URL assigned via the "src" attribute.
	"""
	_tagName = "img"
	_leafTag = True
	def __init__(self, src=None, *args, **kwargs):
		# Bug fix: forward *args/**kwargs to Widget.__init__ instead of
		# silently dropping them (the original called super().__init__()).
		super().__init__(*args, **kwargs)
		if src:
			self["src"] = src
	def _getCrossorigin(self):
		return self.element.crossorigin
	def _setCrossorigin(self, val):
		self.element.crossorigin = val
	def _getIsmap(self):
		return self.element.ismap
	def _setIsmap(self, val):
		self.element.ismap = val
# Ins ------------------------------------------------------------------------------------------------------------------
class Ins(Widget, _attrCite, _attrDatetime):
	"""Widget wrapping the HTML <ins> element."""
	_tagName = "ins"
# Keygen ---------------------------------------------------------------------------------------------------------------
# NOTE(review): Keygen inherits from Form (not Widget) — looks like a
# copy-paste slip, since <keygen> is a form *control*; confirm before changing.
class Keygen(Form, _attrAutofocus, _attrDisabled):
	"""Widget wrapping the (obsolete) HTML <keygen> element."""
	_tagName = "keygen"
	def _getChallenge(self):
		# Boolean attribute: presence means "challenge".
		return True if self.element.hasAttribute("challenge") else False
	def _setChallenge(self, val):
		if val:
			self.element.setAttribute("challenge", "")
		else:
			self.element.removeAttribute("challenge")
	def _getKeytype(self):
		return self.element.keytype
	def _setKeytype(self, val):
		self.element.keytype = val
# Link -----------------------------------------------------------------------------------------------------------------
class Link(Widget, _attrHref, _attrMedia, _attrRel):
	"""Widget wrapping the HTML <link> element."""
	_tagName = "link"
	_leafTag = True
	def _getSizes(self):
		return self.element.sizes
	def _setSizes(self, val):
		self.element.sizes = val
# List -----------------------------------------------------------------------------------------------------------------
# List element wrappers: tag-only subclasses of Widget.
class Ul(Widget):
	_tagName = "ul"
class Ol(Widget):
	_tagName = "ol"
class Li(Widget):
	_tagName = "li"
class Dl(Widget):
	_tagName = "dl"
class Dt(Widget):
	_tagName = "dt"
class Dd(Widget):
	_tagName = "dd"
# Map ------------------------------------------------------------------------------------------------------------------
# NOTE(review): Map inherits from Label (not Widget) — looks unintentional; confirm.
class Map(Label, _attrType):
	_tagName = "map"
# Menu -----------------------------------------------------------------------------------------------------------------
class Menu(Widget):
	_tagName = "menu"
# Meta -----------------------------------------------------------------------------------------------------------------
class Meta(Widget, _attrName, _attrCharset):
	"""Widget wrapping the HTML <meta> element."""
	_tagName = "meta"
	_leafTag = True
	def _getContent(self):
		return self.element.content
	def _setContent(self, val):
		self.element.content = val
# Meter ----------------------------------------------------------------------------------------------------------------
# NOTE(review): Meter inherits from Form — <meter> is not a form element;
# confirm whether Widget was intended.
class Meter(Form, _attrValue):
	"""Widget wrapping the HTML <meter> element."""
	_tagName = "meter"
	def _getHigh(self):
		return self.element.high
	def _setHigh(self, val):
		self.element.high = val
	def _getLow(self):
		return self.element.low
	def _setLow(self, val):
		self.element.low = val
	def _getMax(self):
		return self.element.max
	def _setMax(self, val):
		self.element.max = val
	def _getMin(self):
		return self.element.min
	def _setMin(self, val):
		self.element.min = val
	def _getOptimum(self):
		return self.element.optimum
	def _setOptimum(self, val):
		self.element.optimum = val
# Nav ------------------------------------------------------------------------------------------------------------------
class Nav(Widget):
	_tagName = "nav"
# Object -----------------------------------------------------------------------------------------------------------------
# NOTE(review): Object also inherits from Form — confirm intent (see Meter).
class Object(Form, _attrType, _attrName, _attrDimensions, _attrUsemap):
	_tagName = "object"
# Param -----------------------------------------------------------------------------------------------------------------
class Param(Widget, _attrName, _attrValue):
	_tagName = "param"
	_leafTag = True
# Progress -------------------------------------------------------------------------------------------------------------
class Progress(Widget, _attrValue):
	"""Widget wrapping the HTML <progress> element."""
	_tagName = "progress"
	def _getMax(self):
		return self.element.max
	def _setMax(self, val):
		self.element.max = val
# Q --------------------------------------------------------------------------------------------------------------------
class Q(Widget, _attrCite):
	_tagName = "q"
# Script ----------------------------------------------------------------------------------------------------------------
class Script(Widget, _attrSrc, _attrCharset):
	"""Widget wrapping the HTML <script> element.

	"async" and "defer" are boolean attributes: truthiness is defined by
	their presence on the element, not by their value.
	"""
	_tagName = "script"
	def _getAsync(self):
		return bool(self.element.hasAttribute("async"))
	def _setAsync(self, val):
		if val:
			self.element.setAttribute("async", "")
			return
		self.element.removeAttribute("async")
	def _getDefer(self):
		return bool(self.element.hasAttribute("defer"))
	def _setDefer(self, val):
		if val:
			self.element.setAttribute("defer", "")
			return
		self.element.removeAttribute("defer")
# Source ---------------------------------------------------------------------------------------------------------------
class Source(Widget, _attrMedia, _attrSrc):
	"""Widget wrapping the HTML <source> element."""
	_tagName = "source"
	_leafTag = True
# Span -----------------------------------------------------------------------------------------------------------------
class Span(Widget):
	_tagName = "span"
# Style ----------------------------------------------------------------------------------------------------------------
class Style(Widget, _attrMedia):
	"""Widget wrapping the HTML <style> element."""
	_tagName = "style"
	def _getScoped(self):
		# Boolean attribute: presence means "scoped".
		return True if self.element.hasAttribute("scoped") else False
	def _setScoped(self, val):
		if val:
			self.element.setAttribute("scoped", "")
		else:
			self.element.removeAttribute("scoped")
# Table ----------------------------------------------------------------------------------------------------------------
class Tr(Widget):
	"""Widget wrapping the HTML <tr> (table row) element."""
	_tagName = "tr"
	def _getRowspan(self):
		# Bug fix: getAttribute() returns a string; convert to int so callers
		# such as Table.getRowCount()/prepareRow() can do arithmetic with it.
		span = self.element.getAttribute("rowspan")
		return int(span) if span else 1
	def _setRowspan(self, span):
		assert span >= 1, "span may not be negative"
		self.element.setAttribute("rowspan", span)
		return self
class Td(Widget):
	"""Widget wrapping the HTML <td> (table cell) element."""
	_tagName = "td"
	def _getColspan(self):
		# Bug fix: convert the attribute string to int (see Tr._getRowspan).
		span = self.element.getAttribute("colspan")
		return int(span) if span else 1
	def _setColspan(self, span):
		assert span >= 1, "span may not be negative"
		self.element.setAttribute("colspan", span)
		return self
	def _getRowspan(self):
		# Bug fix: convert the attribute string to int (see Tr._getRowspan).
		span = self.element.getAttribute("rowspan")
		return int(span) if span else 1
	def _setRowspan(self, span):
		assert span >= 1, "span may not be negative"
		self.element.setAttribute("rowspan", span)
		return self
class Th(Td):
	"""Widget wrapping the HTML <th> element (inherits span handling from Td)."""
	_tagName = "th"
class Thead(Widget):
	_tagName = "thead"
class Tbody(Widget):
	_tagName = "tbody"
class ColWrapper(object):
	"""Index-based accessor for the cells (columns) of a single table row."""
	def __init__(self, parentElem, *args, **kwargs):
		super().__init__(*args, **kwargs)
		self.parentElem = parentElem
	def __getitem__(self, item):
		"""Return the cell at index *item*, or None when out of range."""
		assert isinstance(item, int), "Invalid col-number. Expected int, got {}".format(str(type(item)))
		# Bug fix: use >= so item == len(...) is rejected instead of raising IndexError.
		if item < 0 or item >= len(self.parentElem._children):
			return None
		return self.parentElem._children[item]
	def __setitem__(self, key, value):
		"""Replace the contents of cell *key* with a widget, text node, or a sequence of them."""
		col = self[key]
		assert col is not None, "Cannot assign widget to invalid column"
		col.removeAllChildren()
		if isinstance(value, list) or isinstance(value, tuple):
			for el in value:
				if isinstance(el, Widget) or isinstance(el, TextNode):
					# Bug fix: append the individual element, not the whole sequence.
					col.appendChild(el)
		elif isinstance(value, Widget) or isinstance(value, TextNode):
			col.appendChild(value)
class RowWrapper(object):
	"""Index-based accessor for the rows of a table body; items are ColWrappers."""
	def __init__(self, parentElem, *args, **kwargs):
		super().__init__(*args, **kwargs)
		self.parentElem = parentElem
	def __getitem__(self, item):
		"""Return a ColWrapper for row *item*, or None when out of range."""
		assert isinstance(item, int), "Invalid row-number. Expected int, got {}".format(str(type(item)))
		# Bug fix: use >= so item == len(...) is rejected instead of raising IndexError.
		if item < 0 or item >= len(self.parentElem._children):
			return None
		return ColWrapper(self.parentElem._children[item])
class Table(Widget):
	"""Widget wrapping the HTML <table> element.

	A <thead> and <tbody> are created automatically; rows/cells can be
	grown on demand via prepareRow()/prepareCol()/prepareGrid().
	"""
	_tagName = "table"
	def __init__(self, *args, **kwargs):
		super().__init__(*args, **kwargs)
		self.head = Thead()
		self.body = Tbody()
		self.appendChild(self.head)
		self.appendChild(self.body)
	def prepareRow(self, row):
		"""Ensure the body contains a row with logical index *row*, appending <tr>s as needed."""
		assert row >= 0, "Cannot create rows with negative index"
		# NOTE(review): child["rowspan"] comes from getAttribute() and may be a
		# string once a span has been set — verify the subtraction below.
		for child in self.body._children:
			row -= child["rowspan"]
			if row < 0:
				return
		while row >= 0:
			self.body.appendChild(Tr())
			row -= 1
	def prepareCol(self, row, col):
		"""Ensure cell (*row*, *col*) exists, creating the row and missing <td>s as needed."""
		assert col >= 0, "Cannot create cols with negative index"
		self.prepareRow(row)
		for rowChild in self.body._children:
			row -= rowChild["rowspan"]
			if row < 0:
				for colChild in rowChild._children:
					col -= colChild["colspan"]
					if col < 0:
						return
				while col >= 0:
					rowChild.appendChild(Td())
					col -= 1
				return
	def prepareGrid(self, rows, cols):
		"""Append *rows* new rows, each provisioned with *cols* columns."""
		for row in range(self.getRowCount(), self.getRowCount() + rows):
			self.prepareCol(row, cols)
	def clear(self):
		"""Remove all rows and cells from the table body."""
		for row in self.body._children[:]:
			for col in row._children[:]:
				row.removeChild(col)
			self.body.removeChild(row)
	def _getCell(self):
		# Expose cells as table["cell"][row][col] via the wrapper classes.
		return RowWrapper(self.body)
	def getRowCount(self):
		"""Return the number of logical rows (summing each <tr>'s rowspan)."""
		cnt = 0
		for tr in self.body._children:
			cnt += tr["rowspan"]
		return cnt
# Time -----------------------------------------------------------------------------------------------------------------
class Time(Widget, _attrDatetime):
	"""Widget wrapping the HTML <time> element."""
	_tagName = "time"
# Track ----------------------------------------------------------------------------------------------------------------
# NOTE(review): Track inherits from Label (not Widget) — looks unintentional; confirm.
class Track(Label, _attrSrc):
	"""Widget wrapping the HTML <track> element."""
	_tagName = "track"
	_leafTag = True
	def _getKind(self):
		return self.element.kind
	def _setKind(self, val):
		self.element.kind = val
	def _getSrclang(self):
		return self.element.srclang
	def _setSrclang(self, val):
		self.element.srclang = val
	def _getDefault(self):
		# Boolean attribute: presence means "default".
		return True if self.element.hasAttribute("default") else False
	def _setDefault(self, val):
		if val:
			self.element.setAttribute("default", "")
		else:
			self.element.removeAttribute("default")
# Video ----------------------------------------------------------------------------------------------------------------
class Video(Widget, _attrSrc, _attrDimensions, _attrMultimedia):
	"""Widget wrapping the HTML <video> element."""
	_tagName = "video"
	def _getPoster(self):
		return self.element.poster
	def _setPoster(self, val):
		self.element.poster = val
# Template -------------------------------------------------------------------------------------------------------------
class Template(Widget):
	_tagName = "template"
########################################################################################################################
# Utilities
########################################################################################################################
def unescape(val, maxLength=0):
	"""
	Unquotes several HTML-quoted characters in a string.
	:param val: The value to be unescaped.
	:type val: str
	:param maxLength: Cut-off after maxLength characters.
			A value of 0 means "unlimited". (default)
	:type maxLength: int
	:returns: The unquoted string.
	:rtype: str
	"""
	# Bug fix: the entity names had been HTML-decoded away in this copy of the
	# source, turning every replace() into a no-op such as replace("<", "<").
	# Restore the intended entity -> character substitutions.
	val = val \
		.replace("&lt;", "<") \
		.replace("&gt;", ">") \
		.replace("&quot;", "\"") \
		.replace("&#39;", "'")
	if maxLength > 0:
		return val[0:maxLength]
	return val
def doesEventHitWidgetOrParents(event, widget):
	"""
	Test if event 'event' hits widget 'widget' (or *any* of its parents)
	"""
	current = widget
	# Walk up the parent chain until we run out of ancestors.
	while current:
		if event.target == current.element:
			return True
		current = current.parent()
	return False
def doesEventHitWidgetOrChildren(event, widget):
	"""
	Test if event 'event' hits widget 'widget' (or *any* of its children)
	"""
	if event.target == widget.element:
		return True
	# Depth-first search through the child tree.
	return any(doesEventHitWidgetOrChildren(event, child) for child in widget._children)
def textToHtml(node, text):
	"""
	Generates html nodes from text by splitting text into content and into
	line breaks html5.Br.
	:param node: The node where the nodes are appended to.
	:param text: The text to be inserted.
	"""
	first = True
	for part in text.split("\n"):
		# Every line after the first is preceded by a <br>.
		if not first:
			node.appendChild(Br())
		node.appendChild(TextNode(part))
		first = False
def parseInt(s, ret=0):
	"""
	Parses a value as int
	"""
	# Non-strings are delegated straight to int().
	if not isinstance(s, str):
		return int(s)
	if s:
		# Strip a single leading sign before validating the digits.
		digits = s[1:] if s[0] in "+-" else s
		if digits and all(ch in "0123456789" for ch in digits):
			return int(s)
	return ret
def parseFloat(s, ret=0.0):
	"""
	Parses a value as float.
	:param s: Value to parse; non-strings are passed to float() directly.
	:param ret: Fallback returned when *s* is not a valid float literal.
	"""
	if not isinstance(s, str):
		return float(s)
	elif s:
		if s[0] in "+-":
			ts = s[1:]
		else:
			ts = s
		# Bug fix: require at least one digit so a bare "." (or "+.") does not
		# reach float() and raise ValueError.
		if ts and ts.count(".") <= 1 \
				and any(ch in "0123456789" for ch in ts) \
				and all(ch in ".0123456789" for ch in ts):
			return float(s)
	return ret
########################################################################################################################
# Keycodes
########################################################################################################################
def getKey(event):
	"""
	Returns the Key Identifier of the given event
	Available Codes: https://www.w3.org/TR/2006/WD-DOM-Level-3-Events-20060413/keyset.html#KeySet-Set
	"""
	_missing = object()
	# Prefer the modern "key" property when the event carries it.
	key = getattr(event, "key", _missing)
	if key is not _missing:
		return key
	# Fall back to the legacy "keyIdentifier", normalizing Escape spellings.
	ident = getattr(event, "keyIdentifier", _missing)
	if ident is not _missing:
		return "Escape" if ident in ("Esc", "U+001B") else ident
	return None
def isArrowLeft(event):
	"""True when *event* is the left-arrow key (modern or legacy name)."""
	return getKey(event) in ("ArrowLeft", "Left")
def isArrowUp(event):
	"""True when *event* is the up-arrow key (modern or legacy name)."""
	return getKey(event) in ("ArrowUp", "Up")
def isArrowRight(event):
	"""True when *event* is the right-arrow key (modern or legacy name)."""
	return getKey(event) in ("ArrowRight", "Right")
def isArrowDown(event):
	"""True when *event* is the down-arrow key (modern or legacy name)."""
	return getKey(event) in ("ArrowDown", "Down")
def isEscape(event):
	"""True when *event* is the Escape key."""
	return getKey(event) == "Escape"
def isReturn(event):
	"""True when *event* is the Enter/Return key."""
	return getKey(event) == "Enter"
def isControl(event):
	"""True when *event* is the Control (Ctrl) key."""
	return getKey(event) == "Control"
def isShift(event):
	"""True when *event* is the Shift key."""
	return getKey(event) == "Shift"
########################################################################################################################
# HTML parser
########################################################################################################################
# Global variables required by HTML parser
# __tags maps lowercase tag name -> (widget class, list of settable attributes);
# built lazily by _buildTags(). __domParser caches the browser's DOMParser.
__tags = None
__domParser = None
def registerTag(tagName, widgetClass, override=True):
	"""Register *widgetClass* as the handler for HTML tag *tagName*.

	:param tagName: Tag name; stored lowercase.
	:param widgetClass: Widget subclass to instantiate for this tag.
	:param override: When False, keep any existing registration for the tag.
	"""
	assert issubclass(widgetClass, Widget), "widgetClass must be a sub-class of Widget!"
	global __tags
	if __tags is None:
		_buildTags()
	if not override and tagName.lower() in __tags:
		return
	# Collect settable attributes (any _setXyz method) so the parser knows
	# which HTML attributes this widget understands.
	attr = []
	for fname in dir(widgetClass):
		if fname.startswith("_set"):
			attr.append(fname[4:].lower())
	__tags[tagName.lower()] = (widgetClass, attr)
def tag(cls):
	"""Class decorator registering *cls* in the parser's tag registry.

	Classes defined inside the html5 package may fall back to their
	_tagName; external classes (e.g. the svg module) register under
	their parser/class name only.
	"""
	assert issubclass(cls, Widget)
	# This is a little bit ugly but works for the svg...
	if str(cls.__module__).split(".")[-2] == "html5":
		registerTag(cls._parserTagName or cls._tagName or cls.__name__, cls)
	else:
		registerTag(cls._parserTagName or cls.__name__, cls)  # do NOT check for cls._tagName here!!!
	return cls
def _buildTags(debug=False):
	"""
	Generates a dictionary of all to the html5-library
	known tags and their associated objects and attributes.

	:param debug: When True, print the resulting tag -> attributes table.
	"""
	global __tags
	# Already built: nothing to do. (The original additionally re-tested
	# "__tags is None" right after this early return — dead code, removed.)
	if __tags is not None:
		return
	__tags = {}
	for cname in globals().keys():
		if cname.startswith("_"):
			continue
		cls = globals()[cname]
		try:
			if not issubclass(cls, Widget):
				continue
		except TypeError:
			# globals() also holds non-class objects; issubclass() rejects
			# those with TypeError (narrowed from a bare except).
			continue
		registerTag(cls._parserTagName or cls._tagName or cls.__name__, cls, override=False)
	if debug:
		for tag in sorted(__tags.keys()):
			print("{}: {}".format(tag, ", ".join(sorted(__tags[tag][1]))))
class HtmlAst(list):
	"""List subclass marking a pre-parsed HTML tree as produced by parseHTML()."""
	pass
def parseHTML(html, debug=False):
	"""
	Parses the provided HTML-code according to the objects defined in the html5-library.

	:param html: HTML source string.
	:param debug: Passed through to _buildTags() for verbose output.
	:returns: An HtmlAst — a nested list of text strings and
		(tagName, attrDict, childAst) tuples.
	"""
	def convertEncodedText(txt):
		"""
		Convert HTML-encoded text into decoded string.
		The reason for this function is the handling of HTML entities, which is not
		properly supported by native JavaScript.
		We use the browser's DOM parser to to this, according to
		https://stackoverflow.com/questions/3700326/decode-amp-back-to-in-javascript
		:param txt: The encoded text.
		:return: The decoded text.
		"""
		global __domParser
		# Outside the browser (jseval unavailable) the text is returned as-is.
		if jseval is None:
			return txt
		if __domParser is None:
			__domParser = jseval("new DOMParser")
		dom = __domParser.parseFromString("<!doctype html><body>" + str(txt), "text/html")
		return dom.body.textContent
	def scanWhite(l):
		"""
		Scan and return whitespace.
		"""
		ret = ""
		while l and l[0] in " \t\r\n":
			ret += l.pop(0)
		return ret
	def scanWord(l):
		"""
		Scan and return a word.
		"""
		ret = ""
		while l and l[0] not in " \t\r\n" + "<>=\"'":
			ret += l.pop(0)
		return ret
	stack = []
	# Obtain tag descriptions, if not already done!
	global __tags
	if __tags is None:
		_buildTags(debug=debug)
	# Prepare stack and input
	# Stack entries are (tagName, attrDict, childAst); the sentinel entry
	# below carries the root AST.
	stack.append((None, None, HtmlAst()))
	html = [ch for ch in html]
	# Parse
	while html:
		tag = None
		text = ""
		# Auto-close leaf elements, e.g. like <hr>, <br>, etc.
		while stack and stack[-1][0] and __tags[stack[-1][0]][0]._leafTag:
			stack.pop()
		if not stack:
			break
		parent = stack[-1][2]
		while html:
			ch = html.pop(0)
			# Comment
			if html and ch == "<" and "".join(html[:3]) == "!--":
				html = html[3:]
				while html and "".join(html[:3]) != "-->":
					html.pop(0)
				html = html[3:]
			# Opening tag
			elif html and ch == "<" and html[0] != "/":
				tag = scanWord(html)
				if tag.lower() in __tags:
					break
				# Unknown tag: treat "<tag" as literal text.
				text += ch + tag
			# Closing tag
			elif html and stack[-1][0] and ch == "<" and html[0] == "/":
				junk = ch
				junk += html.pop(0)
				tag = scanWord(html)
				junk += tag
				if stack[-1][0] == tag.lower():
					junk += scanWhite(html)
					if html and html[0] == ">":
						html.pop(0)
						stack.pop()
						tag = None
						break
				# Mismatched closing tag: keep the consumed characters as text.
				text += junk
				tag = None
			else:
				text += ch
		# Append plain text (if not only whitespace)
		# NOTE(review): `text in ["\t "]` compares against the single two-char
		# string "\t " — possibly meant as a character-set test; verify intent.
		if (text and ((len(text) == 1 and text in ["\t "])
		              or not all([ch in " \t\r\n" for ch in text]))):
			# print("text", text)
			parent.append(convertEncodedText(text))
		# Create tag
		if tag:
			tag = tag.lower()
			# print("tag", tag)
			elem = (tag, {}, HtmlAst())
			stack.append(elem)
			parent.append(elem)
			# Scan the attribute list of the freshly opened tag.
			while html:
				scanWhite(html)
				if not html:
					break
				# End of tag >
				if html[0] == ">":
					html.pop(0)
					break
				# Closing tag at end />
				elif html[0] == "/":
					html.pop(0)
					scanWhite(html)
					if html[0] == ">":
						stack.pop()
						html.pop(0)
						break
				# Value-less attributes default to their own name as value.
				val = att = scanWord(html).lower()
				if not att:
					html.pop(0)
					continue
				scanWhite(html)
				if html[0] == "=":
					html.pop(0)
					scanWhite(html)
					if html[0] in "\"'":
						ch = html.pop(0)
						val = ""
						while html and html[0] != ch:
							val += html.pop(0)
						html.pop(0)
				# Repeated attributes are concatenated space-separated.
				if att not in elem[1]:
					elem[1][att] = val
				else:
					elem[1][att] += " " + val
				continue
	# Unwind any unclosed tags back to the root sentinel.
	while stack and stack[-1][0]:
		stack.pop()
	return stack[0][2]
def fromHTML(html, appendTo=None, bindTo=None, debug=False, vars=None, **kwargs):
	"""
	Parses the provided HTML code according to the objects defined in the html5-library.
	html can also be pre-compiled by `parseHTML()` so that it executes faster.
	Constructs all objects as DOM nodes. The first level is chained into appendTo.
	If no appendTo is provided, appendTo will be set to html5.Body().
	If bindTo is provided, objects are bound to this widget.
	```python
	from vi import html5
	div = html5.Div()
	html5.parse.fromHTML('''
		<div>Yeah!
			<a href="hello world" [name]="myLink" class="trullman bernd" disabled>
			hah ala malla" bababtschga"
			<img src="/static/images/icon_home.svg" style="background-color: red;"/>st
			<em>ah</em>ralla <i>malla tralla</i> da
			</a>lala
		</div>''', div)
	div.myLink.appendChild("appended!")
	```
	"""
	# Handle defaults
	if bindTo is None:
		bindTo = appendTo
	if isinstance(html, str):
		html = parseHTML(html, debug=debug)
	assert isinstance(html, HtmlAst)
	# *vars* entries join **kwargs as {{placeholder}} substitutions.
	if isinstance(vars, dict):
		kwargs.update(vars)
	def replaceVars(txt):
		# Substitute every {{key}} placeholder with its kwargs value.
		for var, val in kwargs.items():
			txt = txt.replace("{{%s}}" % var, str(val) if val is not None else "")
		return txt
	def interpret(parent, items):
		# Recursively turn AST nodes into widgets chained under *parent*.
		ret = []
		for item in items:
			if isinstance(item, str):
				txt = TextNode(replaceVars(item))
				if parent:
					parent.appendChild(txt)
				ret.append(txt)
				continue
			tag = item[0]
			atts = item[1]
			children = item[2]
			# Special handling for tables: A "thead" and "tbody" are already part of table!
			if tag in ["thead", "tbody"] and isinstance(parent, Table):
				wdg = getattr(parent, tag[1:])
			# Usual way: Construct new element and chain it into the parent.
			else:
				wdg = __tags[tag][0]()
			for att, val in atts.items():
				val = replaceVars(val)
				# The [name] attribute binds the current widget to bindTo under the provided name!
				if att == "[name]":
					# Allow disable binding!
					if not bindTo:
						logging.warning("html5: Unable to evaluate %r due unset bindTo", att)
						continue
					if getattr(bindTo, val, None):
						logging.warning("html5: Cannot assign name %r because it already exists in %r", val, bindTo)
					# Only valid Python identifiers may become attribute names.
					elif not (any([val.startswith(x) for x in string.ascii_letters + "_"])
					          and all([x in string.ascii_letters + string.digits + "_" for x in val[1:]])):
						logging.warning("html5: Cannot assign name %r because it contains invalid characters", val)
					else:
						setattr(bindTo, val, wdg)
						wdg.onBind(bindTo, val)
					if debug: #fixme: remove debug flag!
						logging.debug("html5: %r assigned to %r", val, bindTo)
				# Class is handled via Widget.addClass()
				elif att == "class":
					# print(tag, att, val.split())
					wdg.addClass(*val.split())
				elif att == "disabled":
					# print(tag, att, val)
					if val == "disabled":
						wdg.disable()
				elif att == "hidden":
					# print(tag, att, val)
					if val == "hidden":
						wdg.hide()
				# style-attributes must be split into its separate parts to be mapped into the dict.
				elif att == "style":
					for dfn in val.split(";"):
						if ":" not in dfn:
							continue
						att, val = dfn.split(":", 1)
						# print(tag, "style", att.strip(), val.strip())
						wdg["style"][att.strip()] = val.strip()
				# data attributes are mapped into a related dict.
				elif att.startswith("data-"):
					wdg["data"][att[5:]] = val
				# transfer attributes from the binder into current widget
				elif att.startswith(":"):
					if bindTo:
						try:
							setattr(wdg, att[1:], getattr(bindTo, val))
						except Exception as e:
							logging.exception(e)
					else:
						logging.error("html5: bindTo is unset, can't use %r here", att)
				# add event listener on current widget to callbacks on the binder
				elif att.startswith("@"):
					if bindTo:
						try:
							callback = getattr(bindTo, val)
							assert callable(callback), f"{callback} is not callable"
						except Exception as e:
							print(e)
							continue
						wdg.element.addEventListener(att[1:], callback)
					else:
						# NOTE(review): printf-style args passed to print() are not
						# interpolated — probably meant logging.error(); verify.
						print("html5: bindTo is unset, can't use %r here", att)
				# Otherwise, either store widget attribute or save value on widget.
				else:
					try:
						# Numeric-looking values are stored as int, others verbatim.
						wdg[att] = parseInt(val, val)
					except ValueError:
						if att in dir(wdg):
							logging.error("html5: Attribute %r already defined for %r", att, wdg)
						else:
							setattr(wdg, att, val)
					except Exception as e:
						logging.exception(e)
			interpret(wdg, children)
			if parent and not wdg.parent():
				parent.appendChild(wdg)
			ret.append(wdg)
		return ret
	return interpret(appendTo, html)
# Debug aid: dump the module namespace when the file is executed directly.
if __name__ == '__main__':
	print(globals())
| 22.745847 | 258 | 0.615658 |
acfa15f15e23a7f93fa298524d3557cf67b0e11f | 874 | py | Python | pytglib/api/types/passport_element_type.py | iTeam-co/pytglib | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 6 | 2019-10-30T08:57:27.000Z | 2021-02-08T14:17:43.000Z | pytglib/api/types/passport_element_type.py | iTeam-co/python-telegram | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 1 | 2021-08-19T05:44:10.000Z | 2021-08-19T07:14:56.000Z | pytglib/api/types/passport_element_type.py | iTeam-co/python-telegram | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 5 | 2019-12-04T05:30:39.000Z | 2021-05-21T18:23:32.000Z |
from ..utils import Object
class PassportElementType(Object):
    """
    Contains the type of a Telegram Passport element

    No parameters required.
    """
    # TDLib type identifier used for (de)serialization.
    ID = "passportElementType"

    def __init__(self, **kwargs):
        pass

    @staticmethod
    def read(q: dict, *args) -> "PassportElementTypeEmailAddress or PassportElementTypeRentalAgreement or PassportElementTypePersonalDetails or PassportElementTypeAddress or PassportElementTypeBankStatement or PassportElementTypePassportRegistration or PassportElementTypeDriverLicense or PassportElementTypeUtilityBill or PassportElementTypePhoneNumber or PassportElementTypeInternalPassport or PassportElementTypePassport or PassportElementTypeTemporaryRegistration or PassportElementTypeIdentityCard":
        # Dispatch on the embedded "@type" field to the concrete subclass;
        # fall back to a bare PassportElementType when it is absent.
        if q.get("@type"):
            return Object.read(q)
        return PassportElementType()
| 38 | 504 | 0.781465 |
acfa162fda5ba76fe5b3c1af81b66db46e71d6df | 3,776 | py | Python | old/test/dist.py | dominickeehan/bayesian-microlensing | bf95b8346019e6a6262e42e4c5c8e5b870c903b5 | [
"MIT"
] | 1 | 2021-10-13T00:41:02.000Z | 2021-10-13T00:41:02.000Z | old/test/dist.py | dominickeehan/bayesian-microlensing | bf95b8346019e6a6262e42e4c5c8e5b870c903b5 | [
"MIT"
] | null | null | null | old/test/dist.py | dominickeehan/bayesian-microlensing | bf95b8346019e6a6262e42e4c5c8e5b870c903b5 | [
"MIT"
] | null | null | null | import MulensModel as mm
import Functions as mc
import random
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
import matplotlib
from scipy.stats import truncnorm, loguniform, uniform
#plt.style.use('ggplot')
print(plt.style.available)
#print(plt.rcParams["font.family"].available)
#print(matplotlib.get_cachedir())
#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
#rc('font',**{'family':'serif','serif':['Times New Roman']})
#rc('text', usetex=True)
#plt.rcParams["font.family"] = "serif"
#print(plt.rcParams.keys())
#plt.rcParams['font.size'] = 12
s_pi = mc.logUniDist(0.2, 5)
q_pi = mc.logUniDist(10e-6, 1)
alpha_pi = mc.uniDist(0, 360)
u0_pi = mc.uniDist(0, 2)
t0_pi = mc.uniDist(0, 72)
tE_pi = mc.truncatedLogNormDist(1, 100, 10**1.15, 10**0.45)
rho_pi = mc.logUniDist(10**-4, 10**-2)
distr = tE_pi
y=[]
x=np.linspace(1, 100, 1000)
mu=0
for i in x:
mu+=np.exp(distr.log_PDF(i))*i
y.append(np.exp(distr.log_PDF(i)))
print(mu/len(x))
#print(y)
plt.rcParams["font.family"] = "serif"
plt.rcParams['font.size'] = 12
plt.style.use('seaborn-bright')
plt.rcParams["legend.edgecolor"] = '0'
plt.rcParams["legend.framealpha"] = 1
plt.rcParams["legend.title_fontsize"] = 10
plt.rcParams["legend.fontsize"] = 9
plt.rcParams["grid.linestyle"] = 'dashed'
plt.rcParams["grid.alpha"] = 0.25
plt.plot(x, y, label='Probability\nDensity')
plt.xlabel(r'Parameter [$\chi$]')
plt.ylabel(r'Probability Density [$\rho$]')
plt.title('Probability Density Function')
plt.legend(title='Entries')#, framealpha=1.0, edgecolor='0.0') #
#plt.axis('scaled')
plt.tight_layout()
plt.grid()
plt.savefig('Plots/pdf-test.png')
def centre_offsets_pointilism(supset_model, subset_model, symbols, name = '', dpi = 100):
    """Corner-style scatter plot of two models' sampled-state offsets.

    For both models, each sampled state is shifted by subtracting the
    model's centre (in scaled coordinates), and the resulting point clouds
    are overlaid pairwise for every dimension pair of ``subset_model`` in
    the lower triangle of a corner plot.  The figure is written to
    ``results/<name>-centreed-pointilism.png``.

    NOTE(review): relies on the module-level ``style()`` helper and the
    third-party ``corner`` package, neither of which is visible in this
    chunk.  Assumes each model exposes ``.sampled.states_array(scaled=...)``,
    ``.sampled.n``, ``.centre.scaled`` and ``.D`` (number of dimensions), as
    used below — confirm against the model class.

    Parameters: ``symbols`` labels the axes per dimension; ``name`` names the
    output file; ``dpi`` sets the saved figure resolution.  Returns None.
    """
    # Offsets: per-sample deviation of each state from the model centre.
    supset_offsets = (supset_model.sampled.states_array(scaled = True) - supset_model.centre.scaled[:, np.newaxis])
    subset_offsets = (subset_model.sampled.states_array(scaled = True) - subset_model.centre.scaled[:, np.newaxis])
    # Grid size follows the subset model's dimensionality.
    n_dim = subset_model.D
    style()
    # construct shape with corner (gives the n_dim x n_dim axes grid)
    figure = corner.corner(subset_offsets.T)
    # font/visibility
    plt.rcParams['font.size'] = 8
    plt.rcParams['axes.titlesize'] = 14
    plt.rcParams['axes.labelsize'] = 14
    # extract the axes
    axes = np.array(figure.axes).reshape((n_dim, n_dim))
    # Loop over the diagonal to remove from plot (corner's histograms are
    # cleared; only pairwise panels are kept).
    for i in range(n_dim):
        ax = axes[i, i]
        ax.cla()
        ax.patch.set_alpha(0.0)
        ax.axis('off')
        ax.axes.get_xaxis().set_ticklabels([])
        ax.axes.get_yaxis().set_ticklabels([])
    # loop over lower triangle: one panel per (xi, yi) dimension pair
    for yi in range(n_dim):
        for xi in range(yi):
            ax = axes[yi, xi]
            ax.cla()
            # overlay points: subset in 'winter', supset in 'spring';
            # colour encodes sample order (0 -> 1 over the chain).
            ax.scatter(subset_offsets[xi, :], subset_offsets[yi, :], c = np.linspace(0.0, 1.0, subset_model.sampled.n), cmap = 'winter', alpha = 0.15, marker = ".", s = 20, linewidth = 0.0)
            ax.scatter(supset_offsets[xi, :], supset_offsets[yi, :], c = np.linspace(0.0, 1.0, supset_model.sampled.n), cmap = 'spring', alpha = 0.15, marker = ".", s = 20, linewidth = 0.0)
            if yi == n_dim - 1: # last row
                ax.set_xlabel(symbols[xi])
                ax.tick_params(axis = 'x', labelrotation = 45)
            else:
                ax.axes.get_xaxis().set_ticklabels([])
            if xi == 0: # first column
                ax.set_ylabel(symbols[yi])
                ax.tick_params(axis = 'y', labelrotation = 45)
            else:
                ax.axes.get_yaxis().set_ticklabels([])
    figure.savefig('results/' + name + '-centreed-pointilism.png', bbox_inches = "tight", dpi = dpi, transparent=True)
    figure.clf()
    return
acfa1669852e49eb403c9216de0eb066fa0f4371 | 3,122 | py | Python | eugene/src/categorize.py | jantzen/eugene | a5fdc8cfb31e1fa4e48b2f882be84347cc8a7d69 | [
"MIT"
] | 3 | 2017-04-11T22:12:41.000Z | 2021-06-29T20:08:59.000Z | eugene/src/categorize.py | jantzen/eugene | a5fdc8cfb31e1fa4e48b2f882be84347cc8a7d69 | [
"MIT"
] | null | null | null | eugene/src/categorize.py | jantzen/eugene | a5fdc8cfb31e1fa4e48b2f882be84347cc8a7d69 | [
"MIT"
] | 1 | 2021-04-09T08:51:14.000Z | 2021-04-09T08:51:14.000Z |
import time
import math
import random
import numpy as np
import pdb
import scipy.stats as stats
# import interface
import eugene as eu
#Classes:
# Category
#Functions:
# Classify
####################################################################
####################################################################
####################################################################
#Classes:
#
# Catgetory
#
#
class Category( object ):
    """ Holds a set of system ids belonging to the same dynamical kind.

    Attributes:
        _systems: set of system ids in this category.
    """

    def __init__(self, systems=None):
        """ Build a category, optionally seeded with an iterable of ids.

        Fix: the original signature used a mutable default argument
        (``systems=set([])``), so every default-constructed Category shared
        a single set object.  A ``None`` sentinel plus a defensive copy
        removes both the sharing and any aliasing of the caller's set.
        """
        self._systems = set(systems) if systems is not None else set()

    def add_system(self, sys):
        """ Add a single system id to this category. """
        # union() returns a new set, so callers holding the old set are
        # unaffected; behaviour matches the original rebinding style.
        self._systems = self._systems.union(set([sys]))
####################################################################
####################################################################
####################################################################
#Functions:
def Classify(system_ids, models):
    """ Partition systems into Categories of the same dynamical kind.

    Assumes that the ith model corresponds to sys_id i.  Each unclassified
    system is compared against every member of each existing category; it
    joins the first category where no comparison reports a difference,
    otherwise it founds a new singleton category.  Returns the list of
    Category objects.

    NOTE(review): ``compare`` is never imported in this module (only
    ``eugene as eu``; ``# import interface`` is commented out above), so
    calling this function as-is raises NameError at the CompareModels call.
    Confirm the intended import (eugene's compare module?).
    """
#    pdb.set_trace()
    # initialize the sort with the first system in the list of systems
    classes = []
    classes.append(Category(set([system_ids[0]])))

    # sort the remainder of the systems
    for sys_id in system_ids[1:]:
        categorized = False
        for c in classes:
            # compare the unknown system to each system in c
            same_category = True
            for system in c._systems:
                # CompareModels returning 1 is treated as "different kinds";
                # any other return value counts as "same".
                result = compare.CompareModels(models[sys_id], models[system])
                if result == 1:
                    same_category = False
                    print('{0} is different from {1}'.format(sys_id, system))
                    break
            if same_category:
                c.add_system(sys_id)
                categorized = True
                print('{0} is the same as {1}'.format(sys_id, c._systems))
                break
        # if the system doesn't fit a known category, make a new one
        if categorized == False:
            classes.append(Category(set([sys_id])))

#    # go back and try to classify any singletons
#    revised_classes = []
#    singletons = []
#    for c in classes:
#        if len(c._systems) == 1:
#            singletons.append(c)
#        else:
#            revised_classes.append(c)
#    for s in singletons:
#        sys_id = s._systems.pop()
#        categorized = False
#        for c in revised_classes:
#            # compare the unknown system to the paradigm
#            result = compare.CompareModels(models[sys_id], c._paradigm)
#            if result != None:
#                categorized = True
#                c.add_system(sys_id)
#                c.update_paradigm(result)
#                break
#        # if the system doesn't fit a known category, make a new one
#        if categorized == False:
#            revised_classes.append(Category(set([sys_id]), models[sys_id]))
#
#    classes = revised_classes

    # return the list of classes
    return classes
| 29.45283 | 78 | 0.51057 |
acfa167bdcc5e670ed582a6689c3b71d8a5437ae | 672 | py | Python | ampel/cli/LoadAllOfAction.py | AmpelProject/Ampel-interface | 3c272565c6817555e5a350f12c7d0e11f7d46bb9 | [
"BSD-3-Clause"
] | null | null | null | ampel/cli/LoadAllOfAction.py | AmpelProject/Ampel-interface | 3c272565c6817555e5a350f12c7d0e11f7d46bb9 | [
"BSD-3-Clause"
] | 8 | 2019-12-26T22:44:41.000Z | 2021-12-15T12:06:42.000Z | ampel/cli/LoadAllOfAction.py | AmpelProject/Ampel-interface | 3c272565c6817555e5a350f12c7d0e11f7d46bb9 | [
"BSD-3-Clause"
] | 1 | 2020-01-20T14:01:38.000Z | 2020-01-20T14:01:38.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File : Ampel-interface/ampel/cli/LoadAllOfAction.py
# License : BSD-3-Clause
# Author : vb <vbrinnel@physik.hu-berlin.de>
# Date : 18.03.2021
# Last Modified Date: 16.09.2021
# Last Modified By : vb <vbrinnel@physik.hu-berlin.de>
from argparse import Action
from ampel.model.operator.AllOf import AllOf
class LoadAllOfAction(Action):
    """Argparse action coercing values and wrapping multiples in AllOf.

    Each supplied value is converted to ``int`` when it is a valid integer
    literal and kept as a string otherwise.  A single value is stored on
    the namespace directly; several values are wrapped in the logical
    operator ``AllOf(all_of=[...])``.
    """

    def __call__(self, parser, namespace, values, option_string=None):
        # Fix: the previous check, `el.lstrip("-+").isdigit()`, stripped any
        # run of sign characters, so input such as "--5" passed the test and
        # then crashed in int("--5") with ValueError.  Delegating to int()
        # itself accepts exactly the convertible strings (this also tolerates
        # surrounding whitespace, which argparse values normally lack).
        v = [self._coerce(el) for el in values]
        setattr(
            namespace,
            self.dest,
            v[0] if len(v) == 1 else AllOf(all_of = v)
        )

    @staticmethod
    def _coerce(el):
        """Return int(el) when el is a valid integer literal, else el."""
        try:
            return int(el)
        except ValueError:
            return el
| 24.888889 | 67 | 0.63244 |
acfa16847ea5c25a7bbf3409613be17a514620d0 | 1,883 | py | Python | test.py | MasfiqurRahaman/TrainMCNN | 905ec63fdefd77e40622c0076339a13e23d0cb07 | [
"MIT"
] | null | null | null | test.py | MasfiqurRahaman/TrainMCNN | 905ec63fdefd77e40622c0076339a13e23d0cb07 | [
"MIT"
] | null | null | null | test.py | MasfiqurRahaman/TrainMCNN | 905ec63fdefd77e40622c0076339a13e23d0cb07 | [
"MIT"
] | null | null | null | import os
import torch
import numpy as np
from src.crowd_count import CrowdCounter
from src import network
from src.data_loader import ImageDataLoader
from src import utils
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = False
vis = False
save_output = True
data_path = './data/original/shanghaitech/part_A_final/test_data/images/'
gt_path = './data/original/shanghaitech/part_A_final/test_data/ground_truth_csv/'
model_path = './final_models/mcnn_shtechA_660.h5'
output_dir = './output/'
model_name = os.path.basename(model_path).split('.')[0]
file_results = os.path.join(output_dir,'results_' + model_name + '_.txt')
if not os.path.exists(output_dir):
os.mkdir(output_dir)
output_dir = os.path.join(output_dir, 'density_maps_' + model_name)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
net = CrowdCounter()
trained_model = os.path.join(model_path)
network.load_net(trained_model, net)
net.cuda()
net.eval()
# Accumulators for count-level error over the whole test set.
mae = 0.0
mse = 0.0

#load test data
data_loader = ImageDataLoader(data_path, gt_path, shuffle=False, gt_downsample=True, pre_load=True)

for blob in data_loader:
    im_data = blob['data']
    gt_data = blob['gt_density']
    # Forward pass: predicted density map for this image.
    density_map = net(im_data, gt_data)
    density_map = density_map.data.cpu().numpy()
    # Crowd counts are the integrals (sums) of the density maps.
    gt_count = np.sum(gt_data)
    et_count = np.sum(density_map)
    mae += abs(gt_count-et_count)
    mse += ((gt_count-et_count)*(gt_count-et_count))
    if vis:
        utils.display_results(im_data, gt_data, density_map)
    if save_output:
        utils.save_density_map(density_map, output_dir, 'output_' + blob['fname'].split('.')[0] + '.png')

mae = mae/data_loader.get_num_samples()
# NOTE(review): despite the name, this reports the root of the mean squared
# error (RMSE), since the mean is taken before the sqrt.
mse = np.sqrt(mse/data_loader.get_num_samples())
print('\nMAE: %0.2f, MSE: %0.2f' % (mae, mse))
f = open(file_results, 'w')
f.write('MAE: %0.2f, MSE: %0.2f' % (mae,mse))
f.close() | 29.888889 | 105 | 0.711099 |
acfa173b686db0e37f5f95559e6964c75be838b3 | 6,416 | py | Python | ocflib/lab/hours.py | NotRyan/ocflib | a443fc885bab0d6a7bd532ee48eaa49ed4ff2b27 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | ocflib/lab/hours.py | NotRyan/ocflib | a443fc885bab0d6a7bd532ee48eaa49ed4ff2b27 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | ocflib/lab/hours.py | NotRyan/ocflib | a443fc885bab0d6a7bd532ee48eaa49ed4ff2b27 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | """Methods for dealing with OCF lab hours.
All times are assumed to be OST (OCF Standard Time).
Usage:
>>> from ocflib.lab.hours import Day
>>> Day.from_date(date(2015, 10, 12))
Day(
date=datetime.date(2015, 10, 12),
weekday='Monday',
holiday=None,
hours=[Hour(open=9, close=21)],
)
"""
from collections import namedtuple
from datetime import date
from datetime import datetime
from datetime import time
from datetime import timedelta
import requests
HOURS_URL = 'https://www.ocf.berkeley.edu/~staff/hours_temp'
def _generate_regular_hours():
    """pull hours from ocfweb and return them in the manner expected by Day().

    The canonical source of OCF lab hours is a Google Spreadsheet. Parsing
    that sheet is handled by the ocfweb API. This function is a shim for code
    that expects hours to come from ocflib, where they were originally
    hardcoded.

    >>> _generate_regular_hours()
    {
        Day.MONDAY: [Hour(time(11, 10), time(13), 'staff1'),
                     Hour(time(14, 10), time(18), 'staff2'),
                     ...],
        Day.TUESDAY: ...
        ...
    }
    """
    regular_hours = {}
    # The endpoint returns JSON mapping weekday index (as a string key) to a
    # list of [open, close, staffer] triples.
    # NOTE(review): requests.get has no timeout here — a hung server blocks
    # every caller of Day.from_date; confirm that is acceptable.
    for day, hours in requests.get(HOURS_URL).json().items():
        regular_hours[int(day)] = [
            Hour(
                open=_parsetime(hour[0]),
                close=_parsetime(hour[1]),
                staffer=hour[2],
            )
            for hour in hours
        ]
    return regular_hours


def _parsetime(t):
    # Parse an 'HH:MM:SS' string into a datetime.time.
    return datetime.strptime(t, '%H:%M:%S').time()
class Hour:
    """A staffed open interval within a single day: [open, close).

    ``open`` and ``close`` are ``datetime.time`` values; ``staffer``
    optionally names who staffs the slot.  Membership (``when in hour``)
    accepts either a ``time`` or a ``datetime`` and treats the interval as
    closed on the left and open on the right.
    """

    def __init__(self, open, close, staffer=None):
        self.open = open
        self.close = close
        self.staffer = staffer

    def __contains__(self, when):
        """Return True if `when` (time or datetime) falls in [open, close)."""
        if isinstance(when, datetime):
            when = when.time()
        return self.open <= when < self.close

    def __eq__(self, other):
        # Fix: comparing against a non-Hour used to raise AttributeError
        # (missing .open).  Returning NotImplemented lets Python fall back
        # to its defaults, so e.g. `Hour(...) == 5` is simply False.
        if not isinstance(other, Hour):
            return NotImplemented
        return (self.open, self.close, self.staffer) == \
            (other.open, other.close, other.staffer)

    def __repr__(self):
        return 'Hour(open={!r}, close={!r}, staffer={!r})'.format(
            self.open, self.close, self.staffer)
class Day(namedtuple('Day', ['date', 'weekday', 'holiday', 'hours'])):
    """One calendar day of lab hours.

    Fields: ``date`` (datetime.date), ``weekday`` (full name, e.g.
    'Monday'), ``holiday`` (holiday name or None), and ``hours`` (list of
    Hour intervals in effect that day).
    """

    # Weekday indices matching datetime.date.weekday().
    MONDAY = 0
    TUESDAY = 1
    WEDNESDAY = 2
    THURSDAY = 3
    FRIDAY = 4
    SATURDAY = 5
    SUNDAY = 6

    @classmethod
    def from_date(cls, when=None):
        """Return a Day representing the given day.

        If not provided, when defaults to today.  Regular hours are fetched
        from ocfweb; a matching HOLIDAYS entry overrides them.
        """
        if not when:
            when = date.today()

        if isinstance(when, datetime):
            when = when.date()

        # check if it's a holiday
        my_holiday = None
        my_hours = _generate_regular_hours()[when.weekday()]

        # HOLIDAYS ranges are inclusive on both ends; first match wins.
        for start, end, name, hours in HOLIDAYS:
            if start <= when <= end:
                my_holiday = name
                my_hours = hours
                break

        return cls(
            date=when,
            weekday=when.strftime('%A'),
            holiday=my_holiday,
            hours=my_hours,
        )

    def is_open(self, when=None):
        """Return whether the lab is open at the given time.

        If not provided, when defaults to now.  Raises ValueError when
        `when` is not a datetime or is on a different day than this Day.
        """
        if not when:
            when = datetime.now()

        if not isinstance(when, datetime):
            raise ValueError('{} must be a datetime instance'.format(when))

        if self.date != when.date():
            raise ValueError('{} is on a different day than {}'.format(when, self))

        return any(when in hour for hour in self.hours)

    def time_to_open(self, when=None):
        """Return timedelta object representing time until the lab is open from the given time.

        If not provided, defaults to now.  A zero timedelta is returned when
        the lab is already open; otherwise the next opening time is searched,
        rolling forward day by day if today has no further openings.
        """
        if not when:
            when = datetime.now()

        if not isinstance(when, datetime):
            raise ValueError('{} must be a datetime instance'.format(when))

        if self.date != when.date():
            raise ValueError('{} is on a different day than {}'.format(when, self))

        if self.is_open(when=when):
            return timedelta()

        def date_opens(date):
            # All opening datetimes for the given date (holiday-aware,
            # since Day.from_date applies HOLIDAYS overrides).
            return [datetime.combine(date, h.open) for h in Day.from_date(date).hours]

        opens = date_opens(self.date)
        # because we assume when is in the current day, any hours in future dates don't need to be filtered
        opens = [o for o in opens if o > when]

        # NOTE(review): assumes the lab opens again on some future date;
        # a schedule with no future hours at all would loop indefinitely —
        # confirm upstream guarantees this cannot happen.  Each iteration
        # also refetches hours over HTTP via _generate_regular_hours().
        date = self.date
        while not opens:
            date += timedelta(days=1)
            opens = date_opens(date)

        return opens[0] - when

    def time_to_close(self, when=None):
        """Return timedelta object representing time until the lab is closed from the given time.

        If not provided, defaults to now.  A zero timedelta is returned when
        the lab is already closed.
        """
        if not when:
            when = datetime.now()

        if not isinstance(when, datetime):
            raise ValueError('{} must be a datetime instance'.format(when))

        if self.date != when.date():
            raise ValueError('{} is on a different day than {}'.format(when, self))

        # because hour intervals should not overlap this should be length 0 or 1
        hours = [hour for hour in self.hours if when in hour]
        if not hours:
            return timedelta()
        return datetime.combine(self.date, hours[0].close) - when

    @property
    def closed_all_day(self):
        # True when no open intervals exist for the day (e.g. holidays).
        return not self.hours
HOLIDAYS = [
# start date, end date, holiday name, list of hours (date ranges are inclusive)
(date(2018, 2, 1), date(2018, 2, 1), 'Early Lab Closure', [Hour(time(9), time(19))]),
(date(2018, 2, 4), date(2018, 2, 4), 'Early Lab Closure', [Hour(time(9), time(15))]),
(date(2018, 2, 19), date(2018, 2, 19), 'Presidents\' Day', []),
(date(2018, 3, 3), date(2018, 3, 3), 'Early Lab Closure', [Hour(time(9), time(12))]),
(date(2018, 3, 24), date(2018, 4, 1), 'Spring Break', []),
(date(2018, 5, 12), date(2018, 8, 21), 'Summer Break', []),
(date(2018, 9, 1), date(2018, 9, 3), 'Labor Day', []),
(date(2018, 9, 12), date(2018, 9, 12), 'Early Lab Closure', [Hour(time(9), time(19))]),
(date(2018, 9, 18), date(2018, 9, 18), 'Early Lab Closure', [Hour(time(9), time(17))]),
(date(2018, 9, 19), date(2018, 9, 19), 'Early Lab Closure', [Hour(time(9), time(19))]),
(date(2018, 11, 12), date(2018, 11, 12), 'Veterans Day', []),
(date(2018, 11, 21), date(2018, 11, 25), 'Thanksgiving Break', []),
(date(2018, 12, 15), date(2019, 1, 14), 'Winter Break', []),
]
| 31.297561 | 107 | 0.577618 |
acfa181e6fbaa2874b2c7b3392eb26ec581dc7e2 | 1,464 | py | Python | kupis/server_time.py | dev-acoustikue/auto_kupis_2 | 4f9d9f8374bf3f36c3004356b307a978d7a1500b | [
"MIT"
] | null | null | null | kupis/server_time.py | dev-acoustikue/auto_kupis_2 | 4f9d9f8374bf3f36c3004356b307a978d7a1500b | [
"MIT"
] | null | null | null | kupis/server_time.py | dev-acoustikue/auto_kupis_2 | 4f9d9f8374bf3f36c3004356b307a978d7a1500b | [
"MIT"
] | null | null | null | # [Project AutoKupis2] Auto-sugang project GUI.
# 0.1.0va, 19.12.29. First launched.
# written by acoustikue(SukJoon Oh)
# __ _ __
# ____ __________ __ _______/ /_(_) /____ _____
# / __ `/ ___/ __ \/ / / / ___/ __/ / //_/ / / / _ \
# / /_/ / /__/ /_/ / /_/ (__ ) /_/ / ,< / /_/ / __/
# \__,_/\___/\____/\__,_/____/\__/_/_/|_|\__,_/\___/
#
# Visual Studio Code
#
import requests
import re
import os, sys
# for import from parent directory
sys.path.append(
os.path.dirname(
os.path.abspath(os.path.dirname(__file__))) )
from config import KUPIS_SERVER_URL
# added utility, 2019.08.19.
def kupisServerTime():
    """Read the KUPIS server's wall clock from the HTTP 'Date' header.

    Per the W3C HTTP specification, servers normally include a GMT
    timestamp such as 'Sun, 18 Aug 2019 16:23:36 GMT' in every response
    header.  The 'HH:MM:SS' portion is extracted and returned as a string
    (GMT+0); conversion to local time (e.g. Korean, GMT+9) is left to the
    caller.
    """
    response = requests.get(KUPIS_SERVER_URL)
    date_from_header = response.headers['Date']

    # Grab the first HH:MM:SS group from the header value.
    matches = re.findall('\d\d:\d\d:\d\d', date_from_header)
    exported_time = matches[0]

    print('[console] Server time(GMT 0): ' + str(exported_time) + ' detected.')
    return exported_time
| 28.705882 | 84 | 0.577186 |
acfa183accce05a13b3712bdeda09c4a3e8e4260 | 9,675 | py | Python | contrib/bitrpc/bitrpc.py | fakecoinbase/billy-mcfarlandslashtridentlayers | 1be5b4507379a211180e56820bf1da0e6f618a17 | [
"MIT"
] | 1 | 2020-03-01T01:03:34.000Z | 2020-03-01T01:03:34.000Z | contrib/bitrpc/bitrpc.py | fakecoinbase/billy-mcfarlandslashtridentlayers | 1be5b4507379a211180e56820bf1da0e6f618a17 | [
"MIT"
] | 2 | 2020-03-01T01:11:40.000Z | 2020-03-02T03:59:58.000Z | contrib/bitrpc/bitrpc.py | fakecoinbase/billy-mcfarlandslashtridentlayers | 1be5b4507379a211180e56820bf1da0e6f618a17 | [
"MIT"
] | null | null | null | from jsonrpc import ServiceProxy
import sys
import string
import getpass
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "encryptwallet":
try:
pwd = getpass.getpass(prompt="Enter passphrase: ")
pwd2 = getpass.getpass(prompt="Repeat passphrase: ")
if pwd == pwd2:
access.encryptwallet(pwd)
print "\n---Wallet encrypted. Server stopping, restart to run with encrypted wallet---\n"
else:
print "\n---Passphrases do not match---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Tridentlayers address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Tridentlayers address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = getpass.getpass(prompt="Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = getpass.getpass(prompt="Enter old wallet passphrase: ")
pwd2 = getpass.getpass(prompt="Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| 28.62426 | 101 | 0.573953 |
acfa1955b719c9398c81b21a78a8de7a0af0e7c9 | 5,645 | py | Python | sdk/python/pulumi_aws/autoscaling/schedule.py | pulumi-bot/pulumi-aws | 756c60135851e015232043c8206567101b8ebd85 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/autoscaling/schedule.py | pulumi-bot/pulumi-aws | 756c60135851e015232043c8206567101b8ebd85 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/autoscaling/schedule.py | pulumi-bot/pulumi-aws | 756c60135851e015232043c8206567101b8ebd85 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import pulumi
import pulumi.runtime
class Schedule(pulumi.CustomResource):
"""
Provides an AutoScaling Schedule resource.
"""
def __init__(__self__, __name__, __opts__=None, autoscaling_group_name=None, desired_capacity=None, end_time=None, max_size=None, min_size=None, recurrence=None, scheduled_action_name=None, start_time=None):
"""Create a Schedule resource with the given unique name, props, and options."""
if not __name__:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(__name__, basestring):
raise TypeError('Expected resource name to be a string')
if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
if not autoscaling_group_name:
raise TypeError('Missing required property autoscaling_group_name')
elif not isinstance(autoscaling_group_name, basestring):
raise TypeError('Expected property autoscaling_group_name to be a basestring')
__self__.autoscaling_group_name = autoscaling_group_name
"""
The name or Amazon Resource Name (ARN) of the Auto Scaling group.
"""
__props__['autoscalingGroupName'] = autoscaling_group_name
if desired_capacity and not isinstance(desired_capacity, int):
raise TypeError('Expected property desired_capacity to be a int')
__self__.desired_capacity = desired_capacity
"""
The number of EC2 instances that should be running in the group. Default 0. Set to -1 if you don't want to change the desired capacity at the scheduled time.
"""
__props__['desiredCapacity'] = desired_capacity
if end_time and not isinstance(end_time, basestring):
raise TypeError('Expected property end_time to be a basestring')
__self__.end_time = end_time
"""
The time for this action to end, in "YYYY-MM-DDThh:mm:ssZ" format in UTC/GMT only (for example, 2014-06-01T00:00:00Z ).
If you try to schedule your action in the past, Auto Scaling returns an error message.
"""
__props__['endTime'] = end_time
if max_size and not isinstance(max_size, int):
raise TypeError('Expected property max_size to be a int')
__self__.max_size = max_size
"""
The maximum size for the Auto Scaling group. Default 0.
Set to -1 if you don't want to change the maximum size at the scheduled time.
"""
__props__['maxSize'] = max_size
if min_size and not isinstance(min_size, int):
raise TypeError('Expected property min_size to be a int')
__self__.min_size = min_size
"""
The minimum size for the Auto Scaling group. Default 0.
Set to -1 if you don't want to change the minimum size at the scheduled time.
"""
__props__['minSize'] = min_size
if recurrence and not isinstance(recurrence, basestring):
raise TypeError('Expected property recurrence to be a basestring')
__self__.recurrence = recurrence
"""
The time when recurring future actions will start. Start time is specified by the user following the Unix cron syntax format.
"""
__props__['recurrence'] = recurrence
if not scheduled_action_name:
raise TypeError('Missing required property scheduled_action_name')
elif not isinstance(scheduled_action_name, basestring):
raise TypeError('Expected property scheduled_action_name to be a basestring')
__self__.scheduled_action_name = scheduled_action_name
"""
The name of this scaling action.
"""
__props__['scheduledActionName'] = scheduled_action_name
if start_time and not isinstance(start_time, basestring):
raise TypeError('Expected property start_time to be a basestring')
__self__.start_time = start_time
"""
The time for this action to start, in "YYYY-MM-DDThh:mm:ssZ" format in UTC/GMT only (for example, 2014-06-01T00:00:00Z ).
If you try to schedule your action in the past, Auto Scaling returns an error message.
"""
__props__['startTime'] = start_time
__self__.arn = pulumi.runtime.UNKNOWN
"""
The ARN assigned by AWS to the autoscaling schedule.
"""
super(Schedule, __self__).__init__(
'aws:autoscaling/schedule:Schedule',
__name__,
__props__,
__opts__)
def set_outputs(self, outs):
if 'arn' in outs:
self.arn = outs['arn']
if 'autoscalingGroupName' in outs:
self.autoscaling_group_name = outs['autoscalingGroupName']
if 'desiredCapacity' in outs:
self.desired_capacity = outs['desiredCapacity']
if 'endTime' in outs:
self.end_time = outs['endTime']
if 'maxSize' in outs:
self.max_size = outs['maxSize']
if 'minSize' in outs:
self.min_size = outs['minSize']
if 'recurrence' in outs:
self.recurrence = outs['recurrence']
if 'scheduledActionName' in outs:
self.scheduled_action_name = outs['scheduledActionName']
if 'startTime' in outs:
self.start_time = outs['startTime']
| 45.16 | 211 | 0.655979 |
acfa1abda1eb39676b0258bf45fdc9292210954d | 633 | py | Python | track/replay_unpack/clients/wows/network/packets/EntityCreate.py | chemputer/track | 377b4df09563e23c41da018f6f8fa6b0b2875503 | [
"MIT"
] | 1 | 2021-09-17T10:42:39.000Z | 2021-09-17T10:42:39.000Z | data/replay_unpack/clients/wows/network/packets/EntityCreate.py | chemputer/track-docker | d257d15b0ea7c2281052cd0b2d0e49f06b75af1b | [
"MIT"
] | null | null | null | data/replay_unpack/clients/wows/network/packets/EntityCreate.py | chemputer/track-docker | d257d15b0ea7c2281052cd0b2d0e49f06b75af1b | [
"MIT"
] | null | null | null | # coding=utf-8
import struct
from replay_unpack.core import PrettyPrintObjectMixin
from replay_unpack.core.network.types import BinaryStream
from replay_unpack.core.network.types import Vector3
class EntityCreate(PrettyPrintObjectMixin):
    """Replay network packet describing the creation of a game entity.

    Fields are parsed sequentially from `stream` in this fixed layout:
        int32    entityID   - id of the created entity
        int16    type       - entity type index
        int32    vehicleId  - id of the associated vehicle
        int32    spaceId    - id of the space/map the entity lives in
        Vector3  position   - world position
        Vector3  direction  - orientation
        payload  state      - remaining bytes, wrapped as a BinaryStream
    """
    def __init__(self, stream):
        # NOTE(review): struct formats here use native byte order/size;
        # presumably replay payloads are little-endian — confirm before
        # running on a big-endian host.
        self.entityID, = struct.unpack('i', stream.read(4))
        self.type, = struct.unpack('h', stream.read(2))
        self.vehicleId, = struct.unpack('i', stream.read(4))
        self.spaceId, = struct.unpack('i', stream.read(4))
        self.position = Vector3(stream)
        self.direction = Vector3(stream)
        # Everything after the fixed header is entity-specific state.
        self.state = BinaryStream(stream)
| 33.315789 | 60 | 0.704581 |
acfa1adb22ed2acd14f479762158d48107eea1b6 | 3,747 | py | Python | pybleau/plotly_api/plotly_fig_utils.py | jonathanrocher/pybleau | fa6e7841a664268a4eef0ed6cc0ff7720e1113e1 | [
"MIT"
] | 4 | 2020-02-27T22:38:29.000Z | 2021-05-03T05:32:11.000Z | pybleau/plotly_api/plotly_fig_utils.py | jonathanrocher/pybleau | fa6e7841a664268a4eef0ed6cc0ff7720e1113e1 | [
"MIT"
] | 85 | 2020-02-04T21:57:14.000Z | 2021-05-03T14:29:40.000Z | pybleau/plotly_api/plotly_fig_utils.py | jonathanrocher/pybleau | fa6e7841a664268a4eef0ed6cc0ff7720e1113e1 | [
"MIT"
] | 1 | 2020-02-20T00:45:09.000Z | 2020-02-20T00:45:09.000Z | import logging
import plotly.graph_objs as go
logger = logging.getLogger(__name__)
def wrap_renderers(renderer_list, target="ipython", **kwargs):
    """ Wrap the list of renderers according to the specified target.

    Parameters
    ----------
    target : str
        Where/how to the renderer list will be consumed. Can be 'ipython',
        'fig', or 'renderers'.
    renderer_list : list
        List of plotly renderers (traces) to wrap.
    kwargs : dict
        Key words to build the figure around the renderers. See
        :func:`plotly_fig_from_data_list` for details.

    Returns
    -------
    Figure, list or None
        Returns whatever is needed to render the list of renderers
        appropriately.
    """
    # Raw renderer list requested: nothing to build.
    if target == "renderers":
        return renderer_list

    if target not in ("ipython", "fig"):
        msg = "Bad value for `target` argument: supported values are " \
              "'ipython', 'fig' or 'renderers'."
        raise ValueError(msg)

    figure = plotly_fig_from_data_list(renderer_list, **kwargs)
    if target == "fig":
        return figure

    # target == "ipython": render inline in the notebook (offline mode).
    import plotly.offline as offline

    offline.init_notebook_mode(connected=False)
    return offline.iplot(figure)
def plotly_fig_from_data_list(renderer_list, title="", x_scale="linear",
                              x_title="", y_scale="linear", y_title="",
                              z_title="", z_scale="linear", x_tickangle=0,
                              ticklen=5, gridwidth=2, hovermode='closest',
                              showlegend=True, fig_height=600, fig_width=800,
                              **kwargs):
    """ Returns a plotly Figure containing desired layout and data provided.

    Parameters
    ----------
    renderer_list : list
        List of plotly traces to build the figure from.
    title : str, optional
        Figure title.
    x_title, y_title, z_title : str, optional
        Text to write along the plots axes. A non-empty ``z_title`` switches
        the layout to a 3D scene.
    x_scale, y_scale, z_scale : str, optional
        Type of axis scale to use. Values supported are None, 'linear' and
        'log'.
    ticklen : int, optional
        Length of the tick marks in both directions.
    x_tickangle : int, optional
        Rotation angle for the x tick labels.
    gridwidth : int, optional
        Width of the grid in both directions.
    hovermode : str, optional
        Style of the hover feature.
    showlegend : bool, optional
        Whether to display a legend or not.
    fig_height : int, optional
        Height of the figure in pixels.
    fig_width : int, optional
        Width of the figure in pixels.
    **kwargs : dict, optional
        Additional keywords to build the figure Layout. Applied last, so
        they override the generated axis entries.
    """
    # Base 2D layout: per-axis dicts built from the scalar arguments.
    layout_kw = dict(
        xaxis=dict(
            type=x_scale,
            title=x_title,
            ticklen=ticklen,
            tickangle=x_tickangle,
            zeroline=False,
            gridwidth=gridwidth,
        ),
        yaxis=dict(
            type=y_scale,
            title=y_title,
            ticklen=ticklen,
            gridwidth=gridwidth,
        )
    )
    if z_title:
        # 3D plot: the x/y axis dicts must live inside a Scene together with
        # the z axis, so re-wrap the whole layout under a `scene` key.
        layout_kw = dict(
            scene=go.Scene(
                zaxis=dict(
                    type=z_scale,
                    title=z_title,
                    ticklen=ticklen,
                    gridwidth=gridwidth,
                ),
                **layout_kw
            )
        )
    # Caller-provided layout keywords take precedence over generated ones.
    layout_kw.update(kwargs)
    layout = go.Layout(
        title=title,
        hovermode=hovermode,
        showlegend=showlegend,
        height=fig_height, width=fig_width,
        **layout_kw
    )
    fig = go.Figure(data=renderer_list, layout=layout)
    return fig
| 27.755556 | 77 | 0.569255 |
acfa1b10df8db6a40454edd378c82eeec082c05c | 256 | py | Python | tests/unit/utils/test_kickstart.py | markgras/salt | d66cd3c935533c63870b83228b978ce43e0ef70d | [
"Apache-2.0"
] | 9,425 | 2015-01-01T05:59:24.000Z | 2022-03-31T20:44:05.000Z | tests/unit/utils/test_kickstart.py | markgras/salt | d66cd3c935533c63870b83228b978ce43e0ef70d | [
"Apache-2.0"
] | 33,507 | 2015-01-01T00:19:56.000Z | 2022-03-31T23:48:20.000Z | tests/unit/utils/test_kickstart.py | markgras/salt | d66cd3c935533c63870b83228b978ce43e0ef70d | [
"Apache-2.0"
] | 5,810 | 2015-01-01T19:11:45.000Z | 2022-03-31T02:37:20.000Z | import salt.utils.kickstart as kickstart
from tests.support.unit import TestCase
class KickstartTestCase(TestCase):
    """Unit tests for :mod:`salt.utils.kickstart`."""

    def test_clean_args(self):
        """Entries whose value is False must be dropped from the mapping."""
        result = kickstart.clean_args({"foo": "bar", "baz": False})
        self.assertEqual(result, {"foo": "bar"}, result)
| 28.444444 | 64 | 0.683594 |
acfa1c1d35271db619199ecc6c7e3857679c5076 | 14,101 | py | Python | scripts/icon_studio/controllers/icon_creator/icon_creator.py | MaxGaukler/webots | 3a3af793d81716dd29785771d6015d4431650581 | [
"Apache-2.0"
] | 1 | 2019-01-21T07:14:55.000Z | 2019-01-21T07:14:55.000Z | scripts/icon_studio/controllers/icon_creator/icon_creator.py | michou214/webots | eebddc352b44edc84e84e06fec1c0f96aa721372 | [
"Apache-2.0"
] | null | null | null | scripts/icon_studio/controllers/icon_creator/icon_creator.py | michou214/webots | eebddc352b44edc84e84e06fec1c0f96aa721372 | [
"Apache-2.0"
] | null | null | null | # Copyright 1996-2020 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup the studio, and generate continuously icons in the target directory."""
import colorsys
import json
import math
import optparse
import fnmatch
import os
import shutil
import sys
from controller import Supervisor
from PIL import Image, ImageChops
# Considerations:
# - The shadow should appear on the objects right.
# - The robot and the shadow should appear entirely in the resulted screenshot.
# - If possible, physics should have run, the object should have a dynamic pose, and the robot "eyes" should look at the camera.
RED = 0
GREEN = 1
BLUE = 2
HUE = 0
LIGHTNESS = 1
SATURATION = 2
WHITE = [1, 1, 1]
def get_options():
    """Parse the controller command-line arguments and return the options object."""
    parser = optparse.OptionParser()
    parser.add_option(
        "--disable-icon-copy", dest="disableIconCopy", action="store_true",
        default=False, help="Disable the copy of the icons.")
    parser.add_option(
        "--json-file", dest="file", default="objects.json",
        help="Specify the JSON file to use.")
    parser.add_option(
        "--single-shot", dest="singleShot", action="store_true",
        default=False, help="Take only a screenshot of the current world.")
    parser.add_option(
        "--appearance", dest="appearance", action="store_true",
        default=False, help="Create the screenshot for all the appearances.")
    # Positional arguments are ignored; only the parsed options matter.
    options, _args = parser.parse_args()
    return options
def take_original_screenshot(camera, directory):
    """Save the raw camera frame as ``original.png`` in *directory* (no alpha keying, no crop).

    :param camera: Webots Camera device providing the image bytes.
    :param directory: existing output directory for ``original.png``.
    """
    # Webots delivers raw BGRA bytes; reinterpret them as an RGBA PIL image.
    image = camera.getImage()
    pilImage = Image.frombytes('RGBA', (camera.getWidth(), camera.getHeight()), image, 'raw', 'BGRA')
    pilImage.save(os.path.join(directory, 'original.png'))
def autocrop(im):
    """Crop *im* to its content bounding box, treating the upper-left pixel as background.

    reference: https://stackoverflow.com/a/48605963/2210777

    :param im: PIL image to crop.
    :return: the cropped image, or *im* unchanged when the whole image equals
        the background color (no bounding box exists).
    """
    bg = Image.new(im.mode, im.size, im.getpixel((0, 0)))
    diff = ImageChops.difference(im, bg)
    # Amplify the difference so near-background pixels still register in the bbox.
    diff = ImageChops.add(diff, diff, 2.0)
    bbox = diff.getbbox()
    if bbox:
        return im.crop(bbox)
    # Bug fix: the original implicitly returned None for a uniform image,
    # which crashed callers that immediately chain image methods on the
    # result; fall back to returning the input untouched.
    return im
def take_screenshot(camera, category, directory, protoDirectory, protoName, options, background, colorThreshold,
                    shadowColor=None, namePostfix=''):
    """Grab the camera image, key out the background, and save/copy the model and icon images.

    :param camera: Webots Camera device to read the frame from.
    :param category: asset category used to pick the docs destination folder.
    :param directory: local output directory for ``model.png`` / ``icon.png``.
    :param protoDirectory: PROTO directory (relative to WEBOTS_HOME) receiving the icon copy.
    :param protoName: PROTO name, used for the copied file names.
    :param options: parsed command-line options (``disableIconCopy`` is read here).
    :param background: NOTE(review): this parameter is immediately overwritten
        below with the image's own corner pixel and is therefore effectively unused.
    :param colorThreshold: max HLS component distance for a pixel to count as background.
    :param shadowColor: exact RGB triple of shadow pixels, or None to skip shadow keying.
    :param namePostfix: optional suffix appended to the docs image file name.
    """
    # Convert Camera image to PIL image (Webots delivers raw BGRA bytes).
    image = camera.getImage()
    pilImage = Image.frombytes('RGBA', (camera.getWidth(), camera.getHeight()), image, 'raw', 'BGRA')
    pilImage = autocrop(pilImage)  # cropped at an early stage to save lot of CPU resources.
    pixels = pilImage.getdata()
    # Remove the background.
    # The corner pixel of the cropped image defines the background color,
    # clobbering the `background` argument passed in by the caller.
    background = [float(pixels[0][0]) / 255.0, float(pixels[0][1]) / 255.0, float(pixels[0][2]) / 255.0]
    newPixels = []
    hls_background_color = colorsys.rgb_to_hls(background[RED], background[GREEN], background[BLUE])
    for pixel in pixels:
        # Compare each pixel to the background in HLS space, component-wise.
        hls_pixel = colorsys.rgb_to_hls(float(pixel[RED]) / 255.0, float(pixel[GREEN]) / 255.0, float(pixel[BLUE]) / 255.0)
        if (abs(hls_pixel[HUE] - hls_background_color[HUE]) < colorThreshold and
                abs(hls_pixel[LIGHTNESS] - hls_background_color[LIGHTNESS]) < colorThreshold and
                abs(hls_pixel[SATURATION] - hls_background_color[SATURATION]) < colorThreshold):
            # Background -> fully transparent.
            newPixels.append((0, 0, 0, 0))
        elif (shadowColor is not None and
                shadowColor[RED] == pixel[RED] and
                shadowColor[GREEN] == pixel[GREEN] and
                shadowColor[BLUE] == pixel[BLUE]):
            # Shadows -> semi-transparent gray.
            newPixels.append((125, 125, 125, 120))
        else:
            # Object -> kept as-is.
            newPixels.append(pixel)
    pilImage.putdata(newPixels)
    # Uncomment to show the result image:
    # pilImage.show()
    # Save model.png (cropped) and icon.png (scaled down, centered on a 128x128 canvas)
    pilImage.save(os.path.join(directory, 'model.png'))
    pilImage.thumbnail((128, 128), Image.ANTIALIAS)
    iconImage = Image.new('RGBA', (128, 128))
    iconImage.paste(pilImage, (int((128 - pilImage.size[0]) / 2), int((128 - pilImage.size[1]) / 2),
                               int((128 - pilImage.size[0]) / 2) + pilImage.size[0], int((128 - pilImage.size[1]) / 2) + pilImage.size[1]))
    iconImage.save(os.path.join(directory, 'icon.png'))
    if not options.disableIconCopy:
        # copy icons in the appropriate directory (inside the Webots tree)
        iconsFolder = os.environ['WEBOTS_HOME'] + os.sep + protoDirectory + os.sep + 'icons'
        iconPath = iconsFolder + os.sep + protoName + '.png'
        if not os.path.exists(iconsFolder):
            os.makedirs(iconsFolder)
        if os.path.exists(iconPath):
            os.remove(iconPath)
        shutil.copy2(directory + os.sep + 'icon.png', iconPath)
        categoryFolder = os.path.basename(os.path.dirname(protoDirectory))
        # copy the models in the docs directory; the destination path depends on the category
        modelFolder = os.path.join(os.environ['WEBOTS_HOME'], 'docs', 'guide', 'images', category, categoryFolder, protoName)
        modelPath = os.path.join(modelFolder, 'model' + namePostfix + '.png')
        if category == categoryFolder:  # appearances
            modelFolder = os.path.join(os.environ['WEBOTS_HOME'], 'docs', 'guide', 'images', category)
            modelPath = os.path.join(modelFolder, protoName + namePostfix + '.png')
        elif category == 'robots':
            modelFolder = os.path.join(os.environ['WEBOTS_HOME'], 'docs', 'guide', 'images', category, categoryFolder)
            modelPath = os.path.join(modelFolder, protoName + namePostfix + '.png')
        if not os.path.exists(modelFolder):
            os.makedirs(modelFolder)
        if os.path.exists(modelPath):
            os.remove(modelPath)
        shutil.copy2(directory + os.sep + 'model.png', modelPath)
def process_appearances(supervisor, parameters):
    """Import the appearances, take a screenshot and remove it.

    Builds a demo sphere node string using the current appearance PROTO and
    delegates to :func:`process_object` once per field set.

    :param supervisor: NOTE(review): this parameter is unused; the module
        global ``controller`` is passed to ``process_object`` instead.
    :param parameters: JSON entry for the appearance; may contain a 'fields'
        list, producing one screenshot per field string (suffixed _a, _b, ...).

    Relies on the module globals ``protoName`` and ``rootPath`` set by the
    appearance scan loop in the main script.
    """
    objectDirectory = '.' + os.sep + 'images' + os.sep + 'appearances' + os.sep + protoName
    if not os.path.exists(objectDirectory):
        os.makedirs(objectDirectory)
    else:
        # Two PROTO files with the same name would overwrite each other.
        sys.exit('Multiple definition of ' + protoName)
    protoPath = rootPath + os.sep + protoName
    protoPath = protoPath.replace(os.environ['WEBOTS_HOME'], '')
    # Showcase node: a tilted, shadow-free sphere wearing the appearance.
    nodeString = 'Transform { translation 0 1 0 rotation 0 0 1 0.262 children [ '
    nodeString += 'Shape { '
    nodeString += 'geometry Sphere { subdivision 5 } '
    nodeString += 'castShadows FALSE '
    nodeString += 'appearance %s { ' % protoName
    if 'fields' in parameters:
        assert type(parameters['fields']) is list
        postfix = 'a'
        for fields in parameters['fields']:
            newNodeString = nodeString + fields
            newNodeString += ' } } ] }'
            # Only add the _a/_b/... suffix when there are several variants.
            process_object(controller, 'appearances', newNodeString, objectDirectory,
                           protoPath, background=[1, 1, 1], colorThreshold=0.01,
                           postfix=('_' + postfix if len(parameters['fields']) > 1 else ''))
            postfix = chr(ord(postfix) + 1)
    else:
        nodeString += ' } } ] }'
        process_object(controller, 'appearances', nodeString, objectDirectory,
                       protoPath, background=[1, 1, 1], colorThreshold=0.01)
def process_object(supervisor, category, nodeString, objectDirectory, protoPath, background, colorThreshold, postfix=''):
    """Import object, take screenshot and remove it.

    :param supervisor: the Supervisor controller driving the studio.
    :param category: asset category ('objects', 'robots', 'appearances', ...).
    :param nodeString: Webots node string to import at the scene-tree root.
    :param objectDirectory: local directory receiving the screenshots.
    :param protoPath: PROTO path relative to WEBOTS_HOME.
    :param background: RGB floor color used for background keying.
    :param colorThreshold: HLS distance below which a pixel counts as background.
    :param postfix: optional suffix appended to the generated image name.

    Relies on the module globals set up by the main script: ``timeStep``,
    ``camera``, ``cameraNear``, ``viewpointPosition``, ``viewpointOrientation``,
    ``supervisorTranslation``, ``supervisorRotation``, ``options`` and
    ``protoName`` (used in error messages).
    """
    # Consistency fix: use the `supervisor` parameter instead of the module
    # global `controller` (all call sites pass `controller`, so behavior is
    # unchanged, but the function no longer silently ignores its argument).
    rootChildrenfield = supervisor.getRoot().getField('children')
    # Apply the background color.
    supervisor.getFromDef('FLOOR_MATERIAL').getField('diffuseColor').setSFColor(WHITE)
    # import the object
    count = rootChildrenfield.getCount()
    rootChildrenfield.importMFNodeFromString(-1, nodeString)
    supervisor.step(timeStep)
    if rootChildrenfield.getCount() != count + 1:
        sys.exit(protoName + ' was not imported sucessfully.')
    importedNode = rootChildrenfield.getMFNode(-1)
    supervisor.step(timeStep)
    importedNode.moveViewpoint()
    # Let physics run so the object settles into a dynamic pose.
    supervisor.simulationSetMode(Supervisor.SIMULATION_MODE_REAL_TIME)
    supervisor.step(60 * timeStep)
    # Set the camera at the right location.
    position = viewpointPosition.getSFVec3f()
    supervisorTranslation.setSFVec3f(position)
    supervisorRotation.setSFRotation(viewpointOrientation.getSFRotation())
    # compute distance to the object (assuming object is at the origin) to set a correct near value
    # Bug fix: the original summed position[0] three times instead of using
    # all three coordinates, so the distance was wrong for most viewpoints.
    distance = math.sqrt(
        math.pow(position[0], 2) + math.pow(position[1], 2) + math.pow(position[2], 2))
    if distance < 1:
        cameraNear.setSFFloat(0.1)
    elif distance < 5:
        cameraNear.setSFFloat(0.2)
    elif distance < 10:
        cameraNear.setSFFloat(0.5)
    else:
        cameraNear.setSFFloat(1)
    supervisor.step(timeStep)
    take_original_screenshot(camera, objectDirectory)
    supervisor.getFromDef('FLOOR_MATERIAL').getField('diffuseColor').setSFColor(background)
    # Switch the light off to sample the exact color shadowed floor pixels
    # will have (presumably corner pixel (0, 0) is always floor — TODO confirm).
    lightIntensityField = supervisor.getFromDef('LIGHT').getField('intensity')
    lightIntensity = lightIntensityField.getSFFloat()
    lightIntensityField.setSFFloat(0)
    supervisor.step(10 * timeStep)
    pixel = camera.getImageArray()[0][0]
    shadowColor = [pixel[0], pixel[1], pixel[2]]
    lightIntensityField.setSFFloat(lightIntensity)
    supervisor.step(10 * timeStep)
    take_screenshot(camera, category, objectDirectory, os.path.dirname(protoPath), protoName, options, background,
                    colorThreshold, shadowColor, postfix)
    # remove the object
    supervisor.step(timeStep)
    count = rootChildrenfield.getCount()
    importedNode.remove()
    supervisor.step(timeStep)
    if rootChildrenfield.getCount() != count - 1:
        sys.exit(protoName + ' was not removed sucessfully.')
# Initialize the Supervisor.
controller = Supervisor()
timeStep = int(controller.getBasicTimeStep())
camera = controller.getCamera('camera')
camera.enable(timeStep)
options = get_options()
# Start from a clean local output directory.
if os.path.exists('.' + os.sep + 'images'):
    shutil.rmtree('.' + os.sep + 'images')
# Get required fields
# These handles are module globals consumed by process_object() below; the
# scene must contain the SUPERVISOR, VIEWPOINT and CAMERA DEF nodes.
rootChildrenfield = controller.getRoot().getField('children')
supervisorTranslation = controller.getFromDef('SUPERVISOR').getField('translation')
supervisorRotation = controller.getFromDef('SUPERVISOR').getField('rotation')
viewpointPosition = controller.getFromDef('VIEWPOINT').getField('position')
viewpointOrientation = controller.getFromDef('VIEWPOINT').getField('orientation')
cameraNear = controller.getFromDef('CAMERA').getField('near')
# Dispatch on the requested mode: single screenshot, appearance sweep, or
# the default JSON-driven object sweep.
if options.singleShot:
    node = controller.getFromDef('OBJECTS')
    if node is None:
        sys.exit('No node "OBJECTS" found.')
    # The output directory was removed during setup; recreate it before saving.
    singleShotDirectory = '.' + os.sep + 'images'
    if not os.path.exists(singleShotDirectory):
        os.makedirs(singleShotDirectory)
    take_original_screenshot(camera, singleShotDirectory)
    # Bug fix: the previous call omitted the mandatory `background` and
    # `colorThreshold` arguments and passed None as `options`, which raised
    # a TypeError. Use the parsed options and the same keying defaults as
    # the other modes.
    take_screenshot(camera, 'objects', singleShotDirectory, os.path.dirname(controller.getWorldPath()),
                    node.getTypeName(), options, background=WHITE, colorThreshold=0.01)
elif options.appearance:
    with open('appearances.json') as json_data:
        data = json.load(json_data)
    appearanceFolder = os.path.join(os.environ['WEBOTS_HOME'], 'projects')
    appearanceFolder = os.path.join(appearanceFolder, 'appearances')
    appearanceFolder = os.path.join(appearanceFolder, 'protos')
    # rootPath and protoName are module globals read by process_appearances().
    for rootPath, dirNames, fileNames in os.walk(appearanceFolder):
        for fileName in fnmatch.filter(fileNames, '*.proto'):
            protoName = fileName.split('.')[0]
            if protoName not in data:
                print('Skipping "%s" PROTO.' % protoName)
                continue
            process_appearances(controller, data[protoName])
else:
    with open(options.file) as json_data:
        data = json.load(json_data)
    # The 'default' entry carries fallback values, not an object.
    print('%d objects' % (len(data) - 1))
    itemCounter = 0
    for key, value in data.items():
        if key == 'default':
            continue
        itemCounter += 1
        protoName = os.path.basename(key).split('.')[0]
        if sys.version_info[0] < 3:
            protoName = protoName.encode('utf-8')
        protoPath = key
        print('%s [%d%%]' % (protoName, 100.0 * itemCounter / (len(data) - 1)))
        objectDirectory = '.' + os.sep + 'images' + os.sep + os.path.basename(os.path.dirname(os.path.dirname(key)))
        objectDirectory += os.sep + protoName
        if not os.path.exists(objectDirectory):
            os.makedirs(objectDirectory)
        else:
            sys.exit('Multiple definition of ' + protoName)
        # Per-object overrides fall back to the 'default' entry.
        if 'colorThreshold' in value:
            colorThreshold = value['colorThreshold']
        else:
            colorThreshold = data['default']['colorThreshold']
        if 'background' in value:
            background = value['background']
        else:
            background = data['default']['background']
        if 'fields' in value:
            fields = value['fields']
        else:
            fields = data['default']['fields']
        nodeString = protoName + '{ '
        if sys.version_info[0] < 3:
            nodeString += fields.encode('utf-8')
        else:
            nodeString += fields
        nodeString += ' }'
        # A literal 'nodeString' entry replaces the generated one entirely.
        if 'nodeString' in value:
            if sys.version_info[0] < 3:
                nodeString = value['nodeString'].encode('utf-8')
            else:
                nodeString = value['nodeString']
        process_object(controller, key.split('/')[1], nodeString, objectDirectory, protoPath,
                       background=background, colorThreshold=colorThreshold)
| 43.521605 | 128 | 0.647543 |
acfa1c275fea90127a2f28cb25a5bb30a08cd23f | 3,190 | py | Python | profiles_project/settings.py | pralakxavier/profiles-rest-api | 651f2c8181243950647cd557e4b7d958707d8a60 | [
"MIT"
] | null | null | null | profiles_project/settings.py | pralakxavier/profiles-rest-api | 651f2c8181243950647cd557e4b7d958707d8a60 | [
"MIT"
] | 7 | 2020-06-06T01:52:34.000Z | 2022-02-10T10:22:10.000Z | profiles_project/settings.py | pralakxavier/profiles-rest-api | 651f2c8181243950647cd557e4b7d958707d8a60 | [
"MIT"
] | null | null | null | """
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; any real deployment
# must load it from the environment (e.g. os.environ['SECRET_KEY']) instead.
SECRET_KEY = 'h+hl54_^6^*mo6bc^(gn-0gxcy(6m1%$2f8p_6f5e^3bm+q6@1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# An empty list is only acceptable while DEBUG is True; production requires
# the served host names to be listed explicitly.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| 25.725806 | 91 | 0.697806 |
acfa1f6519cd65d4ccb72378f23aab60051b33de | 2,579 | py | Python | pyro/distributions/von_mises_3d.py | futurewarning/pyro | 005032f10099188fea86f63b6baa46a27867983f | [
"Apache-2.0"
] | 1 | 2021-02-08T22:53:23.000Z | 2021-02-08T22:53:23.000Z | pyro/distributions/von_mises_3d.py | futurewarning/pyro | 005032f10099188fea86f63b6baa46a27867983f | [
"Apache-2.0"
] | null | null | null | pyro/distributions/von_mises_3d.py | futurewarning/pyro | 005032f10099188fea86f63b6baa46a27867983f | [
"Apache-2.0"
] | 1 | 2021-04-11T21:37:25.000Z | 2021-04-11T21:37:25.000Z | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import math
import torch
from . import constraints
from .torch_distribution import TorchDistribution
class VonMises3D(TorchDistribution):
    """
    Spherical von Mises distribution.

    This implementation combines the direction parameter and concentration
    parameter into a single combined parameter that contains both direction and
    magnitude. The ``value`` arg is represented in cartesian coordinates: it
    must be a normalized 3-vector that lies on the 2-sphere.

    See :class:`~pyro.distributions.VonMises` for a 2D polar coordinate cousin
    of this distribution. See :class:`~pyro.distributions.projected_normal` for
    a qualitatively similar distribution but implementing more functionality.

    Currently only :meth:`log_prob` is implemented.

    :param torch.Tensor concentration: A combined location-and-concentration
        vector. The direction of this vector is the location, and its
        magnitude is the concentration.
    """
    arg_constraints = {'concentration': constraints.real}
    support = constraints.sphere

    def __init__(self, concentration, validate_args=None):
        # The rightmost dimension holds the 3D combined direction/concentration
        # vector; everything to its left is batch shape.
        if concentration.dim() < 1 or concentration.shape[-1] != 3:
            raise ValueError('Expected concentration to have rightmost dim 3, actual shape = {}'.format(
                concentration.shape))
        self.concentration = concentration
        batch_shape, event_shape = concentration.shape[:-1], concentration.shape[-1:]
        super().__init__(batch_shape, event_shape, validate_args=validate_args)

    def log_prob(self, value):
        """Return the log density of *value*, a unit 3-vector on the 2-sphere."""
        if self._validate_args:
            if value.dim() < 1 or value.shape[-1] != 3:
                raise ValueError('Expected value to have rightmost dim 3, actual shape = {}'.format(
                    value.shape))
            # Values must lie on the unit sphere (up to a small tolerance).
            if not (torch.abs(value.norm(2, -1) - 1) < 1e-6).all():
                raise ValueError('direction vectors are not normalized')
        # kappa is the magnitude of the combined parameter; the normalizer is
        # log(kappa / (4*pi*sinh(kappa))).
        # NOTE(review): scale.log() - scale.sinh().log() is numerically
        # delicate as kappa -> 0 (both terms diverge) — confirm inputs stay
        # away from zero concentration.
        scale = self.concentration.norm(2, -1)
        log_normalizer = scale.log() - scale.sinh().log() - math.log(4 * math.pi)
        # The unnormalized log density is the dot product kappa * mu . value.
        return (self.concentration * value).sum(-1) + log_normalizer

    def expand(self, batch_shape):
        """Expand to a new batch shape, falling back to re-construction when the base class cannot."""
        try:
            return super().expand(batch_shape)
        except NotImplementedError:
            validate_args = self.__dict__.get('_validate_args')
            concentration = self.concentration.expand(torch.Size(batch_shape) + (3,))
            return type(self)(concentration, validate_args=validate_args)
| 42.983333 | 104 | 0.6867 |
acfa225d670bd9d640f6aab496ee87654e9c91ac | 1,035 | py | Python | ultros_site/routes/downloads.py | tsao-chi/Site | e3fc4574101b8cdacb2a28e54495da5376dd5396 | [
"MIT",
"Artistic-2.0",
"BSD-3-Clause"
] | 2 | 2017-06-25T20:57:40.000Z | 2017-11-27T15:13:35.000Z | ultros_site/routes/downloads.py | tsao-chi/Site | e3fc4574101b8cdacb2a28e54495da5376dd5396 | [
"MIT",
"Artistic-2.0",
"BSD-3-Clause"
] | 5 | 2017-06-23T12:05:57.000Z | 2021-06-30T05:46:44.000Z | ultros_site/routes/downloads.py | tsao-chi/Site | e3fc4574101b8cdacb2a28e54495da5376dd5396 | [
"MIT",
"Artistic-2.0",
"BSD-3-Clause"
] | 3 | 2018-01-08T04:57:12.000Z | 2020-01-22T08:03:56.000Z | # coding=utf-8
from falcon import HTTPTemporaryRedirect
from ultros_site.base_route import BaseRoute
from ultros_site.database.schema.product import Product
from ultros_site.message import Message
__author__ = "Momo"
class DownloadsViewRoute(BaseRoute):
    """Landing route for the downloads area."""

    route = "/downloads"

    def on_get(self, req, resp):
        """Redirect to the first product, or show a WIP gate when none exist yet."""
        db_session = req.context["db_session"]

        if db_session.query(Product).count():
            # Products exist: send the visitor to the first one (by order, then id).
            products = db_session.query(Product).order_by(Product.order, Product.id).all()
            raise HTTPTemporaryRedirect("/downloads/{}".format(products[0].id))

        # No products yet: render the under-construction gate and auto-refresh home.
        resp.append_header("Refresh", "10;url=/")
        return self.render_template(
            req, resp, "message_gate.html",
            gate_message=Message(
                "danger", "WIP", "The downloads area is under construction. Come back later!"
            ),
            redirect_uri="/"
        )
| 30.441176 | 97 | 0.629952 |
acfa22e58479797d748722888a1564ac09fdd98d | 12,415 | py | Python | homeassistant/components/zwave_js/light.py | miccico/core | 14c205384171dee59c1a908f8449f9864778b2dc | [
"Apache-2.0"
] | 3 | 2021-04-27T16:37:48.000Z | 2022-02-23T02:47:33.000Z | homeassistant/components/zwave_js/light.py | miccico/core | 14c205384171dee59c1a908f8449f9864778b2dc | [
"Apache-2.0"
] | 33 | 2021-02-19T07:20:28.000Z | 2022-03-31T06:06:32.000Z | homeassistant/components/zwave_js/light.py | miccico/core | 14c205384171dee59c1a908f8449f9864778b2dc | [
"Apache-2.0"
] | 1 | 2021-02-08T08:43:22.000Z | 2021-02-08T08:43:22.000Z | """Support for Z-Wave lights."""
import logging
from typing import Any, Callable, Optional, Tuple
from zwave_js_server.client import Client as ZwaveClient
from zwave_js_server.const import CommandClass
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
ATTR_TRANSITION,
ATTR_WHITE_VALUE,
DOMAIN as LIGHT_DOMAIN,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_TRANSITION,
SUPPORT_WHITE_VALUE,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
import homeassistant.util.color as color_util
from .const import DATA_CLIENT, DATA_UNSUBSCRIBE, DOMAIN
from .discovery import ZwaveDiscoveryInfo
from .entity import ZWaveBaseEntity
LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
    hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: Callable
) -> None:
    """Set up Z-Wave Light from Config Entry.

    Registers a dispatcher listener so lights discovered later are added as
    entities; the unsubscribe callback is stored for teardown.
    """
    client: ZwaveClient = hass.data[DOMAIN][config_entry.entry_id][DATA_CLIENT]

    @callback
    def async_add_light(info: ZwaveDiscoveryInfo) -> None:
        """Add Z-Wave Light."""
        light = ZwaveLight(config_entry, client, info)
        async_add_entities([light])

    # Keep the dispatcher unsubscribe handle so it is released on entry unload.
    hass.data[DOMAIN][config_entry.entry_id][DATA_UNSUBSCRIBE].append(
        async_dispatcher_connect(
            hass,
            f"{DOMAIN}_{config_entry.entry_id}_add_{LIGHT_DOMAIN}",
            async_add_light,
        )
    )
def byte_to_zwave_brightness(value: int) -> int:
    """Convert brightness in 0-255 scale to 0-99 scale.

    `value` -- (int) Brightness byte value from 0-255.
    """
    if value <= 0:
        return 0
    # Scale down, but never let a non-zero byte value round all the way to 0.
    scaled = round((value / 255) * 99)
    return scaled if scaled >= 1 else 1
class ZwaveLight(ZWaveBaseEntity, LightEntity):
"""Representation of a Z-Wave light."""
    def __init__(
        self, config_entry: ConfigEntry, client: ZwaveClient, info: ZwaveDiscoveryInfo
    ) -> None:
        """Initialize the light and derive its supported feature flags."""
        super().__init__(config_entry, client, info)
        # Color capabilities default to off; _calculate_color_values() below
        # flips them based on which SWITCH_COLOR values the node exposes.
        self._supports_color = False
        self._supports_white_value = False
        self._supports_color_temp = False
        self._hs_color: Optional[Tuple[float, float]] = None
        self._white_value: Optional[int] = None
        self._color_temp: Optional[int] = None
        self._min_mireds = 153  # 6500K as a safe default
        self._max_mireds = 370  # 2700K as a safe default
        self._supported_features = SUPPORT_BRIGHTNESS
        # get additional (optional) values and set features
        self._target_value = self.get_zwave_value("targetValue")
        self._dimming_duration = self.get_zwave_value("duration")
        if self._dimming_duration is not None:
            self._supported_features |= SUPPORT_TRANSITION
        # Must run before the feature checks below: it sets the
        # _supports_* flags from the node's color values.
        self._calculate_color_values()
        if self._supports_color:
            self._supported_features |= SUPPORT_COLOR
        if self._supports_color_temp:
            self._supported_features |= SUPPORT_COLOR_TEMP
        if self._supports_white_value:
            self._supported_features |= SUPPORT_WHITE_VALUE
@callback
def on_value_update(self) -> None:
"""Call when a watched value is added or updated."""
self._calculate_color_values()
    @property
    def brightness(self) -> int:
        """Return the brightness of this light between 0..255.

        Z-Wave multilevel switches use a range of [0, 99] to control brightness.
        """
        # prefer targetValue only if CC Version >= 4
        # otherwise use currentValue (pre V4 dimmers)
        if (
            self._target_value
            and self._target_value.value is not None
            and self._target_value.cc_version >= 4
        ):
            return round((self._target_value.value / 99) * 255)
        if self.info.primary_value.value is not None:
            return round((self.info.primary_value.value / 99) * 255)
        # No usable value reported yet: treat the light as off.
        return 0
@property
def is_on(self) -> bool:
"""Return true if device is on (brightness above 0)."""
return self.brightness > 0
@property
def hs_color(self) -> Optional[Tuple[float, float]]:
"""Return the hs color."""
return self._hs_color
@property
def white_value(self) -> Optional[int]:
"""Return the white value of this light between 0..255."""
return self._white_value
@property
def color_temp(self) -> Optional[int]:
"""Return the color temperature."""
return self._color_temp
@property
def min_mireds(self) -> int:
"""Return the coldest color_temp that this light supports."""
return self._min_mireds
@property
def max_mireds(self) -> int:
"""Return the warmest color_temp that this light supports."""
return self._max_mireds
@property
def supported_features(self) -> Optional[int]:
"""Flag supported features."""
return self._supported_features
    async def async_turn_on(self, **kwargs: Any) -> None:
        """Turn the device on.

        Applies, in order: HS color, color temperature, white value and
        brightness — each only when requested by the service call and
        supported by the node. RGB and white channels are mutually
        exclusive, so the opposite group is zeroed before setting either.
        """
        # RGB/HS color
        hs_color = kwargs.get(ATTR_HS_COLOR)
        if hs_color is not None and self._supports_color:
            # set white levels to 0 when setting rgb
            await self._async_set_color("Warm White", 0)
            await self._async_set_color("Cold White", 0)
            red, green, blue = color_util.color_hs_to_RGB(*hs_color)
            await self._async_set_color("Red", red)
            await self._async_set_color("Green", green)
            await self._async_set_color("Blue", blue)
        # Color temperature
        color_temp = kwargs.get(ATTR_COLOR_TEMP)
        if color_temp is not None and self._supports_color_temp:
            # turn off rgb when setting white values
            await self._async_set_color("Red", 0)
            await self._async_set_color("Green", 0)
            await self._async_set_color("Blue", 0)
            # Limit color temp to min/max values, then map linearly onto the
            # cold-white channel; warm white is the complement so the two
            # channels always sum to 255.
            cold = max(
                0,
                min(
                    255,
                    round(
                        (self._max_mireds - color_temp)
                        / (self._max_mireds - self._min_mireds)
                        * 255
                    ),
                ),
            )
            warm = 255 - cold
            await self._async_set_color("Warm White", warm)
            await self._async_set_color("Cold White", cold)
        # White value
        white_value = kwargs.get(ATTR_WHITE_VALUE)
        if white_value is not None and self._supports_white_value:
            # turn off rgb when setting white values
            await self._async_set_color("Red", 0)
            await self._async_set_color("Green", 0)
            await self._async_set_color("Blue", 0)
            await self._async_set_color("Warm White", white_value)
        # set brightness
        await self._async_set_brightness(
            kwargs.get(ATTR_BRIGHTNESS), kwargs.get(ATTR_TRANSITION)
        )
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn the light off."""
await self._async_set_brightness(0, kwargs.get(ATTR_TRANSITION))
async def _async_set_color(self, color_name: str, new_value: int) -> None:
"""Set defined color to given value."""
cur_zwave_value = self.get_zwave_value(
"currentColor",
CommandClass.SWITCH_COLOR,
value_property_key_name=color_name,
)
# guard for unsupported command
if cur_zwave_value is None:
return
# actually set the new color value
target_zwave_value = self.get_zwave_value(
"targetColor",
CommandClass.SWITCH_COLOR,
value_property_key_name=color_name,
)
if target_zwave_value is None:
return
await self.info.node.async_set_value(target_zwave_value, new_value)
    async def _async_set_brightness(
        self, brightness: Optional[int], transition: Optional[int] = None
    ) -> None:
        """Set new brightness to light.

        :param brightness: 0-255 value, or None to restore the previous level.
        :param transition: optional transition time forwarded to the duration value.
        """
        if brightness is None and self.info.primary_value.value:
            # there is no point in setting default brightness when light is already on
            return
        if brightness is None:
            # Level 255 means to set it to previous value.
            zwave_brightness = 255
        else:
            # Zwave multilevel switches use a range of [0, 99] to control brightness.
            zwave_brightness = byte_to_zwave_brightness(brightness)
        # set transition value before sending new brightness
        await self._async_set_transition_duration(transition)
        # setting a value requires setting targetValue
        await self.info.node.async_set_value(self._target_value, zwave_brightness)
    async def _async_set_transition_duration(
        self, duration: Optional[int] = None
    ) -> None:
        """Set the transition time for the brightness value.

        NOTE: currently a no-op beyond the guard below — the early ``return``
        deliberately disables the remainder until the upstream issue is fixed.
        """
        if self._dimming_duration is None:
            return
        # pylint: disable=fixme,unreachable
        # TODO: setting duration needs to be fixed upstream
        # https://github.com/zwave-js/node-zwave-js/issues/1321
        return
        # --- everything below is intentionally unreachable (see TODO above) ---
        if duration is None:  # type: ignore
            # no transition specified by user, use defaults
            duration = 7621  # anything over 7620 uses the factory default
        else:  # pragma: no cover
            # transition specified by user
            transition = duration
            if transition <= 127:
                # values 0-127 encode seconds directly
                duration = transition
            else:
                # longer transitions are encoded as minutes offset by 128
                minutes = round(transition / 60)
                LOGGER.debug(
                    "Transition rounded to %d minutes for %s",
                    minutes,
                    self.entity_id,
                )
                duration = minutes + 128
        # only send value if it differs from current
        # this prevents sending a command for nothing
        if self._dimming_duration.value != duration:  # pragma: no cover
            await self.info.node.async_set_value(self._dimming_duration, duration)
    @callback
    def _calculate_color_values(self) -> None:
        """Calculate light colors.

        Inspects which SWITCH_COLOR channels the node exposes and derives the
        supported color modes (RGB, color temperature, single white channel)
        plus the current color state from them.
        """
        # RGB support: all three channels must exist.
        red_val = self.get_zwave_value(
            "currentColor", CommandClass.SWITCH_COLOR, value_property_key_name="Red"
        )
        green_val = self.get_zwave_value(
            "currentColor", CommandClass.SWITCH_COLOR, value_property_key_name="Green"
        )
        blue_val = self.get_zwave_value(
            "currentColor", CommandClass.SWITCH_COLOR, value_property_key_name="Blue"
        )
        if red_val and green_val and blue_val:
            self._supports_color = True
            # convert to HS only once all channels report an actual value
            if (
                red_val.value is not None
                and green_val.value is not None
                and blue_val.value is not None
            ):
                self._hs_color = color_util.color_RGB_to_hs(
                    red_val.value, green_val.value, blue_val.value
                )
        # White colors
        ww_val = self.get_zwave_value(
            "currentColor",
            CommandClass.SWITCH_COLOR,
            value_property_key_name="Warm White",
        )
        cw_val = self.get_zwave_value(
            "currentColor",
            CommandClass.SWITCH_COLOR,
            value_property_key_name="Cold White",
        )
        if ww_val and cw_val:
            # Color temperature (CW + WW) Support
            self._supports_color_temp = True
            # Calculate color temps based on whites: a fully cold level (255)
            # maps to the minimum mireds, zero cold maps to the maximum.
            cold_level = cw_val.value or 0
            if cold_level or ww_val.value is not None:
                self._color_temp = round(
                    self._max_mireds
                    - ((cold_level / 255) * (self._max_mireds - self._min_mireds))
                )
            else:
                self._color_temp = None
        elif ww_val:
            # only one white channel (warm white)
            self._supports_white_value = True
            self._white_value = ww_val.value
        elif cw_val:
            # only one white channel (cool white)
            self._supports_white_value = True
            self._white_value = cw_val.value
| 36.730769 | 86 | 0.619654 |
acfa2371efa585ca485708d9f15265cb549c4625 | 704 | py | Python | apps/accounts/urls.py | vicobits/django-wise | 3fdc01eabdff459b31e016f9f6d1cafc19c5a292 | [
"MIT"
] | 5 | 2020-04-11T20:11:48.000Z | 2021-03-16T23:58:01.000Z | apps/accounts/urls.py | victoraguilarc/django-wise | 3fdc01eabdff459b31e016f9f6d1cafc19c5a292 | [
"MIT"
] | 5 | 2020-04-11T20:17:56.000Z | 2021-06-16T19:18:29.000Z | apps/accounts/urls.py | victoraguilarc/django-wise | 3fdc01eabdff459b31e016f9f6d1cafc19c5a292 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.urls import path
from apps.accounts.views.confirm_email import ConfirmEmailView
from apps.accounts.views.reset_password import ResetPasswordView
from apps.accounts.views.phone_verification import PhoneVerificationView
# URL namespace: reverse these routes as 'accounts:<name>'.
app_name = 'accounts'
urlpatterns = [
    # Token-based account flows (email confirmation, password reset)
    # plus phone verification.
    path(
        'email-confirmation/<slug:token>/',
        ConfirmEmailView.as_view(),
        name='confirm-email',
    ),
    path(
        'reset-password/<slug:token>/',
        view=ResetPasswordView.as_view(),
        name='reset-password',
    ),
    path(
        'phone-verification/',
        view=PhoneVerificationView.as_view(),
        name='phone-verification',
    ),
]
| 25.142857 | 72 | 0.663352 |
acfa23c6ad976a79bb7fc9dadeaa22cb1545d34e | 397 | py | Python | odoo-13.0/addons/website_sale_wishlist/models/ir_autovacuum.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | odoo-13.0/addons/website_sale_wishlist/models/ir_autovacuum.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | odoo-13.0/addons/website_sale_wishlist/models/ir_autovacuum.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models
class AutoVacuum(models.AbstractModel):
    """Hook the autovacuum cron to also purge stale wishlist entries."""

    _inherit = 'ir.autovacuum'

    @api.model
    def power_on(self, *args, **kwargs):
        # Collect expired wishlist records before the standard vacuum runs.
        wishlist_model = self.env['product.wishlist']
        wishlist_model._garbage_collector(*args, **kwargs)
        return super(AutoVacuum, self).power_on(*args, **kwargs)
| 28.357143 | 74 | 0.685139 |
acfa243c17a3730523587d92c22832458a81491c | 165 | py | Python | hackerrank/Python/09_errors_and_exceptions/incorrect_regex.py | mizukirc/python-snippets | 92e36ba59e978dd764e1e4b570ac3b66c1381e69 | [
"MIT"
] | null | null | null | hackerrank/Python/09_errors_and_exceptions/incorrect_regex.py | mizukirc/python-snippets | 92e36ba59e978dd764e1e4b570ac3b66c1381e69 | [
"MIT"
] | null | null | null | hackerrank/Python/09_errors_and_exceptions/incorrect_regex.py | mizukirc/python-snippets | 92e36ba59e978dd764e1e4b570ac3b66c1381e69 | [
"MIT"
] | null | null | null | import re
# Read T candidate patterns from stdin and report, one per line, whether
# each compiles as a valid regular expression.
T = int(input())
for _ in range(T):  # loop index is unused
    try:
        re.compile(input())
        print('True')
    except re.error:
        # re.compile raises re.error for syntactically invalid patterns
        print('False')
acfa24627e354315016204205f08692c41ca0647 | 3,062 | py | Python | core/corr.py | hologerry/RAFT | a80209c442ea2e2a8860af3c9ca96e62498533ca | [
"BSD-3-Clause"
] | null | null | null | core/corr.py | hologerry/RAFT | a80209c442ea2e2a8860af3c9ca96e62498533ca | [
"BSD-3-Clause"
] | null | null | null | core/corr.py | hologerry/RAFT | a80209c442ea2e2a8860af3c9ca96e62498533ca | [
"BSD-3-Clause"
] | null | null | null | import torch
import torch.nn.functional as F
from .utils.utils import bilinear_sampler, coords_grid
try:
import alt_cuda_corr
except:
# alt_cuda_corr is not compiled
pass
class CorrBlock:
    """All-pairs correlation volume with an average-pooled pyramid.

    The dense correlation between every pixel of ``fmap1`` and ``fmap2`` is
    computed once in ``__init__``; coarser levels are built by 2x average
    pooling over the (h2, w2) dimensions.  ``__call__`` samples a
    (2r+1) x (2r+1) window around the given coordinates at every level and
    concatenates the results along the channel dimension.
    """

    def __init__(self, fmap1, fmap2, num_levels=4, radius=4):
        self.num_levels = num_levels
        self.radius = radius
        self.corr_pyramid = []
        # all pairs correlation
        corr = CorrBlock.corr(fmap1, fmap2)
        batch, h1, w1, dim, h2, w2 = corr.shape
        corr = corr.reshape(batch*h1*w1, dim, h2, w2)
        self.corr_pyramid.append(corr)
        for i in range(self.num_levels-1):
            corr = F.avg_pool2d(corr, 2, stride=2)
            self.corr_pyramid.append(corr)

    def __call__(self, coords):
        """Sample the correlation pyramid at ``coords`` (B, 2, H, W)."""
        r = self.radius
        coords = coords.permute(0, 2, 3, 1)
        batch, h1, w1, _ = coords.shape

        # The lookup-window offsets depend only on the radius, not on the
        # pyramid level, so build them once instead of per loop iteration.
        dx = torch.linspace(-r, r, 2*r+1)
        dy = torch.linspace(-r, r, 2*r+1)
        delta = torch.stack(torch.meshgrid(dy, dx), axis=-1).to(coords.device)
        delta_lvl = delta.view(1, 2*r+1, 2*r+1, 2)

        out_pyramid = []
        for i in range(self.num_levels):
            corr = self.corr_pyramid[i]
            # Scale the query coordinates to the current level's resolution
            # and add the window offsets around each centroid.
            centroid_lvl = coords.reshape(batch*h1*w1, 1, 1, 2) / 2**i
            coords_lvl = centroid_lvl + delta_lvl
            corr = bilinear_sampler(corr, coords_lvl)
            corr = corr.view(batch, h1, w1, -1)
            out_pyramid.append(corr)

        out = torch.cat(out_pyramid, dim=-1)
        return out.permute(0, 3, 1, 2).contiguous().float()

    @staticmethod
    def corr(fmap1, fmap2):
        """Dense correlation volume: dot product of every feature pair,
        scaled by 1/sqrt(dim); shape (batch, ht, wd, 1, ht, wd)."""
        batch, dim, ht, wd = fmap1.shape
        fmap1 = fmap1.view(batch, dim, ht*wd)
        fmap2 = fmap2.view(batch, dim, ht*wd)
        corr = torch.matmul(fmap1.transpose(1, 2), fmap2)
        corr = corr.view(batch, ht, wd, 1, ht, wd)
        return corr / torch.sqrt(torch.tensor(dim).float())
class AlternateCorrBlock:
    """Memory-efficient correlation lookup backed by the ``alt_cuda_corr``
    CUDA extension; requires that extension to be compiled and importable.

    Instead of materializing the full correlation volume, it keeps a pooled
    feature pyramid and computes correlations on the fly per lookup.
    """
    def __init__(self, fmap1, fmap2, num_levels=4, radius=4):
        self.num_levels = num_levels
        self.radius = radius
        # Level 0 holds the full-resolution pair; each further level halves
        # the spatial resolution of both feature maps.
        self.pyramid = [(fmap1, fmap2)]
        for i in range(self.num_levels):
            fmap1 = F.avg_pool2d(fmap1, 2, stride=2)
            fmap2 = F.avg_pool2d(fmap2, 2, stride=2)
            self.pyramid.append((fmap1, fmap2))
    def __call__(self, coords):
        # coords: (B, 2, H, W) -> channels-last for the CUDA kernel
        coords = coords.permute(0, 2, 3, 1)
        B, H, W, _ = coords.shape
        dim = self.pyramid[0][0].shape[1]
        corr_list = []
        for i in range(self.num_levels):
            r = self.radius
            # NOTE(review): fmap1_i is loop-invariant (always level 0) and
            # could be hoisted out of the loop — kept as-is here.
            fmap1_i = self.pyramid[0][0].permute(0, 2, 3, 1).contiguous()
            fmap2_i = self.pyramid[i][1].permute(0, 2, 3, 1).contiguous()
            coords_i = (coords / 2**i).reshape(B, 1, H, W, 2).contiguous()
            corr, = alt_cuda_corr.forward(fmap1_i, fmap2_i, coords_i, r)
            corr_list.append(corr.squeeze(1))
        corr = torch.stack(corr_list, dim=1)
        corr = corr.reshape(B, -1, H, W)
        # Same 1/sqrt(dim) scaling as CorrBlock.corr.
        return corr / torch.sqrt(torch.tensor(dim).float())
| 32.924731 | 82 | 0.576094 |
acfa24c7a4eb5f9f5b110c3a7a3539aa58428450 | 4,146 | py | Python | webots_ros2_tests/test/test_system_epuck.py | TaoYibo1866/webots_ros2 | a72c164825663cebbfd27e0649ea51d3abf9bbed | [
"Apache-2.0"
] | 176 | 2019-09-06T07:02:05.000Z | 2022-03-27T12:41:10.000Z | webots_ros2_tests/test/test_system_epuck.py | TaoYibo1866/webots_ros2 | a72c164825663cebbfd27e0649ea51d3abf9bbed | [
"Apache-2.0"
] | 308 | 2019-08-20T12:56:23.000Z | 2022-03-29T09:49:22.000Z | webots_ros2_tests/test/test_system_epuck.py | omichel/webots_ros2 | 5b59d0b1fbeff4c3f75a447bd152c10853f4691b | [
"Apache-2.0"
] | 67 | 2019-11-03T00:58:09.000Z | 2022-03-18T07:11:28.000Z | #!/usr/bin/env python
# Copyright 1996-2021 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the `webots_ros2_epuck` package."""
# Launch the test locally: launch_test src/webots_ros2/webots_ros2_tests/test/test_system_epuck.py
import os
import pytest
import rclpy
from sensor_msgs.msg import Range, Image, LaserScan
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
from launch import LaunchDescription
import launch_testing.actions
from ament_index_python.packages import get_package_share_directory
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.actions import IncludeLaunchDescription
from webots_ros2_tests.utils import TestWebots, initialize_webots_test
@pytest.mark.rostest
def generate_test_description():
    """Build the launch description that starts the e-puck Webots demo."""
    initialize_webots_test()
    launch_file = os.path.join(
        get_package_share_directory('webots_ros2_epuck'), 'launch', 'robot_launch.py'
    )
    robot_launch = IncludeLaunchDescription(PythonLaunchDescriptionSource(launch_file))
    # ReadyToTest signals launch_testing that the tests may begin.
    return LaunchDescription([robot_launch, launch_testing.actions.ReadyToTest()])
class TestEpuck(TestWebots):
    """System tests exercising the e-puck's sensors and actuators over ROS 2
    topics while the simulation launched above is running."""
    @classmethod
    def setUpClass(cls):
        rclpy.init()
    @classmethod
    def tearDownClass(cls):
        rclpy.shutdown()
    def setUp(self):
        # Fresh node per test; wait for /clock so simulation time is flowing.
        self.__node = rclpy.create_node('driver_tester')
        self.wait_for_clock(self.__node, messages_to_receive=20)
    def testCamera(self):
        def on_image_message_received(message):
            # Accept once a pixel value strictly between 0 and 255 is seen.
            if message.data[0] > 0 and message.data[0] < 255:
                return True
            return False
        self.wait_for_messages(self.__node, Image, '/camera', condition=on_image_message_received)
    def testPs0(self):
        def on_range_message_received(message):
            # Accept a range reading within the sensor's [0, 2] m bounds.
            if message.range >= 0. and message.range <= 2.:
                return True
            return False
        self.wait_for_messages(self.__node, Range, '/ps0', condition=on_range_message_received)
    def testToF(self):
        def on_range_message_received(message):
            # Accept a range reading within the sensor's [0, 2] m bounds.
            if message.range >= 0. and message.range <= 2.:
                return True
            return False
        self.wait_for_messages(self.__node, Range, '/tof', condition=on_range_message_received)
    def testMovement(self):
        publisher = self.__node.create_publisher(Twist, '/cmd_vel', 1)
        def on_position_message_received(message):
            # Keep commanding forward motion on every odometry message.
            twist_message = Twist()
            twist_message.linear.x = 0.1
            publisher.publish(twist_message)
            # Accept once the robot has moved forward past 0.5 m.
            if message.pose.pose.position.x > 0.5:
                return True
            return False
        self.wait_for_messages(self.__node, Odometry, '/odom', condition=on_position_message_received)
    def testScan(self):
        def on_scan_message_received(message):
            # A valid scan mixes zero (no-return) and positive range values.
            number_of_zeroes = 0
            number_of_non_zeroes = 0
            for value in message.ranges:
                if value == 0.:
                    number_of_zeroes += 1
                elif value > 0.:
                    number_of_non_zeroes += 1
            return number_of_zeroes > 0 and number_of_non_zeroes > 0
        self.wait_for_messages(self.__node, LaserScan, '/scan', condition=on_scan_message_received)
    def tearDown(self):
        self.__node.destroy_node()
| 33.983607 | 103 | 0.681621 |
acfa25439c292115f259e624715718195310fa20 | 626 | py | Python | env/lib/python3.8/site-packages/plotly/validators/scatterpolargl/marker/line/_coloraxis.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 76 | 2020-07-06T14:44:05.000Z | 2022-02-14T15:30:21.000Z | env/lib/python3.8/site-packages/plotly/validators/scatterpolargl/marker/line/_coloraxis.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-08-09T02:30:14.000Z | 2022-03-12T00:50:14.000Z | env/lib/python3.8/site-packages/plotly/validators/scatterpolargl/marker/line/_coloraxis.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-07-12T16:18:07.000Z | 2022-02-05T16:48:35.000Z | import _plotly_utils.basevalidators
class ColoraxisValidator(_plotly_utils.basevalidators.SubplotidValidator):
    """Validator for the ``coloraxis`` subplot-id property of
    ``scatterpolargl.marker.line``."""

    def __init__(
        self,
        plotly_name="coloraxis",
        parent_name="scatterpolargl.marker.line",
        **kwargs
    ):
        # Defaults that the caller may override via keyword arguments.
        settings = dict(
            dflt=kwargs.pop("dflt", None),
            edit_type=kwargs.pop("edit_type", "calc"),
            regex=kwargs.pop("regex", "/^coloraxis([2-9]|[1-9][0-9]+)?$/"),
            role=kwargs.pop("role", "info"),
        )
        settings.update(kwargs)
        super(ColoraxisValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **settings
        )
| 31.3 | 75 | 0.589457 |
acfa260fffb88f34a2420f9350645cc742f1b717 | 5,411 | py | Python | tests/test_threads.py | sanketsaurav/nylas-python | 2a0ed54927bdec489035caa8cf45b2fc8c466964 | [
"MIT"
] | 89 | 2015-06-11T15:42:49.000Z | 2022-03-21T14:41:18.000Z | tests/test_threads.py | sanketsaurav/nylas-python | 2a0ed54927bdec489035caa8cf45b2fc8c466964 | [
"MIT"
] | 165 | 2015-06-23T04:20:29.000Z | 2022-03-26T01:17:44.000Z | tests/test_threads.py | sanketsaurav/nylas-python | 2a0ed54927bdec489035caa8cf45b2fc8c466964 | [
"MIT"
] | 52 | 2015-06-22T22:53:02.000Z | 2021-11-22T22:34:48.000Z | from datetime import datetime
import pytest
from urlobject import URLObject
from nylas.client.restful_models import Message, Draft, Label
from nylas.utils import timestamp_from_dt
@pytest.mark.usefixtures("mock_threads")
def test_thread_attrs(api_client):
    """Thread message times are exposed both as epoch ints and as datetimes."""
    thread = api_client.threads.first()
    expected_first = datetime(2016, 1, 2, 3, 4, 5)
    expected_last = datetime(2017, 1, 2, 3, 4, 5)
    expected_last_received = datetime(2017, 1, 2, 3, 4, 5)
    expected_last_sent = datetime(2017, 1, 1, 1, 1, 1)
    assert thread.first_message_timestamp == timestamp_from_dt(expected_first)
    assert thread.first_message_at == expected_first
    assert thread.last_message_timestamp == timestamp_from_dt(expected_last)
    assert thread.last_message_at == expected_last
    assert thread.last_message_received_timestamp == timestamp_from_dt(
        expected_last_received
    )
    assert thread.last_message_received_at == expected_last_received
    assert thread.last_message_sent_timestamp == timestamp_from_dt(expected_last_sent)
    assert thread.last_message_sent_at == expected_last_sent
def test_update_thread_attrs(api_client):
    """Timestamp and datetime fields are independent, but datetimes win on JSON."""
    thread = api_client.threads.create()
    first = datetime(2017, 2, 3, 10, 0, 0)
    second = datetime(2016, 10, 5, 14, 30, 0)
    # timestamps and datetimes are handled totally separately
    thread.last_message_at = first
    thread.last_message_timestamp = timestamp_from_dt(second)
    assert thread.last_message_at == first
    assert thread.last_message_timestamp == timestamp_from_dt(second)
    # but datetimes overwrite timestamps when serializing to JSON
    assert thread.as_json()["last_message_timestamp"] == timestamp_from_dt(first)
@pytest.mark.usefixtures("mock_threads")
def test_thread_folder(api_client):
    """Mocked thread exposes its folder, starred flag, and read state."""
    thread = api_client.threads.first()
    assert len(thread.labels) == 0 # pylint: disable=len-as-condition
    assert len(thread.folders) == 1
    assert thread.folders[0].display_name == "Inbox"
    assert not thread.unread
    assert thread.starred
@pytest.mark.usefixtures("mock_folder_account", "mock_threads", "mock_thread")
def test_thread_change(api_client):
    """star/unstar and update_folder mutate the local thread state."""
    thread = api_client.threads.first()
    assert thread.starred
    thread.unstar()
    assert not thread.starred
    thread.star()
    assert thread.starred
    thread.update_folder("qwer")
    assert len(thread.folders) == 1
    assert thread.folders[0].id == "qwer"
@pytest.mark.usefixtures("mock_threads", "mock_messages")
def test_thread_messages(api_client):
    """thread.messages yields Message instances."""
    thread = api_client.threads.first()
    assert thread.messages
    assert all(isinstance(message, Message) for message in thread.messages)
@pytest.mark.usefixtures("mock_threads", "mock_drafts")
def test_thread_drafts(api_client):
    """thread.drafts yields Draft instances."""
    thread = api_client.threads.first()
    assert thread.drafts
    assert all(isinstance(draft, Draft) for draft in thread.drafts)
@pytest.mark.usefixtures("mock_labelled_thread", "mock_labels")
def test_thread_label(api_client):
    """add_label/remove_label change and return the label list."""
    thread = api_client.threads.get(111)
    assert len(thread.labels) == 2
    assert all(isinstance(label, Label) for label in thread.labels)
    returned = thread.add_label("fake1")
    assert len(thread.labels) == 3
    assert thread.labels == returned
    returned = thread.remove_label("fake1")
    assert len(thread.labels) == 2 # pylint: disable=len-as-condition
    assert thread.labels == returned
@pytest.mark.usefixtures("mock_labelled_thread", "mock_labels")
def test_thread_labels(api_client):
    """Bulk add_labels/remove_labels change and return the label list."""
    thread = api_client.threads.get(111)
    assert len(thread.labels) == 2
    assert all(isinstance(label, Label) for label in thread.labels)
    returned = thread.add_labels(["fake1", "fake2"])
    assert len(thread.labels) == 4
    assert thread.labels == returned
    label_ids = [l.id for l in thread.labels]
    returned = thread.remove_labels(label_ids)
    assert len(thread.labels) == 0 # pylint: disable=len-as-condition
    assert thread.labels == returned
@pytest.mark.usefixtures("mock_threads", "mock_thread")
def test_thread_read(api_client):
    """mark_as_read/unread/seen toggle the unread flag as expected."""
    thread = api_client.threads.first()
    assert thread.unread is False
    thread.mark_as_unread()
    assert thread.unread is True
    thread.mark_as_read()
    assert thread.unread is False
    # mark_as_seen() is a synonym for mark_as_read()
    thread.mark_as_unread()
    assert thread.unread is True
    thread.mark_as_seen()
    assert thread.unread is False
@pytest.mark.usefixtures("mock_threads")
def test_thread_reply(api_client):
    """create_reply returns a Draft bound to the thread with the same subject."""
    thread = api_client.threads.first()
    draft = thread.create_reply()
    assert isinstance(draft, Draft)
    assert draft.thread_id == thread.id
    assert draft.subject == thread.subject
@pytest.mark.usefixtures("mock_threads")
def test_filter_threads_dt(api_client):
    """A datetime filter value is serialized as an epoch timestamp in the URL."""
    api_client.threads.where(started_before=datetime(2010, 6, 1)).all()
    assert len(mocked_responses.calls) == 1
    request = mocked_responses.calls[0].request
    url = URLObject(request.url)
    assert url.query_dict["started_before"] == "1275350400"
@pytest.mark.usefixtures("mock_threads")
def test_filter_threads_ts(mocked_responses, api_client):
    """An integer timestamp filter value passes through to the URL unchanged."""
    api_client.threads.where(started_before=1275350400).all()
    assert len(mocked_responses.calls) == 1
    request = mocked_responses.calls[0].request
    url = URLObject(request.url)
    assert url.query_dict["started_before"] == "1275350400"
| 35.598684 | 86 | 0.743116 |
acfa268eceb37977ebc9e951c6405d95a71112e1 | 8,288 | py | Python | improvtime/improvtime.py | CakeArmy/OB13-Cogs | b54015d10a83cb9f843a93f0d1e63543d6b0862a | [
"MIT"
] | null | null | null | improvtime/improvtime.py | CakeArmy/OB13-Cogs | b54015d10a83cb9f843a93f0d1e63543d6b0862a | [
"MIT"
] | null | null | null | improvtime/improvtime.py | CakeArmy/OB13-Cogs | b54015d10a83cb9f843a93f0d1e63543d6b0862a | [
"MIT"
] | null | null | null | """
MIT License
Copyright (c) 2021 Obi-Wan3
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import random
import discord
from redbot.core import commands, Config
from redbot.core.utils.chat_formatting import humanize_list, pagify
class ImprovTime(commands.Cog):
    """
    One Word Story Channel
    Story improv, one word at a time.
    """

    def __init__(self, bot):
        self.bot = bot
        # Per-guild settings stored under this cog's unique identifier.
        self.config = Config.get_conf(self, identifier=14000605, force_registration=True)
        default_guild = {
            "toggle": True,          # master on/off switch
            "channel": None,         # story channel id
            "use_phrases": True,     # prepend a random phrase to finished sentences
            "phrase_list": [],       # candidate prefix phrases
            "allow_repeats": False,  # may one user post twice in a row?
            "blocklist": [],         # users not allowed to end a sentence
            "word_limit": 1,         # max words per story message
        }
        self.config.register_guild(**default_guild)

    @staticmethod
    def _ends_sentence(content: str) -> bool:
        """Return True when *content* has text and its last word ends with
        sentence punctuation.

        Guarding on the empty word list fixes an IndexError that was raised
        for empty / attachment-only messages by the previous inline
        ``content.strip().split()[-1][-1]`` expression.
        """
        words = content.strip().split()
        return bool(words) and words[-1][-1] in ["!", ".", "?"]

    @commands.Cog.listener("on_message")
    async def _message_listener(self, message: discord.Message):
        """Moderate the story channel and echo the sentence once it is finished."""
        if not message.guild:
            return

        story_channel = await self.config.guild(message.guild).channel()
        channel_perms: discord.Permissions = message.channel.permissions_for(message.guild.me)

        # Ignore these messages entirely.
        if (
            message.channel.id != story_channel or  # not in the story channel
            await self.bot.cog_disabled_in_guild(self, message.guild) or  # cog disabled in guild
            not await self.config.guild(message.guild).toggle() or  # ImprovTime toggled off
            message.author.bot or  # author is a bot
            story_channel is None or  # story channel not set
            not channel_perms.read_message_history  # cannot read channel history
        ):
            return

        words = message.content.strip().split()

        # Delete rule-breaking messages: too many words, or a disallowed repeat.
        if (
            len(words) > await self.config.guild(message.guild).word_limit() or
            (
                not (await self.config.guild(message.guild).allow_repeats()) and
                (await message.channel.history(limit=1, before=message).flatten())[0].author.id == message.author.id
            )
        ):
            if channel_perms.manage_messages:
                return await message.delete()
            else:
                return

        # A sentence is finished when an allowed user's message ends in punctuation.
        blocklist = await self.config.guild(message.guild).blocklist()
        if (
            self._ends_sentence(message.content) and
            message.author.id not in blocklist and
            channel_perms.send_messages
        ):
            sentence = message.content
            # Walk backwards through history until the previous sentence ending.
            async for m in message.channel.history(limit=None, before=message):
                if (
                    not m.author.bot and
                    self._ends_sentence(m.content) and
                    m.author.id not in blocklist
                ):
                    break
                if not m.author.bot:
                    sentence = f"{m.content} {sentence}"

            phrase_list = await self.config.guild(message.guild).phrase_list()
            if await self.config.guild(message.guild).use_phrases() and phrase_list:
                sentence = f"{random.choice(phrase_list)}\n\n{sentence}"

            if len(message.content) == 1:
                # The ending message was bare punctuation: drop the space before it.
                sentence = sentence[:-2] + sentence[-1]

            return await message.channel.send(sentence)

    @commands.guild_only()
    @commands.mod()
    @commands.group(name="improvtime")
    async def _improvtime(self, ctx: commands.Context):
        """Settings for ImprovTime"""

    @_improvtime.command(name="toggle")
    async def _toggle(self, ctx: commands.Context, true_or_false: bool):
        """Toggle ImprovTime in this server."""
        await self.config.guild(ctx.guild).toggle.set(true_or_false)
        return await ctx.tick()

    @_improvtime.command(name="channel")
    async def _channel(self, ctx: commands.Context, channel: discord.TextChannel):
        """Set the ImprovTime story channel."""
        await self.config.guild(ctx.guild).channel.set(channel.id)
        return await ctx.tick()

    @_improvtime.command(name="allowrepeats")
    async def _allow_repeats(self, ctx: commands.Context, true_or_false: bool):
        """Toggle whether users can send multiple messages in a row."""
        await self.config.guild(ctx.guild).allow_repeats.set(true_or_false)
        return await ctx.tick()

    @_improvtime.command(name="addphrase")
    async def _add_phrase(self, ctx: commands.Context, *, phrase: str):
        """Add a phrase to the phraselist."""
        async with self.config.guild(ctx.guild).phrase_list() as p:
            p.append(phrase.strip())
        return await ctx.tick()

    @_improvtime.command(name="removephrase")
    async def _remove_phrase(self, ctx: commands.Context, phrase_index: int):
        """Remove a phrase from the phraselist (see index from current settings)."""
        # NOTE: an out-of-range index raises IndexError, surfaced by the bot's
        # standard command error handling.
        async with self.config.guild(ctx.guild).phrase_list() as p:
            p.pop(phrase_index)
        return await ctx.tick()

    @_improvtime.command(name="block")
    async def _block(self, ctx: commands.Context, user: discord.Member):
        """Blocks a user from ending the sentence."""
        async with self.config.guild(ctx.guild).blocklist() as b:
            b.append(user.id)
        return await ctx.tick()

    @_improvtime.command(name="unblock")
    async def _unblock(self, ctx: commands.Context, user: discord.Member):
        """Unblocks a user from ending the sentence."""
        async with self.config.guild(ctx.guild).blocklist() as b:
            try:
                b.remove(user.id)
            except ValueError:
                # User was not blocked: treat as success (idempotent unblock).
                pass
        return await ctx.tick()

    @_improvtime.command(name="wordlimit")
    async def _word_limit(self, ctx: commands.Context, num: int):
        """Set the maximum words allowed for each story message."""
        if not num > 0:
            return await ctx.send("Please enter a positive integer.")
        await self.config.guild(ctx.guild).word_limit.set(num)
        return await ctx.tick()

    @commands.bot_has_permissions(embed_links=True)
    @_improvtime.command(name="view")
    async def _view(self, ctx: commands.Context):
        """View the current ImprovTime settings."""
        settings = await self.config.guild(ctx.guild).all()
        phrases = settings["phrase_list"]
        phrases_string = ""
        for phrase_index, phrase in enumerate(phrases):
            phrases_string += f"{phrase_index}. {phrase}\n"
        channel = None
        if settings["channel"] and (ch := ctx.guild.get_channel(settings["channel"])):
            channel = ch.mention
        desc = f"""
        **Toggle:** {settings["toggle"]}
        **Channel:** {channel}
        **Use Phrases:** {settings["use_phrases"]}
        **Word Limit:** {settings["word_limit"]}
        **Allow Repeat Messages from User:** {settings["allow_repeats"]}
        **Sentence Ending Blocklist**: {humanize_list([(await self.bot.get_or_fetch_user(u)).mention for u in settings["blocklist"]]) or "None"}
        **Prefix Phrases**:
        {phrases_string or "None"}
        """
        for p in pagify(desc):
            await ctx.send(embed=discord.Embed(title="ImprovTime Settings", color=await ctx.embed_color(), description=p))
| 42.502564 | 155 | 0.640806 |
acfa27582855faa2e814074d0b53505d8bac4a49 | 2,444 | py | Python | usaspending_api/references/tests/integration/test_adv_search_filters_and_hashes.py | g4brielvs/usaspending-api | bae7da2c204937ec1cdf75c052405b13145728d5 | [
"CC0-1.0"
] | null | null | null | usaspending_api/references/tests/integration/test_adv_search_filters_and_hashes.py | g4brielvs/usaspending-api | bae7da2c204937ec1cdf75c052405b13145728d5 | [
"CC0-1.0"
] | null | null | null | usaspending_api/references/tests/integration/test_adv_search_filters_and_hashes.py | g4brielvs/usaspending-api | bae7da2c204937ec1cdf75c052405b13145728d5 | [
"CC0-1.0"
] | null | null | null | import pytest
from model_mommy import mommy
from rest_framework import status
from usaspending_api.references.models import FilterHash
HASH_ENDPOINT = "/api/v2/references/hash/"
FILTER_ENDPOINT = "/api/v2/references/filter/"
@pytest.fixture
def stored_hashes(db):
    """Seed the test database with a single (empty) FilterHash row."""
    mommy.make("references.FilterHash", filter={}, hash="")
@pytest.mark.django_db
def test_missing_hash(client):
    """Looking up a hash that was never stored returns HTTP 400."""
    resp = client.post(
        HASH_ENDPOINT, content_type="application/json", data={"hash": "1c89eccf09b7dc74a75b651af79602e7"},
    )
    assert resp.status_code == status.HTTP_400_BAD_REQUEST
@pytest.mark.django_db
def test_generate_hash_success(client):
    """Posting a filter payload returns its deterministic MD5 hash."""
    resp = client.post(
        FILTER_ENDPOINT, content_type="application/json", data={"filters": "Department of Transportation"},
    )
    assert resp.status_code == status.HTTP_200_OK
    assert resp.data["hash"] == "1c89eccf09b7dc74a75b651af79602e7"
@pytest.mark.django_db
def test_new_hash(client):
    """A stored filter can be retrieved again via its generated hash."""
    filter_payload = {"filters": "Department of Transportation"}
    resp = client.post(FILTER_ENDPOINT, content_type="application/json", data=filter_payload,)
    resp = client.post(
        HASH_ENDPOINT, content_type="application/json", data={"hash": "1c89eccf09b7dc74a75b651af79602e7"},
    )
    assert resp.status_code == status.HTTP_200_OK
    assert resp.data["filter"] == filter_payload
@pytest.mark.django_db
def test_hash_algorithm(client):
    """The API's hash matches a local MD5-of-JSON and round-trips via the DB."""
    import hashlib
    import json
    filter_payloads = [
        {"filters": "Department of Transportation"},
        {"filters": {"agency": {"name": "Department of Transportation"}}},
        {"filters": {"agency": {"name": "DOT", "level": "toptier"}}},
        {"filters": {"def_codes": ["A", "B", "C", "9"], "cfda": ["10.987", "19.001"]}},
        {"filters": {"agency": {"name": "Department of Transportation"}}},
        {"empty": None},
    ]
    def get_hash_from_api(payload):
        # Hash as reported by the filter endpoint.
        return client.post(FILTER_ENDPOINT, content_type="application/json", data=payload).data["hash"]
    def hash_payload(payload):
        # Reference implementation: MD5 over the JSON-serialized payload.
        m = hashlib.md5()
        m.update(json.dumps(payload).encode("utf8"))
        return str(m.hexdigest().encode("utf8"))[2:-1]
    def get_filters_from_db(provided_hash):
        # Filter payload persisted for the given hash.
        return FilterHash.objects.get(hash=provided_hash).filter
    for fp in filter_payloads:
        print(fp)
        assert get_hash_from_api(fp) == hash_payload(fp)
        assert fp == get_filters_from_db(hash_payload(fp))
| 32.157895 | 107 | 0.686989 |
acfa2810d54b720d1b6f645ecb8b6445346ddc4f | 11,807 | py | Python | Course1FinalProject/controller2d.py | imjaya/Model_Predictive_Controller_for_waypoint_tracking_in_Carla | 7b15a2818268a19ce2e561519ffc4581a80c4226 | [
"Apache-2.0"
] | null | null | null | Course1FinalProject/controller2d.py | imjaya/Model_Predictive_Controller_for_waypoint_tracking_in_Carla | 7b15a2818268a19ce2e561519ffc4581a80c4226 | [
"Apache-2.0"
] | null | null | null | Course1FinalProject/controller2d.py | imjaya/Model_Predictive_Controller_for_waypoint_tracking_in_Carla | 7b15a2818268a19ce2e561519ffc4581a80c4226 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
"""
2D Controller Class to be used for the CARLA waypoint follower demo.
"""
import cutils
import numpy as np
class Controller2D(object):
    def __init__(self, waypoints):
        """Initialize controller state.

        waypoints: sequence of [x, y, v] rows (meters, meters, m/s) to track.
        """
        self.vars = cutils.CUtils()  # persistent storage across control iterations
        # Latest vehicle state, refreshed by update_values().
        self._current_x = 0
        self._current_y = 0
        self._current_yaw = 0
        self._current_speed = 0
        self._desired_speed = 0
        self._current_frame = 0
        self._current_timestamp = 0
        self._start_control_loop = False
        # Actuator commands, clamped to valid ranges by the setters below.
        self._set_throttle = 0
        self._set_brake = 0
        self._set_steer = 0
        self._waypoints = waypoints
        # Conversion factor from steering radians to the [-1, 1] command range.
        self._conv_rad_to_steer = 180.0 / 70.0 / np.pi
        self._pi = np.pi
        self._2pi = 2.0 * np.pi
def update_values(self, x, y, yaw, speed, timestamp, frame):
self._current_x = x
self._current_y = y
self._current_yaw = yaw
self._current_speed = speed
self._current_timestamp = timestamp
self._current_frame = frame
if self._current_frame:
self._start_control_loop = True
def update_desired_speed(self):
min_idx = 0
min_dist = float("inf")
desired_speed = 0
for i in range(len(self._waypoints)):
dist = np.linalg.norm(np.array([
self._waypoints[i][0] - self._current_x,
self._waypoints[i][1] - self._current_y]))
if dist < min_dist:
min_dist = dist
min_idx = i
if min_idx < len(self._waypoints)-1:
desired_speed = self._waypoints[min_idx][2]
else:
desired_speed = self._waypoints[-1][2]
self._desired_speed = desired_speed
    def update_waypoints(self, new_waypoints):
        """Replace the tracked waypoint list ([x, y, v] rows)."""
        self._waypoints = new_waypoints
    def get_commands(self):
        """Return the current (throttle, steer, brake) actuator commands."""
        return self._set_throttle, self._set_steer, self._set_brake
def set_throttle(self, input_throttle):
# Clamp the throttle command to valid bounds
throttle = np.fmax(np.fmin(input_throttle, 1.0), 0.0)
self._set_throttle = throttle
def set_steer(self, input_steer_in_rad):
# Covnert radians to [-1, 1]
input_steer = self._conv_rad_to_steer * input_steer_in_rad
# Clamp the steering command to valid bounds
steer = np.fmax(np.fmin(input_steer, 1.0), -1.0)
self._set_steer = steer
    def set_brake(self, input_brake):
        # Clamp the brake command to valid bounds [0, 1].
        # (Original comment said "steering" — copy-paste slip; this is brake.)
        brake = np.fmax(np.fmin(input_brake, 1.0), 0.0)
        self._set_brake = brake
    def update_controls(self):
        """Compute one control iteration: throttle via PID on speed error,
        steering via a Stanley-style lateral controller, and pass the
        results to the set_* helpers. Brake is left at 0 (optional)."""
        ######################################################
        # RETRIEVE SIMULATOR FEEDBACK
        ######################################################
        x = self._current_x
        y = self._current_y
        yaw = self._current_yaw
        v = self._current_speed
        self.update_desired_speed()
        v_desired = self._desired_speed
        t = self._current_timestamp
        waypoints = self._waypoints
        throttle_output = 0
        steer_output = 0
        brake_output = 0

        ######################################################
        ######################################################
        # MODULE 7: DECLARE USAGE VARIABLES HERE
        ######################################################
        ######################################################
        """
        Use 'self.vars.create_var(<variable name>, <default value>)'
        to create a persistent variable (not destroyed at each iteration).
        This means that the value can be stored for use in the next
        iteration of the control loop.

        Example: Creation of 'v_previous', default value to be 0
        self.vars.create_var('v_previous', 0.0)

        Example: Setting 'v_previous' to be 1.0
        self.vars.v_previous = 1.0

        Example: Accessing the value from 'v_previous' to be used
        throttle_output = 0.5 * self.vars.v_previous
        """
        # Persistent state for the PID integrator/differentiator and the
        # throttle rate limiter. create_var is a no-op if already present.
        self.vars.create_var('v_previous', 0.0)
        self.vars.create_var('t_previous', 0.0)
        self.vars.create_var('error_previous', 0.0)
        self.vars.create_var('integral_error_previous', 0.0)
        self.vars.create_var('throttle_previous', 0.0)

        # Skip the first frame to store previous values properly
        if self._start_control_loop:
            """
            Controller iteration code block.

            Controller Feedback Variables:
                x               : Current X position (meters)
                y               : Current Y position (meters)
                yaw             : Current yaw pose (radians)
                v               : Current forward speed (meters per second)
                t               : Current time (seconds)
                v_desired       : Current desired speed (meters per second)
                                  (Computed as the speed to track at the
                                  closest waypoint to the vehicle.)
                waypoints       : Current waypoints to track
                                  (Includes speed to track at each x,y
                                  location.)
                                  Format: [[x0, y0, v0],
                                           [x1, y1, v1],
                                           ...
                                           [xn, yn, vn]]
                                  Example:
                                      waypoints[2][1]:
                                      Returns the 3rd waypoint's y position
                                      waypoints[5]:
                                      Returns [x5, y5, v5] (6th waypoint)

            Controller Output Variables:
                throttle_output : Throttle output (0 to 1)
                steer_output    : Steer output (-1.22 rad to 1.22 rad)
                brake_output    : Brake output (0 to 1)
            """
            ######################################################
            ######################################################
            # MODULE 7: IMPLEMENTATION OF LONGITUDINAL CONTROLLER HERE
            ######################################################
            ######################################################
            """
            Implement a longitudinal controller here. Remember that you can
            access the persistent variables declared above here. For
            example, can treat self.vars.v_previous like a "global variable".
            """
            # Change these outputs with the longitudinal controller. Note that
            # brake_output is optional and is not required to pass the
            # assignment, as the car will naturally slow down over time.
            # PID gains for the speed loop.
            kp = 1.0
            ki = 0.2
            kd = 0.01
            throttle_output = 0
            brake_output = 0
            # pid control
            st = t - self.vars.t_previous
            # error term
            e_v = v_desired - v
            # I: accumulated (rectangular) integral of the speed error
            inte_v = self.vars.integral_error_previous + e_v * st
            # D: finite-difference derivative of the speed error
            derivate = (e_v - self.vars.error_previous) / st
            acc = kp * e_v + ki * inte_v + kd * derivate
            if acc > 0:
                # Squash the commanded acceleration into (0, 1) via tanh...
                throttle_output = (np.tanh(acc) + 1)/2
                # throttle_output = max(0.0, min(1.0, throttle_output))
                # ...and rate-limit the throttle rise to 0.1 per iteration.
                if throttle_output - self.vars.throttle_previous > 0.1:
                    throttle_output = self.vars.throttle_previous + 0.1
            else:
                throttle_output = 0

            ######################################################
            ######################################################
            # MODULE 7: IMPLEMENTATION OF LATERAL CONTROLLER HERE
            ######################################################
            ######################################################
            """
            Implement a lateral controller here. Remember that you can
            access the persistent variables declared above here. For
            example, can treat self.vars.v_previous like a "global variable".
            """
            # Change the steer output with the lateral controller.
            steer_output = 0

            # Use stanley controller for lateral control
            k_e = 0.3
            slope = (waypoints[-1][1]-waypoints[0][1])/ (waypoints[-1][0]-waypoints[0][0])
            # NOTE(review): a, b, c (line coefficients for the path segment)
            # are computed here but never used afterwards.
            a = -slope
            b = 1.0
            c = (slope*waypoints[0][0]) - waypoints[0][1]

            # heading error: path direction from first to last waypoint
            yaw_path = np.arctan2(waypoints[-1][1]-waypoints[0][1], waypoints[-1][0]-waypoints[0][0])
            # yaw_path = np.arctan2(slope, 1.0) # This was turning the vehicle only to the right (some error)
            yaw_diff_heading = yaw_path - yaw
            # Wrap the heading error into [-pi, pi].
            if yaw_diff_heading > np.pi:
                yaw_diff_heading -= 2 * np.pi
            if yaw_diff_heading < - np.pi:
                yaw_diff_heading += 2 * np.pi

            # crosstrack error
            current_xy = np.array([x, y])
            # NOTE(review): this is the minimum SQUARED distance (no sqrt);
            # k_e was presumably tuned against it — confirm before "fixing".
            crosstrack_error = np.min(np.sum((current_xy - np.array(waypoints)[:, :2])**2, axis=1))
            yaw_cross_track = np.arctan2(y-waypoints[0][1], x-waypoints[0][0])
            yaw_path2ct = yaw_path - yaw_cross_track
            if yaw_path2ct > np.pi:
                yaw_path2ct -= 2 * np.pi
            if yaw_path2ct < - np.pi:
                yaw_path2ct += 2 * np.pi
            # Sign the crosstrack error by which side of the path we are on.
            if yaw_path2ct > 0:
                crosstrack_error = abs(crosstrack_error)
            else:
                crosstrack_error = - abs(crosstrack_error)
            # NOTE(review): at standstill v == 0; crosstrack_error is a numpy
            # scalar (from np.min), so this divides to inf (arctan -> pi/2)
            # with a RuntimeWarning rather than raising — consider v + eps.
            yaw_diff_crosstrack = np.arctan(k_e * crosstrack_error / (v))

            # final expected steering
            steer_expect = yaw_diff_crosstrack + yaw_diff_heading
            if steer_expect > np.pi:
                steer_expect -= 2 * np.pi
            if steer_expect < - np.pi:
                steer_expect += 2 * np.pi
            # Saturate at the physical steering limit of +/- 1.22 rad.
            steer_expect = min(1.22, steer_expect)
            steer_expect = max(-1.22, steer_expect)

            # update
            steer_output = steer_expect

            ######################################################
            # SET CONTROLS OUTPUT
            ######################################################
            self.set_throttle(throttle_output)  # in percent (0 to 1)
            self.set_steer(steer_output)        # in rad (-1.22 to 1.22)
            self.set_brake(brake_output)        # in percent (0 to 1)

            ######################################################
            ######################################################
            # MODULE 7: STORE OLD VALUES HERE (ADD MORE IF NECESSARY)
            ######################################################
            ######################################################
            """
            Use this block to store old values (for example, we can store the
            current x, y, and yaw values here using persistent variables for use
            in the next iteration)
            """
            self.vars.v_previous = v  # Store forward speed to be used in next step
            self.vars.throttle_previous = throttle_output
            self.vars.t_previous = t
            self.vars.error_previous = e_v
            self.vars.integral_error_previous = inte_v
acfa2874d5a46f5b793e3838e1551297a68e464c | 10,724 | py | Python | action_plugins/docker-compose.py | locationlabs/ansible-role_docker-composable | ce73d1dc357d11155ae241d2300f99cd39231b75 | [
"Apache-2.0"
] | 1 | 2018-05-09T18:43:44.000Z | 2018-05-09T18:43:44.000Z | action_plugins/docker-compose.py | locationlabs/ansible-role_docker-composable | ce73d1dc357d11155ae241d2300f99cd39231b75 | [
"Apache-2.0"
] | null | null | null | action_plugins/docker-compose.py | locationlabs/ansible-role_docker-composable | ce73d1dc357d11155ae241d2300f99cd39231b75 | [
"Apache-2.0"
] | 3 | 2016-02-04T23:10:01.000Z | 2021-09-08T09:44:21.000Z | """
Ansible plugin for managing roles using docker-compose.
Check mode is supported to the extent that changes will not be made,
but not so much that expected changes are computed.
Each role is assumed to have its own docker-compose YAML file defining
some number of Docker containers (and images). The plugin handles managing
the YAML file, the Docker images, and the Docker containers.
"""
DOCUMENTATION = """
---
module: docker-compose
short_description: manage docker compose
description:
- Manage docker-compose YAML, images, and containers
options:
data:
description:
- docker-compose YAML data (passed as a complex argument)
required: true
role:
description:
- name of the rule
required: true
containers:
description:
- desired state of containers; one of 'absent', 'present', 'started', 'restarted'
required: false
images:
description:
- desired state of images; one of 'absent', 'latest', or 'present'.
required: false
"""
EXAMPLES = """
- docker-compose:
role: nginx
data:
nginx:
image: nginx:latest
ports:
- 80:80
images: latest
containers: started
"""
from tempfile import NamedTemporaryFile
from ansible.callbacks import vv, vvv
from ansible.runner.action_plugins.template import ActionModule as TemplateModule
from ansible.runner.return_data import ReturnData
from ansible.utils import parse_kv
# Desired-state values accepted by the ``images`` and ``containers`` options.
ABSENT = "absent"
LATEST = "latest"
PRESENT = "present"
RESTARTED = "restarted"
STARTED = "started"
class ModuleError(Exception):
    """Raised when option validation fails or a delegated module reports failure."""
    pass
class ActionModule(object):
    """Ansible action plugin that manages a role's docker-compose YAML file,
    Docker images, and containers by delegating to other modules."""

    def __init__(self, runner):
        self.runner = runner
        # Per-invocation state, populated by run()/set_options().
        self.options = None
        self.changed = False
        self.conn = None
        self.tmp = None
        self.inject = None
    @property
    def basedir(self):
        """Base directory of the Ansible runner."""
        return self.runner.basedir

    @property
    def docker_compose_directory(self):
        """Per-role directory that holds the docker-compose.yml file."""
        return "/etc/docker-compose/{}".format(self.role)

    @property
    def docker_compose_file(self):
        """Full path of the role's docker-compose.yml file."""
        return "{}/docker-compose.yml".format(self.docker_compose_directory)

    @property
    def role(self):
        """Name of the role being managed (required option)."""
        return self.options["role"]

    @property
    def data(self):
        """docker-compose YAML data for the role (required option)."""
        return self.options["data"]

    @property
    def images(self):
        """Image names referenced by the role's container definitions."""
        return [
            container["image"]
            for container in self.data.values()
            if "image" in container
        ]

    @property
    def images_state(self):
        """Desired image state: 'absent', 'latest', 'present', or None."""
        return self.options.get("images")

    @property
    def containers_state(self):
        """Desired container state: 'absent', 'present', 'started', 'restarted', or None."""
        return self.options.get("containers")
    def execute_module(self,
                       module_name,
                       module_args,
                       complex_args=None):
        """Run another Ansible module on the target host and normalize its
        result via handle_module_result (raises ModuleError on failure)."""
        module_response = self.runner._execute_module(
            self.conn,
            self.tmp,
            module_name,
            module_args,
            inject=self.inject,
            complex_args=complex_args,
        )
        return self.handle_module_result(module_response.result)

    def handle_module_result(self, result, changed_if=None, failed_if=None):
        """Record 'changed', raise ModuleError on 'failed', return the result.

        NOTE(review): the ``changed_if`` / ``failed_if`` parameters are
        accepted but never used in this implementation.
        """
        changed = result.get("changed", False)
        failed = result.get("failed", False)
        msg = result.get("msg", result.get("stderr", ""))
        vvv("result: failed={} changed={} msg={}".format(
            failed,
            changed,
            msg,
        ))
        if changed:
            # Once any delegated module reports a change, the whole action
            # is reported as changed.
            self.changed = True
        if failed:
            raise ModuleError(msg)
        return result
    def create_docker_compose_configuration_directory(self):
        """
        Create directory for role-specific docker-compose.yml files.

        Each role's compose file lives in `/etc/docker-compose/<role>/`
        for clarity and to support container recreation outside of Ansible.
        """
        vv("creating: docker-compose configuration directory for '{}'".format(
            self.role,
        ))
        module_args = "path={} state=directory".format(
            self.docker_compose_directory,
        )
        return self.execute_module("file", module_args)

    def remove_docker_compose_configuration_directory(self):
        """
        Remove directory for role-specific docker-compose.yml files.
        """
        vv("removing: docker-compose configuration directory for '{}'".format(
            self.role
        ))
        module_args = "path={} state=absent".format(
            self.docker_compose_directory,
        )
        return self.execute_module("file", module_args)

    def create_docker_compose_file(self):
        """
        Create the role's docker-compose file by rendering the YAML data
        through the standard template module.
        """
        vv("creating: docker-compose configuration file for '{}'".format(
            self.role,
        ))
        module = TemplateModule(self.runner)

        with NamedTemporaryFile() as template_file:
            # Create a template file for the YAML data
            # NOTE(review): NamedTemporaryFile defaults to binary mode, so
            # writing a str here relies on Python 2 semantics — confirm.
            template_file.write("{{ data|to_nice_yaml }}\n")
            template_file.flush()

            # Use the template module to create the file from YAML data.
            module_args = "src={} dest={}".format(
                template_file.name,
                self.docker_compose_file,
            )
            module_inject = self.inject.copy()
            module_inject["data"] = self.data
            module_response = module.run(
                self.conn, self.tmp, "template", module_args, inject=module_inject,
            )
            return self.handle_module_result(module_response.result)
    def has_docker_compose_file(self):
        """
        Does the role's docker-compose file exist? Returns a bool.
        """
        vv("checking: docker-compose configuration file for '{}'".format(
            self.role,
        ))
        module_args = "path={}".format(
            self.docker_compose_file,
        )
        result = self.execute_module("stat", module_args)
        return result["stat"]["exists"]

    def remove_docker_compose_file(self):
        """
        Remove the role's docker-compose file.
        """
        vv("removing: docker-compose configuration file for '{}'".format(
            self.role,
        ))
        module_args = "path={} state=absent".format(
            self.docker_compose_file,
        )
        return self.execute_module("file", module_args)

    def create_docker_compose_containers(self):
        """
        Create containers using docker-compose.

        Containers will be forcibly recreated if the state is "restarted".

        Note that docker-compose will recreate containers even if the state is
        "started" if it detects a change to the image or configuration data. In
        the event that recreation needs to be suppressed, docker-compose must be
        told explicilty NOT to recreate containers. This behavior is not supported
        at this time.
        """
        vv("creating: docker-compose containers for '{}'".format(
            self.role
        ))
        module_args = "path={} state=started force={}".format(
            self.docker_compose_file,
            "true" if self.containers_state in (RESTARTED,) else "false",
        )
        return self.execute_module("docker-compose", module_args)

    def remove_docker_compose_containers(self):
        """
        Remove containers using docker-compose.
        """
        vv("removing: docker-compose containers for '{}'".format(
            self.role,
        ))
        module_args = "path={} state=absent force=true".format(
            self.docker_compose_file,
        )
        return self.execute_module("docker-compose", module_args)
    def pull_images(self):
        """
        Pull docker images referenced by the role (delegates to the
        docker-images module with the configured state).
        """
        vv("pulling: docker images for '{}'".format(
            self.role,
        ))
        module_args = "state={}".format(
            self.images_state,
        )
        return self.execute_module(
            "docker-images",
            module_args,
            complex_args=dict(images=self.images),
        )

    def remove_images(self):
        """Remove the role's docker images via the docker-images module."""
        vv("removing: docker images for '{}'".format(
            self.role,
        ))
        module_args = "state={}".format(
            self.images_state,
        )
        return self.execute_module(
            "docker-images",
            module_args,
            complex_args=dict(images=self.images),
        )
def set_options(self, module_args, complex_args):
parsed_args = parse_kv(module_args)
if complex_args:
parsed_args.update(complex_args)
self.options = {
key: parsed_args.get(key)
for key in ["containers", "data", "images", "role"]
}
if not self.options["role"]:
raise ModuleError("role is required")
if not self.data:
raise ModuleError("data is required")
    def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
        """
        Run the action plugin: validate options, then converge images and
        containers to the requested states. Returns ReturnData; any
        ModuleError is converted to a failed result.
        """
        # save standard module args for convenience
        self.conn, self.tmp, self.inject = conn, tmp, inject

        # Check mode: report current 'changed' status without acting.
        if self.runner.check:
            return ReturnData(
                conn=conn,
                result=dict(failed=False, changed=self.changed, msg="ok")
            )

        try:
            # preserve and validate options
            self.set_options(module_args, complex_args)

            # pull image first (to allow for container restart)
            if self.images_state in (PRESENT, LATEST):
                self.pull_images()

            if self.containers_state in (ABSENT,):
                if self.has_docker_compose_file():
                    self.remove_docker_compose_containers()
                self.remove_docker_compose_file()
                self.remove_docker_compose_configuration_directory()
            elif self.containers_state in (PRESENT, STARTED, RESTARTED):
                self.create_docker_compose_configuration_directory()
                self.create_docker_compose_file()
                if self.containers_state in (STARTED, RESTARTED):
                    self.create_docker_compose_containers()

            # remove image last (to allow for container removal first)
            if self.images_state in (ABSENT,):
                self.remove_images()
        except ModuleError as error:
            # NOTE(review): Exception.message is Python-2-only; on Python 3
            # this would raise AttributeError — confirm target interpreter.
            return ReturnData(
                conn=conn,
                result=dict(failed=True, changed=self.changed, msg=error.message)
            )
        else:
            return ReturnData(
                conn=conn,
                result=dict(failed=False, changed=self.changed, msg="ok")
            )
| 30.816092 | 93 | 0.592596 |
acfa28e8b6f10e9a7fc9a331d25dd502da5d4290 | 6,904 | py | Python | pymelcloud/device.py | vilppuvuorinen/pymelcloud | 89b6540d074a31221778f9e6ce8bc932926b54ef | [
"MIT"
] | 21 | 2020-04-06T16:31:06.000Z | 2022-03-17T17:27:38.000Z | pymelcloud/device.py | vilppuvuorinen/pymelcloud | 89b6540d074a31221778f9e6ce8bc932926b54ef | [
"MIT"
] | 16 | 2020-02-14T21:03:00.000Z | 2022-03-09T12:20:25.000Z | pymelcloud/device.py | vilppuvuorinen/pymelcloud | 89b6540d074a31221778f9e6ce8bc932926b54ef | [
"MIT"
] | 14 | 2020-02-10T20:15:47.000Z | 2022-03-19T22:47:42.000Z | """Base MELCloud device."""
import asyncio
from abc import ABC, abstractmethod
from datetime import datetime, timedelta, timezone
from typing import Any, Dict, List, Optional
from pymelcloud.client import Client
from pymelcloud.const import (
DEVICE_TYPE_LOOKUP,
DEVICE_TYPE_UNKNOWN,
UNIT_TEMP_CELSIUS,
UNIT_TEMP_FAHRENHEIT,
ACCESS_LEVEL,
)
# Pseudo-property handled specially by Device.set()/_write() (not delegated
# to apply_write).
PROPERTY_POWER = "power"

# Keys used in the MELCloud device state payload.
EFFECTIVE_FLAGS = "EffectiveFlags"
HAS_PENDING_COMMAND = "HasPendingCommand"
class Device(ABC):
    """MELCloud base device representation."""

    def __init__(
        self,
        device_conf: Dict[str, Any],
        client: Client,
        set_debounce=timedelta(seconds=1),
    ):
        """Initialize a device.

        :param device_conf: raw device configuration dict from MELCloud.
        :param client: API client used for state reads and writes.
        :param set_debounce: delay applied before flushing queued writes.
        """
        self.device_id = device_conf.get("DeviceID")
        self.building_id = device_conf.get("BuildingID")
        self.mac = device_conf.get("MacAddress")
        self.serial = device_conf.get("SerialNumber")
        self.access_level = device_conf.get("AccessLevel")
        self._use_fahrenheit = False
        if client.account is not None:
            self._use_fahrenheit = client.account.get("UseFahrenheit", False)
        self._device_conf = device_conf
        self._state = None          # last fetched state payload (None until update())
        self._device_units = None   # unit/model info, fetched lazily in update()
        self._client = client
        # Debounced-write machinery used by set()/_write().
        self._set_debounce = set_debounce
        self._set_event = asyncio.Event()
        self._write_task: Optional[asyncio.Future[None]] = None
        self._pending_writes: Dict[str, Any] = {}
    def get_device_prop(self, name: str) -> Optional[Any]:
        """Access device properties while shortcutting the nested device access."""
        device = self._device_conf.get("Device", {})
        return device.get(name)

    def get_state_prop(self, name: str) -> Optional[Any]:
        """Access a state property; returns None when no state has been
        fetched yet or the key is missing."""
        if self._state is None:
            return None
        return self._state.get(name)
    @abstractmethod
    def apply_write(self, state: Dict[str, Any], key: str, value: Any):
        """Apply writes to state object.

        Used for property validation, do not modify device state.
        Subclasses must mutate ``state`` (and its EffectiveFlags) in place.
        """
        pass
    async def update(self):
        """Fetch state of the device from MELCloud.

        List of device_confs is also updated.

        Please, rate limit calls to this method. Polling every 60 seconds should be
        enough to catch all events at the rate they are coming in to MELCloud with the
        exception of changes performed through MELCloud directly.
        """
        await self._client.update_confs()
        # Re-resolve this device's conf from the refreshed list by ID pair.
        self._device_conf = next(
            c
            for c in self._client.device_confs
            if c.get("DeviceID") == self.device_id
            and c.get("BuildingID") == self.building_id
        )
        self._state = await self._client.fetch_device_state(self)
        # Unit/model info is fetched once, and only for non-guest access.
        if self._device_units is None and self.access_level != ACCESS_LEVEL.get(
            "GUEST"
        ):
            self._device_units = await self._client.fetch_device_units(self)
    async def set(self, properties: Dict[str, Any]):
        """Schedule property write to MELCloud.

        Writes are debounced: rescheduling cancels any pending flush, and
        this call blocks until the flush completes.
        """
        if self._write_task is not None:
            self._write_task.cancel()

        # Validation pass: apply_write against a throwaway dict so invalid
        # values raise before anything is queued. Power is handled in _write.
        for k, value in properties.items():
            if k == PROPERTY_POWER:
                continue
            self.apply_write({}, k, value)

        self._pending_writes.update(properties)

        self._write_task = asyncio.ensure_future(self._write())
        await self._set_event.wait()

    async def _write(self):
        """Flush pending writes to MELCloud after the debounce delay."""
        await asyncio.sleep(self._set_debounce.total_seconds())
        new_state = self._state.copy()
        for k, value in self._pending_writes.items():
            if k == PROPERTY_POWER:
                new_state["Power"] = value
                new_state[EFFECTIVE_FLAGS] = new_state.get(EFFECTIVE_FLAGS, 0) | 0x01
            else:
                self.apply_write(new_state, k, value)

        # NOTE(review): assumes EFFECTIVE_FLAGS is present in the state or
        # was set above/by apply_write; otherwise this raises KeyError.
        if new_state[EFFECTIVE_FLAGS] != 0:
            new_state.update({HAS_PENDING_COMMAND: True})
        self._pending_writes = {}
        self._state = await self._client.set_device_state(new_state)
        self._set_event.set()
        self._set_event.clear()
    @property
    def name(self) -> str:
        """Return device name."""
        return self._device_conf["DeviceName"]

    @property
    def device_type(self) -> str:
        """Return type of the device (lookup from the numeric DeviceType)."""
        return DEVICE_TYPE_LOOKUP.get(
            self._device_conf.get("Device", {}).get("DeviceType", -1),
            DEVICE_TYPE_UNKNOWN,
        )

    @property
    def units(self) -> Optional[List[dict]]:
        """Return device model info, or None if units were never fetched."""
        if self._device_units is None:
            return None
        infos: List[dict] = []
        for unit in self._device_units:
            infos.append(
                {
                    "model_number": unit.get("ModelNumber"),
                    "model": unit.get("Model"),
                    "serial_number": unit.get("SerialNumber"),
                }
            )
        return infos
    @property
    def temp_unit(self) -> str:
        """Return temperature unit used by the device (per account setting)."""
        if self._use_fahrenheit:
            return UNIT_TEMP_FAHRENHEIT
        return UNIT_TEMP_CELSIUS

    @property
    def temperature_increment(self) -> float:
        """Return temperature increment (defaults to 0.5 when missing)."""
        return self._device_conf.get("Device", {}).get("TemperatureIncrement", 0.5)

    @property
    def last_seen(self) -> Optional[datetime]:
        """Return timestamp of the last communication from device to MELCloud.

        The timestamp is in UTC. None until a state has been fetched.
        """
        if self._state is None:
            return None
        return datetime.strptime(
            self._state.get("LastCommunication"), "%Y-%m-%dT%H:%M:%S.%f"
        ).replace(tzinfo=timezone.utc)

    @property
    def power(self) -> Optional[bool]:
        """Return power on / standby state of the device."""
        if self._state is None:
            return None
        return self._state.get("Power")

    @property
    def wifi_signal(self) -> Optional[int]:
        """Return wifi signal in dBm (negative value)."""
        if self._device_conf is None:
            return None
        return self._device_conf.get("Device", {}).get("WifiSignalStrength", None)

    @property
    def has_error(self) -> bool:
        """Return True if the device has error state."""
        if self._state is None:
            return False
        return self._state.get("HasError", False)

    @property
    def error_code(self) -> Optional[str]:
        """Return error_code.

        This is a property that probably should be checked if "has_error" = true
        Till now I have a fixed code = 8000 and never have error on the units
        """
        if self._state is None:
            return None
        return self._state.get("ErrorCode", None)
| 32.566038 | 86 | 0.61095 |
acfa2d6b194b92a111d31911c124f768472720e8 | 605 | py | Python | python/misc/return_the_next_number_from_the_integer_passed.py | christopher-burke/warmups | 140c96ada87ec5e9faa4622504ddee18840dce4a | [
"MIT"
] | null | null | null | python/misc/return_the_next_number_from_the_integer_passed.py | christopher-burke/warmups | 140c96ada87ec5e9faa4622504ddee18840dce4a | [
"MIT"
] | 2 | 2022-03-10T03:49:14.000Z | 2022-03-14T00:49:54.000Z | python/misc/return_the_next_number_from_the_integer_passed.py | christopher-burke/warmups | 140c96ada87ec5e9faa4622504ddee18840dce4a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Return the Next Number from the Integer Passed.
Create a function that takes a number as an argument,
increments the number by +1 and returns the result.
Source:
https://edabit.com/challenge/KjCS7occ9hfu5snpb
"""
def addition(num: int) -> int:
    """Return the number that follows *num*, i.e. num incremented by one."""
    successor = num + 1
    return successor
def main():
    """Run sample addition checks. Do not import."""
    cases = ((2, 3), (-9, -8), (0, 1), (999, 1000), (73, 74))
    for value, expected in cases:
        assert addition(value) == expected
    print('Passed.')
if __name__ == "__main__":
    # Execute the self-checks only when run as a script.
    main()
| 20.166667 | 55 | 0.652893 |
acfa2d70157808751af800b1ce045b158518b631 | 2,102 | py | Python | data/cirq_new/cirq_program/startCirq_Class576.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/cirq_new/cirq_program/startCirq_Class576.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/cirq_new/cirq_program/startCirq_Class576.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=18
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
def make_circuit(n: int, input_qubit):
    """Build the fixed benchmark circuit on the given qubits.

    ``n`` is not used: the gate sequence is hard-coded. The repeated
    self-inverse pairs (SWAP/SWAP, Y/Y, X/X) are deliberate artifacts of the
    generated benchmark and must not be "optimized" away.
    """
    c = cirq.Circuit()  # circuit begin

    c.append(cirq.H.on(input_qubit[0])) # number=1
    c.append(cirq.H.on(input_qubit[1])) # number=2
    c.append(cirq.Y.on(input_qubit[2])) # number=13
    c.append(cirq.H.on(input_qubit[1])) # number=7
    c.append(cirq.H.on(input_qubit[2])) # number=3
    c.append(cirq.H.on(input_qubit[3])) # number=4
    c.append(cirq.H.on(input_qubit[0])) # number=10
    c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=11
    c.append(cirq.H.on(input_qubit[0])) # number=12
    c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=6
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=8
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=9
    c.append(cirq.Y.on(input_qubit[1])) # number=14
    c.append(cirq.Y.on(input_qubit[1])) # number=15
    c.append(cirq.X.on(input_qubit[2])) # number=16
    c.append(cirq.X.on(input_qubit[2])) # number=17
    # circuit end

    return c
def bitstring(bits):
    """Render an iterable of bit-like values as a string of 0s and 1s."""
    return "".join([str(int(bit)) for bit in bits])
if __name__ == '__main__':
    qubit_count = 4

    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    # Compile for the Sycamore gate set before simulation.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')

    # NOTE(review): circuit_sample_count is defined but never used below.
    circuit_sample_count =2820

    info = cirq.final_state_vector(circuit)

    qubits = round(log2(len(info)))
    # Map each basis state to its measurement probability |amplitude|^2.
    frequencies = {
        np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
        for i in range(2 ** qubits)
    }
    writefile = open("../data/startCirq_Class576.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)

    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)

    writefile.close()
acfa2f70da75a4f905f3dd5ed729f9bab0226fb3 | 31 | py | Python | jupyter_wlm_spawner/__init__.py | dchirikov/jupyter_wlm_spawner | be0e5723f6dae017e7a1055d7055008bf57c2b3d | [
"MIT"
] | 2 | 2019-08-04T14:53:46.000Z | 2019-08-23T13:12:52.000Z | jupyter_wlm_spawner/__init__.py | dchirikov/jupyter_wlm_spawner | be0e5723f6dae017e7a1055d7055008bf57c2b3d | [
"MIT"
] | null | null | null | jupyter_wlm_spawner/__init__.py | dchirikov/jupyter_wlm_spawner | be0e5723f6dae017e7a1055d7055008bf57c2b3d | [
"MIT"
] | null | null | null | __author__ = 'Dmitry Chirikov'
| 15.5 | 30 | 0.774194 |
acfa30c04898c242a878b3e2325a84ecc9935c4b | 5,182 | py | Python | data/p3BR/R1/benchmark/startQiskit_noisy34.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p3BR/R1/benchmark/startQiskit_noisy34.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p3BR/R1/benchmark/startQiskit_noisy34.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=3
# total number=9
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
    """XOR two equal-length bit strings; the result string is reversed
    relative to the input order (as required by the caller)."""
    combined = ''.join(str(int(s[i]) ^ int(t[i])) for i in range(len(s)))
    return combined[::-1]
def bitwise_dot(s: str, t: str) -> str:
    """Dot product of two bit strings modulo 2, returned as '0' or '1'."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Build the phase-free oracle O_f as a circuit flipping the target
    qubit for every n-bit input string on which f evaluates to '1'."""
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # Sandwich the multi-controlled X with X gates so the control
            # pattern matches this specific bit string.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    # oracle.draw('mpl', filename=(kernel + '-oracle.png'))
    return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Build the (mutated) Bernstein-Vazirani circuit for oracle f on n bits.

    The extra H/RX/H prologue is a benchmark mutation, kept as-is.
    """
    # implement the Bernstein-Vazirani circuit
    zero = np.binary_repr(0, n)
    # NOTE(review): ``b`` is computed here but never used in this function.
    b = f(zero)

    # initial n + 1 bits
    input_qubit = QuantumRegister(n+1, "qc")
    classicals = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classicals)

    # inverse last one (can be omitted if using O_f^\pm)
    prog.x(input_qubit[n])

    # circuit begin
    prog.h(input_qubit[1]) # number=1
    prog.rx(-0.09738937226128368,input_qubit[2]) # number=2
    prog.h(input_qubit[1]) # number=3

    # apply H to get superposition
    for i in range(n):
        prog.h(input_qubit[i])
    prog.h(input_qubit[n])
    prog.barrier()

    # apply oracle O_f
    oracle = build_oracle(n, f)
    prog.append(
        oracle.to_gate(),
        [input_qubit[i] for i in range(n)] + [input_qubit[n]])

    # apply H back (QFT on Z_2^n)
    for i in range(n):
        prog.h(input_qubit[i])
    prog.barrier()

    # measure
    return prog
def get_statevector(prog: QuantumCircuit) -> Any:
    """Simulate prog and return {'|bits>': amplitude} for every basis state."""
    state_backend = Aer.get_backend('statevector_simulator')
    statevec = execute(prog, state_backend).result()
    quantum_state = statevec.get_statevector()
    qubits = round(log2(len(quantum_state)))
    quantum_state = {
        "|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
        for i in range(2 ** qubits)
    }
    return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
    """Simulate prog on the named Aer backend and return the measurement
    counts, statevector, most-frequent bitstring ``a`` (bit-reversed), and
    the expected offset ``b``."""
    # Q: which backend should we use?

    # get state vector
    quantum_state = get_statevector(prog)

    # get simulate results

    # provider = IBMQ.load_account()
    # backend = provider.get_backend(backend_str)
    # qobj = compile(prog, backend, shots)
    # job = backend.run(qobj)
    # job.result()

    backend = Aer.get_backend(backend_str)
    # transpile/schedule -> assemble -> backend.run
    results = execute(prog, backend, shots=shots).result()
    counts = results.get_counts()
    # Most common measurement outcome, reversed to undo qubit ordering.
    a = Counter(counts).most_common(1)[0][0][::-1]

    return {
        "measurements": counts,
        # "state": statevec,
        "quantum_state": quantum_state,
        "a": a,
        "b": b
    }
def bernstein_test_1(rep: str):
    """011 . x + 1 — sample Bernstein-Vazirani hidden-string function."""
    a = "011"
    b = "1"
    return bitwise_xor(bitwise_dot(a, rep), b)

def bernstein_test_2(rep: str):
    """000 . x + 0 — sample Bernstein-Vazirani hidden-string function."""
    a = "000"
    b = "0"
    return bitwise_xor(bitwise_dot(a, rep), b)

def bernstein_test_3(rep: str):
    """111 . x + 1 — sample Bernstein-Vazirani hidden-string function."""
    a = "111"
    b = "1"
    return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
    # Hidden string a and offset b for f(x) = a.x + b (mod 2).
    n = 2
    a = "11"
    b = "1"
    f = lambda rep: \
        bitwise_xor(bitwise_dot(a, rep), b)
    prog = build_circuit(n, f)
    sample_shot =4000
    writefile = open("../data/startQiskit_noisy34.csv", "w")
    # prog.draw('mpl', filename=(kernel + '.png'))
    # Noisy simulation against the FakeYorktown backend model.
    backend = FakeYorktown()

    circuit1 = transpile(prog, FakeYorktown())
    circuit1.h(qubit=2)
    circuit1.x(qubit=3)
    circuit1.measure_all()

    info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()

    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
| 28.629834 | 140 | 0.627171 |
acfa30d3a31292fcd1c175d27b20eab37cbcdbfb | 4,807 | py | Python | ingestion/tests/integration/ometa/test_ometa_model_api.py | inspire99/OpenMetadata | a650aea59a41ab48a9e203af091ae13a1fdf46c2 | [
"Apache-2.0"
] | null | null | null | ingestion/tests/integration/ometa/test_ometa_model_api.py | inspire99/OpenMetadata | a650aea59a41ab48a9e203af091ae13a1fdf46c2 | [
"Apache-2.0"
] | null | null | null | ingestion/tests/integration/ometa/test_ometa_model_api.py | inspire99/OpenMetadata | a650aea59a41ab48a9e203af091ae13a1fdf46c2 | [
"Apache-2.0"
] | null | null | null | """
OpenMetadata high-level API Model test
"""
import uuid
from unittest import TestCase
from metadata.generated.schema.api.data.createModel import CreateModelEntityRequest
from metadata.generated.schema.api.teams.createUser import CreateUserEntityRequest
from metadata.generated.schema.entity.data.model import Model
from metadata.generated.schema.type.entityReference import EntityReference
from metadata.ingestion.ometa.ometa_api import OpenMetadata
from metadata.ingestion.ometa.openmetadata_rest import MetadataServerConfig
class OMetaModelTest(TestCase):
    """
    Run this integration test with the local API available
    Install the ingestion package before running the tests
    """

    # NOTE(review): everything below runs at class-definition (import) time and
    # requires a live OpenMetadata server on localhost:8585.
    server_config = MetadataServerConfig(api_endpoint="http://localhost:8585/api")
    metadata = OpenMetadata(server_config)

    # Fail fast if the server is unreachable.
    assert metadata.health_check()

    # Fixture user that will own the test model.
    user = metadata.create_or_update(
        data=CreateUserEntityRequest(name="random-user", email="random@user.com"),
    )
    owner = EntityReference(id=user.id, type="user")

    # In-memory reference entity used for assertions (not persisted as-is).
    entity = Model(
        id=uuid.uuid4(),
        name="test-model",
        algorithm="algo",
        fullyQualifiedName="test-model",
    )

    # Create/update request used by every test to persist the model.
    create = CreateModelEntityRequest(name="test-model", algorithm="algo")

    def test_create(self):
        """
        We can create a Model and we receive it back as Entity
        """

        res = self.metadata.create_or_update(data=self.create)

        self.assertEqual(res.name, self.entity.name)
        self.assertEqual(res.algorithm, self.entity.algorithm)
        # No owner was set on the create request.
        self.assertEqual(res.owner, None)

    def test_update(self):
        """
        Updating it properly changes its properties
        """

        res_create = self.metadata.create_or_update(data=self.create)

        # Re-issue the create request with an owner added.
        updated = self.create.dict(exclude_unset=True)
        updated["owner"] = self.owner
        updated_entity = CreateModelEntityRequest(**updated)

        res = self.metadata.create_or_update(data=updated_entity)

        # Same ID, updated algorithm
        self.assertEqual(res.algorithm, updated_entity.algorithm)
        self.assertEqual(res_create.id, res.id)
        self.assertEqual(res.owner.id, self.user.id)

        # Getting without owner field does not return it by default
        res_none = self.metadata.get_by_name(
            entity=Model, fqdn=self.entity.fullyQualifiedName
        )
        self.assertIsNone(res_none.owner)

        # We can request specific fields to be added
        res_owner = self.metadata.get_by_name(
            entity=Model,
            fqdn=self.entity.fullyQualifiedName,
            fields=["owner", "followers"],
        )
        self.assertEqual(res_owner.owner.id, self.user.id)

    def test_get_name(self):
        """
        We can fetch a model by name and get it back as Entity
        """

        self.metadata.create_or_update(data=self.create)

        res = self.metadata.get_by_name(
            entity=Model, fqdn=self.entity.fullyQualifiedName
        )
        self.assertEqual(res.name, self.entity.name)

    def test_get_id(self):
        """
        We can fetch a model by ID and get it back as Entity
        """

        self.metadata.create_or_update(data=self.create)

        # First pick up by name
        res_name = self.metadata.get_by_name(
            entity=Model, fqdn=self.entity.fullyQualifiedName
        )
        # Then fetch by ID
        res = self.metadata.get_by_id(entity=Model, entity_id=str(res_name.id.__root__))

        self.assertEqual(res_name.id, res.id)

    def test_list(self):
        """
        We can list all our models
        """

        self.metadata.create_or_update(data=self.create)

        res = self.metadata.list_entities(entity=Model)

        # Fetch our test model. We have already inserted it, so we should find it
        data = next(
            iter(ent for ent in res.entities if ent.name == self.entity.name), None
        )
        assert data

    def test_delete(self):
        """
        We can delete a model by ID
        """

        self.metadata.create_or_update(data=self.create)

        # Find by name
        res_name = self.metadata.get_by_name(
            entity=Model, fqdn=self.entity.fullyQualifiedName
        )
        # Then fetch by ID
        res_id = self.metadata.get_by_id(
            entity=Model, entity_id=str(res_name.id.__root__)
        )

        # Delete
        self.metadata.delete(entity=Model, entity_id=str(res_id.id.__root__))

        # Then we should not find it
        res = self.metadata.list_entities(entity=Model)
        assert not next(
            iter(
                ent
                for ent in res.entities
                if ent.fullyQualifiedName == self.entity.fullyQualifiedName
            ),
            None,
        )
acfa32b61f1ec98819877fc95804d1142dc52c1f | 2,984 | py | Python | s3parcp_download/miniwdl_s3parcp.py | chanzuckerberg/miniwdl-s3parcp | fa1baf802bf71b76dbcbc8922ad04d5e16c21b48 | [
"MIT"
] | 1 | 2020-02-14T15:45:57.000Z | 2020-02-14T15:45:57.000Z | s3parcp_download/miniwdl_s3parcp.py | chanzuckerberg/miniwdl-s3parcp | fa1baf802bf71b76dbcbc8922ad04d5e16c21b48 | [
"MIT"
] | 1 | 2020-02-25T06:04:04.000Z | 2020-03-02T19:37:12.000Z | s3parcp_download/miniwdl_s3parcp.py | chanzuckerberg/miniwdl-s3parcp | fa1baf802bf71b76dbcbc8922ad04d5e16c21b48 | [
"MIT"
] | 1 | 2020-02-19T08:05:04.000Z | 2020-02-19T08:05:04.000Z | """
miniwdl download plugin for s3:// URIs using s3parcp -- https://github.com/chanzuckerberg/s3parcp
Requires s3parcp docker image tag supplied in miniwdl configuration, either via custom cfg file
(section s3parcp, key docker_image) or environment variable MINIWDL__S3PARCP__DOCKER_IMAGE.
Inherits AWS credentials from miniwdl's environment (as detected by boto3).
The plugin is installed using the "entry points" mechanism in setup.py. Furthermore, the miniwdl
configuration [plugins] section has options to enable/disable installed plugins. Installed &
enabled plugins can be observed using miniwdl --version and/or miniwdl run --debug.
"""
import os
import tempfile
import boto3
def main(cfg, logger, uri, **kwargs):
    """miniwdl download-plugin entry point for s3:// URIs.

    Generator-based coroutine: first yields the WDL task source plus its inputs
    for miniwdl to run, then receives the task outputs and yields them back
    unchanged. AWS credentials are materialized into a short-lived env file
    that the task sources before invoking s3parcp.
    """
    # get AWS credentials from boto3
    b3 = boto3.session.Session()
    b3creds = b3.get_credentials()
    aws_credentials = {
        "AWS_ACCESS_KEY_ID": b3creds.access_key,
        "AWS_SECRET_ACCESS_KEY": b3creds.secret_key,
    }
    if b3creds.token:
        # Temporary/STS credentials also carry a session token.
        aws_credentials["AWS_SESSION_TOKEN"] = b3creds.token

    # s3parcp (or perhaps underlying golang AWS lib) seems to require region set to match the
    # bucket's; in contrast to awscli which can conveniently 'figure it out'
    aws_credentials["AWS_REGION"] = b3.region_name if b3.region_name else "us-west-2"

    # format them as env vars to be sourced in the WDL task command
    aws_credentials = "\n".join(f"export {k}='{v}'" for (k, v) in aws_credentials.items())

    # write them to a temp file that'll self-destruct automatically
    # (default /mnt; overridable via the [s3parcp] dir config key)
    temp_dir = "/mnt"
    if cfg.has_option("s3parcp", "dir"):
        temp_dir = cfg["s3parcp"]["dir"]
    with tempfile.NamedTemporaryFile(
        prefix="miniwdl_download_s3parcp_credentials_", delete=True, mode="w", dir=temp_dir
    ) as aws_credentials_file:
        print(aws_credentials, file=aws_credentials_file, flush=True)

        # make file group-readable to ensure it'll be usable if the docker image runs as non-root
        # (0o40 is the group-read permission bit)
        os.chmod(aws_credentials_file.name, os.stat(aws_credentials_file.name).st_mode | 0o40)

        # yield WDL task and inputs (followed by outputs as well)
        recv = yield {
            "task_wdl": wdl,
            "inputs": {
                "uri": uri,
                "aws_credentials": aws_credentials_file.name,
                "docker": cfg["s3parcp"]["docker_image"],
            },
        }

        # yield task outputs (unchanged); the credentials file is deleted once
        # the 'with' block exits.
        yield recv
# WDL task source code
wdl = """
task s3parcp {
input {
String uri
File aws_credentials
String docker
Int cpu = 4
}
command <<<
set -euo pipefail
source "~{aws_credentials}"
mkdir __out
cd __out
# allocating one hardware thread to two concurrent part xfers
s3parcp --checksum -c ~{cpu*2} "~{uri}" .
>>>
output {
File file = glob("__out/*")[0]
}
runtime {
cpu: cpu
memory: "~{cpu}G"
docker: docker
}
}
"""
| 32.791209 | 97 | 0.659182 |
acfa333c2971d2bb2555715f91f9e68bbe0c0213 | 10,333 | py | Python | mypy/argmap.py | anmolrajsoni15/mypy | 94059440a208b1d9b24dc5c621f3bfc96ce1741b | [
"PSF-2.0"
] | null | null | null | mypy/argmap.py | anmolrajsoni15/mypy | 94059440a208b1d9b24dc5c621f3bfc96ce1741b | [
"PSF-2.0"
] | null | null | null | mypy/argmap.py | anmolrajsoni15/mypy | 94059440a208b1d9b24dc5c621f3bfc96ce1741b | [
"PSF-2.0"
] | null | null | null | """Utilities for mapping between actual and formal arguments (and their types)."""
from typing import TYPE_CHECKING, List, Optional, Sequence, Callable, Set
from mypy.maptype import map_instance_to_supertype
from mypy.types import (
Type, Instance, TupleType, AnyType, TypeOfAny, TypedDictType, get_proper_type
)
from mypy import nodes
if TYPE_CHECKING:
from mypy.infer import ArgumentInferContext
def map_actuals_to_formals(actual_kinds: List[nodes.ArgKind],
                           actual_names: Optional[Sequence[Optional[str]]],
                           formal_kinds: List[nodes.ArgKind],
                           formal_names: Sequence[Optional[str]],
                           actual_arg_type: Callable[[int],
                                                     Type]) -> List[List[int]]:
    """Calculate mapping between actual (caller) args and formals.

    The result contains a list of caller argument indexes mapping to each
    callee argument index, indexed by callee index.

    The caller_arg_type argument should evaluate to the type of the actual
    argument type with the given index.
    """
    nformals = len(formal_kinds)
    formal_to_actual: List[List[int]] = [[] for i in range(nformals)]
    # Actual **kwargs whose keys are unknown (not a TypedDict); resolved last.
    ambiguous_actual_kwargs: List[int] = []
    # fi is the index of the next formal available for positional matching.
    fi = 0
    for ai, actual_kind in enumerate(actual_kinds):
        if actual_kind == nodes.ARG_POS:
            if fi < nformals:
                if not formal_kinds[fi].is_star():
                    # Plain positional formal consumes exactly one actual.
                    formal_to_actual[fi].append(ai)
                    fi += 1
                elif formal_kinds[fi] == nodes.ARG_STAR:
                    # *args formal absorbs any number of positionals; stay put.
                    formal_to_actual[fi].append(ai)
        elif actual_kind == nodes.ARG_STAR:
            # We need to know the actual type to map varargs.
            actualt = get_proper_type(actual_arg_type(ai))
            if isinstance(actualt, TupleType):
                # A tuple actual maps to a fixed number of formals.
                for _ in range(len(actualt.items)):
                    if fi < nformals:
                        if formal_kinds[fi] != nodes.ARG_STAR2:
                            formal_to_actual[fi].append(ai)
                        else:
                            break
                        # A *args formal keeps consuming; others advance.
                        if formal_kinds[fi] != nodes.ARG_STAR:
                            fi += 1
            else:
                # Assume that it is an iterable (if it isn't, there will be
                # an error later).
                while fi < nformals:
                    if formal_kinds[fi].is_named(star=True):
                        break
                    else:
                        formal_to_actual[fi].append(ai)
                    if formal_kinds[fi] == nodes.ARG_STAR:
                        break
                    fi += 1
        elif actual_kind.is_named():
            assert actual_names is not None, "Internal error: named kinds without names given"
            name = actual_names[ai]
            if name in formal_names:
                formal_to_actual[formal_names.index(name)].append(ai)
            elif nodes.ARG_STAR2 in formal_kinds:
                # Unknown keyword falls through to the **kwargs formal.
                formal_to_actual[formal_kinds.index(nodes.ARG_STAR2)].append(ai)
        else:
            assert actual_kind == nodes.ARG_STAR2
            actualt = get_proper_type(actual_arg_type(ai))
            if isinstance(actualt, TypedDictType):
                # TypedDict **kwargs: keys are known, map each one precisely.
                for name in actualt.items:
                    if name in formal_names:
                        formal_to_actual[formal_names.index(name)].append(ai)
                    elif nodes.ARG_STAR2 in formal_kinds:
                        formal_to_actual[formal_kinds.index(nodes.ARG_STAR2)].append(ai)
            else:
                # We don't exactly know which **kwargs are provided by the
                # caller, so we'll defer until all the other unambiguous
                # actuals have been processed
                ambiguous_actual_kwargs.append(ai)

    if ambiguous_actual_kwargs:
        # Assume the ambiguous kwargs will fill the remaining arguments.
        #
        # TODO: If there are also tuple varargs, we might be missing some potential
        #       matches if the tuple was short enough to not match everything.
        unmatched_formals = [fi for fi in range(nformals)
                             if (formal_names[fi]
                                 and (not formal_to_actual[fi]
                                      or actual_kinds[formal_to_actual[fi][0]] == nodes.ARG_STAR)
                                 and formal_kinds[fi] != nodes.ARG_STAR)
                             or formal_kinds[fi] == nodes.ARG_STAR2]
        for ai in ambiguous_actual_kwargs:
            for fi in unmatched_formals:
                formal_to_actual[fi].append(ai)

    return formal_to_actual
def map_formals_to_actuals(actual_kinds: List[nodes.ArgKind],
                           actual_names: Optional[Sequence[Optional[str]]],
                           formal_kinds: List[nodes.ArgKind],
                           formal_names: List[Optional[str]],
                           actual_arg_type: Callable[[int],
                                                     Type]) -> List[List[int]]:
    """Calculate the reverse mapping of map_actuals_to_formals."""
    forward = map_actuals_to_formals(actual_kinds,
                                     actual_names,
                                     formal_kinds,
                                     formal_names,
                                     actual_arg_type)
    # Invert: for every formal index, record it under each actual it maps to.
    reverse: List[List[int]] = [[] for _ in actual_kinds]
    for formal_index, actual_indexes in enumerate(forward):
        for actual_index in actual_indexes:
            reverse[actual_index].append(formal_index)
    return reverse
class ArgTypeExpander:
    """Utility class for mapping actual argument types to formal arguments.

    One of the main responsibilities is to expand caller tuple *args and TypedDict
    **kwargs, and to keep track of which tuple/TypedDict items have already been
    consumed.

    Example:

       def f(x: int, *args: str) -> None: ...
       f(*(1, 'x', 1.1))

    We'd call expand_actual_type three times:

      1. The first call would provide 'int' as the actual type of 'x' (from '1').
      2. The second call would provide 'str' as one of the actual types for '*args'.
      2. The third call would provide 'float' as one of the actual types for '*args'.

    A single instance can process all the arguments for a single call. Each call
    needs a separate instance since instances have per-call state.
    """

    def __init__(self, context: 'ArgumentInferContext') -> None:
        # Next tuple *args index to use.
        self.tuple_index = 0
        # Keyword arguments in TypedDict **kwargs used.
        self.kwargs_used: Set[str] = set()
        # Type context for `*` and `**` arg kinds.
        self.context = context

    def expand_actual_type(self,
                           actual_type: Type,
                           actual_kind: nodes.ArgKind,
                           formal_name: Optional[str],
                           formal_kind: nodes.ArgKind) -> Type:
        """Return the actual (caller) type(s) of a formal argument with the given kinds.

        If the actual argument is a tuple *args, return the next individual tuple item that
        maps to the formal arg.

        If the actual argument is a TypedDict **kwargs, return the next matching typed dict
        value type based on formal argument name and kind.

        This is supposed to be called for each formal, in order. Call multiple times per
        formal if multiple actuals map to a formal.
        """
        actual_type = get_proper_type(actual_type)
        if actual_kind == nodes.ARG_STAR:
            if isinstance(actual_type, Instance) and actual_type.args:
                from mypy.subtypes import is_subtype
                if is_subtype(actual_type, self.context.iterable_type):
                    # Unpack an Iterable[T] actual: each item has type T.
                    return map_instance_to_supertype(
                        actual_type,
                        self.context.iterable_type.type,
                    ).args[0]
                else:
                    # We cannot properly unpack anything other
                    # than `Iterable` type with `*`.
                    # Just return `Any`, other parts of code would raise
                    # a different error for improper use.
                    return AnyType(TypeOfAny.from_error)
            elif isinstance(actual_type, TupleType):
                # Get the next tuple item of a tuple *arg.
                if self.tuple_index >= len(actual_type.items):
                    # Exhausted a tuple -- continue to the next *args.
                    # (Resets to item 0; subsequent calls continue from there.)
                    self.tuple_index = 1
                else:
                    self.tuple_index += 1
                return actual_type.items[self.tuple_index - 1]
            else:
                return AnyType(TypeOfAny.from_error)
        elif actual_kind == nodes.ARG_STAR2:
            from mypy.subtypes import is_subtype
            if isinstance(actual_type, TypedDictType):
                if formal_kind != nodes.ARG_STAR2 and formal_name in actual_type.items:
                    # Lookup type based on keyword argument name.
                    assert formal_name is not None
                else:
                    # Pick an arbitrary item if no specified keyword is expected.
                    formal_name = (set(actual_type.items.keys()) - self.kwargs_used).pop()
                self.kwargs_used.add(formal_name)
                return actual_type.items[formal_name]
            elif (
                isinstance(actual_type, Instance) and
                len(actual_type.args) > 1 and
                is_subtype(actual_type, self.context.mapping_type)
            ):
                # Only `Mapping` type can be unpacked with `**`.
                # Other types will produce an error somewhere else.
                # Value type is the second type argument of Mapping[K, V].
                return map_instance_to_supertype(
                    actual_type,
                    self.context.mapping_type.type,
                ).args[1]
            else:
                return AnyType(TypeOfAny.from_error)
        else:
            # No translation for other kinds -- 1:1 mapping.
            return actual_type
| 46.336323 | 97 | 0.562954 |
acfa33e67e81157599ce50fa9c8d642c5917c379 | 30,356 | py | Python | patch/mf.py | yabhinav/FeedNotifier | d0b4b1b1623e5b0abcfa7f79cf24442c0d682754 | [
"BSD-3-Clause"
] | 118 | 2015-01-07T20:14:59.000Z | 2022-02-24T20:57:35.000Z | patch/mf.py | yabhinav/FeedNotifier | d0b4b1b1623e5b0abcfa7f79cf24442c0d682754 | [
"BSD-3-Clause"
] | 50 | 2015-01-12T08:01:04.000Z | 2021-05-24T05:29:40.000Z | patch/mf.py | yabhinav/FeedNotifier | d0b4b1b1623e5b0abcfa7f79cf24442c0d682754 | [
"BSD-3-Clause"
] | 42 | 2015-01-13T10:39:21.000Z | 2022-03-25T08:54:37.000Z | """Find modules used by a script, using introspection."""
# This module should be kept compatible with Python 2.2, see PEP 291.
from __future__ import generators
import dis
import imp
import marshal
import os
import sys
import types
import struct
# Choose the source-file read mode: universal-newline mode when supported so
# \r\n / \r line endings are normalized while scanning source files.
if hasattr(sys.__stdout__, "newlines"):
    READ_MODE = "U"  # universal line endings
else:
    # remain compatible with Python < 2.3
    READ_MODE = "r"

# One-character strings for the opcodes the bytecode scanner matches against.
# NOTE: this relies on CPython 2's bytecode layout, where co_code is a str
# and each opcode is a single byte.
LOAD_CONST = chr(dis.opname.index('LOAD_CONST'))
IMPORT_NAME = chr(dis.opname.index('IMPORT_NAME'))
STORE_NAME = chr(dis.opname.index('STORE_NAME'))
STORE_GLOBAL = chr(dis.opname.index('STORE_GLOBAL'))
# Opcodes that bind a global name in the scanned module.
STORE_OPS = [STORE_NAME, STORE_GLOBAL]
HAVE_ARGUMENT = chr(dis.HAVE_ARGUMENT)
# !!! NOTE BEFORE INCLUDING IN PYTHON DISTRIBUTION !!!
# To clear up issues caused by the duplication of data structures between
# the real Python modulefinder and this duplicate version, packagePathMap
# and replacePackageMap are imported from the actual modulefinder. This
# should be changed back to the assigments that are commented out below.
# There are also py2exe specific pieces at the bottom of this file.
# Modulefinder does a good job at simulating Python's, but it can not
# handle __path__ modifications packages make at runtime. Therefore there
# is a mechanism whereby you can register extra paths in this map for a
# package, and it will be honored.
# Note this is a mapping is lists of paths.
#~ packagePathMap = {}
from modulefinder import packagePathMap
# A Public interface
def AddPackagePath(packagename, path):
    """Register an extra directory to search for packagename's submodules."""
    packagePathMap.setdefault(packagename, []).append(path)
#~ replacePackageMap = {}
from modulefinder import replacePackageMap
# This ReplacePackage mechanism allows modulefinder to work around the
# way the _xmlplus package injects itself under the name "xml" into
# sys.modules at runtime by calling ReplacePackage("_xmlplus", "xml")
# before running ModuleFinder.
def ReplacePackage(oldname, newname):
    """Record that imports of oldname should be tracked under newname."""
    replacePackageMap.update({oldname: newname})
class Module:
    """Record for one discovered module: name, origin, code, and global names."""

    def __init__(self, name, file=None, path=None):
        self.__name__ = name
        self.__file__ = file
        self.__path__ = path
        self.__code__ = None
        # The set of global names that are assigned to in the module,
        # including names pulled in via starimports of Python modules.
        self.globalnames = {}
        # Starimports this module did that could not be resolved,
        # i.e. a starimport from a non-Python module.
        self.starimports = {}

    def __repr__(self):
        # Show only the attributes that were actually provided.
        parts = [repr(self.__name__)]
        if self.__file__ is not None:
            parts.append(repr(self.__file__))
        if self.__path__ is not None:
            parts.append(repr(self.__path__))
        return "Module(" + ", ".join(parts) + ")"
class ModuleFinder:
    # Walks the bytecode of a script and everything it (transitively) imports,
    # recording discovered modules in self.modules and unresolvable imports in
    # self.badmodules. It mirrors the interpreter's import machinery closely
    # enough to simulate package / relative imports without executing the code.
    # NOTE: Python 2 only ('print' statements, 'raise E, msg', tuple params,
    # dict.has_key, str-typed co_code).

    def __init__(self, path=None, debug=0, excludes=[], replace_paths=[], skip_scan=[]):
        # NOTE(review): mutable default arguments are shared across instances;
        # harmless only while callers never mutate them.
        if path is None:
            path = sys.path
        self.path = path
        self.modules = {}        # fully qualified name -> Module
        self.badmodules = {}     # failed name -> {importer name: 1}
        self.debug = debug       # verbosity level for msg()/msgin()/msgout()
        self.indent = 0          # current debug-output indentation depth
        self.excludes = excludes
        self.replace_paths = replace_paths
        self.skip_scan = skip_scan  # module names whose code is never scanned
        self.processed_paths = []   # Used in debugging only

    def msg(self, level, str, *args):
        # Emit a debug line if 'level' is within the configured verbosity.
        if level <= self.debug:
            for i in range(self.indent):
                print "   ",
            print str,
            for arg in args:
                print repr(arg),
            print

    def msgin(self, *args):
        # Like msg(), but also increases the debug indentation (call entry).
        level = args[0]
        if level <= self.debug:
            self.indent = self.indent + 1
            self.msg(*args)

    def msgout(self, *args):
        # Like msg(), but also decreases the debug indentation (call exit).
        level = args[0]
        if level <= self.debug:
            self.indent = self.indent - 1
            self.msg(*args)

    def run_script(self, pathname):
        # Analyze a top-level script: it becomes the '__main__' module.
        self.msg(2, "run_script", pathname)
        fp = open(pathname, READ_MODE)
        stuff = ("", "r", imp.PY_SOURCE)
        self.load_module('__main__', fp, pathname, stuff)

    def load_file(self, pathname):
        # Analyze a single source file under its own basename.
        dir, name = os.path.split(pathname)
        name, ext = os.path.splitext(name)
        fp = open(pathname, READ_MODE)
        stuff = (ext, "r", imp.PY_SOURCE)
        self.load_module(name, fp, pathname, stuff)

    def import_hook(self, name, caller=None, fromlist=None, level=-1):
        # Simulates __import__: resolves 'name' relative to 'caller',
        # loading the head package, the dotted tail, and any fromlist names.
        self.msg(3, "import_hook", name, caller, fromlist, level)
        parent = self.determine_parent(caller, level=level)
        q, tail = self.find_head_package(parent, name)
        m = self.load_tail(q, tail)
        if not fromlist:
            return q
        if m.__path__:
            self.ensure_fromlist(m, fromlist)
        return None

    def determine_parent(self, caller, level=-1):
        # Work out which already-loaded package an import is relative to,
        # honoring explicit relative-import levels (Python 2.5+ semantics).
        self.msgin(4, "determine_parent", caller, level)
        if not caller or level == 0:
            self.msgout(4, "determine_parent -> None")
            return None
        pname = caller.__name__
        if level >= 1: # relative import
            if caller.__path__:
                level -= 1
            if level == 0:
                parent = self.modules[pname]
                assert parent is caller
                self.msgout(4, "determine_parent ->", parent)
                return parent
            if pname.count(".") < level:
                raise ImportError, "relative importpath too deep"
            pname = ".".join(pname.split(".")[:-level])
            parent = self.modules[pname]
            self.msgout(4, "determine_parent ->", parent)
            return parent
        if caller.__path__:
            parent = self.modules[pname]
            assert caller is parent
            self.msgout(4, "determine_parent ->", parent)
            return parent
        if '.' in pname:
            i = pname.rfind('.')
            pname = pname[:i]
            parent = self.modules[pname]
            assert parent.__name__ == pname
            self.msgout(4, "determine_parent ->", parent)
            return parent
        self.msgout(4, "determine_parent -> None")
        return None

    def find_head_package(self, parent, name):
        # Import the first component of a dotted name, first relative to
        # 'parent', then as an absolute import; return (module, rest).
        self.msgin(4, "find_head_package", parent, name)
        if '.' in name:
            i = name.find('.')
            head = name[:i]
            tail = name[i+1:]
        else:
            head = name
            tail = ""
        if parent:
            qname = "%s.%s" % (parent.__name__, head)
        else:
            qname = head
        q = self.import_module(head, qname, parent)
        if q:
            self.msgout(4, "find_head_package ->", (q, tail))
            return q, tail
        if parent:
            qname = head
            parent = None
            q = self.import_module(head, qname, parent)
            if q:
                self.msgout(4, "find_head_package ->", (q, tail))
                return q, tail
        self.msgout(4, "raise ImportError: No module named", qname)
        raise ImportError, "No module named " + qname

    def load_tail(self, q, tail):
        # Import each remaining dotted component of the name, in order.
        self.msgin(4, "load_tail", q, tail)
        m = q
        while tail:
            i = tail.find('.')
            if i < 0: i = len(tail)
            head, tail = tail[:i], tail[i+1:]
            mname = "%s.%s" % (m.__name__, head)
            m = self.import_module(head, mname, m)
            if not m:
                self.msgout(4, "raise ImportError: No module named", mname)
                raise ImportError, "No module named " + mname
        self.msgout(4, "load_tail ->", m)
        return m

    def ensure_fromlist(self, m, fromlist, recursive=0):
        # Make sure every name in a 'from m import a, b' list that refers to
        # a submodule is loaded; '*' expands to all submodules (one level).
        self.msg(4, "ensure_fromlist", m, fromlist, recursive)
        for sub in fromlist:
            if sub == "*":
                if not recursive:
                    all = self.find_all_submodules(m)
                    if all:
                        self.ensure_fromlist(m, all, 1)
            elif not hasattr(m, sub):
                subname = "%s.%s" % (m.__name__, sub)
                submod = self.import_module(sub, subname, m)
                if not submod:
                    raise ImportError, "No module named " + subname

    def find_all_submodules(self, m):
        # List the names of all potential submodules found on disk under
        # the package's __path__ directories.
        if not m.__path__:
            return
        modules = {}
        # 'suffixes' used to be a list hardcoded to [".py", ".pyc", ".pyo"].
        # But we must also collect Python extension modules - although
        # we cannot separate normal dlls from Python extensions.
        suffixes = []
        for triple in imp.get_suffixes():
            suffixes.append(triple[0])
        for dir in m.__path__:
            try:
                names = os.listdir(dir)
            except os.error:
                self.msg(2, "can't list directory", dir)
                continue
            for name in names:
                mod = None
                for suff in suffixes:
                    n = len(suff)
                    if name[-n:] == suff:
                        mod = name[:-n]
                        break
                if mod and mod != "__init__":
                    modules[mod] = mod
        return modules.keys()

    def import_module(self, partname, fqname, parent):
        # Core single-module import: consult caches, locate the file, load it,
        # and bind it as an attribute on its parent package. Returns the
        # Module, or None on any failure (recorded elsewhere).
        self.msgin(3, "import_module", partname, fqname, parent)
        try:
            m = self.modules[fqname]
        except KeyError:
            pass
        else:
            self.msgout(3, "import_module ->", m)
            return m
        if self.badmodules.has_key(fqname):
            self.msgout(3, "import_module -> None")
            return None
        if parent and parent.__path__ is None:
            # Parent is a plain module, so it cannot contain submodules.
            self.msgout(3, "import_module -> None")
            return None
        try:
            fp, pathname, stuff = self.find_module(partname,
                                                   parent and parent.__path__, parent)
        except ImportError:
            self.msgout(3, "import_module ->", None)
            return None
        try:
            m = self.load_module(fqname, fp, pathname, stuff)
        finally:
            if fp: fp.close()
        if parent:
            setattr(parent, partname, m)
        self.msgout(3, "import_module ->", m)
        return m

    def load_module(self, fqname, fp, pathname, (suffix, mode, type)):
        # Compile (source) or unmarshal (.pyc) the module's code object and
        # scan it for further imports. Packages are delegated to load_package.
        self.msgin(2, "load_module", fqname, fp and "fp", pathname)
        if type == imp.PKG_DIRECTORY:
            m = self.load_package(fqname, pathname)
            self.msgout(2, "load_module ->", m)
            return m
        if type == imp.PY_SOURCE:
            co = compile(fp.read()+'\n', pathname, 'exec')
        elif type == imp.PY_COMPILED:
            if fp.read(4) != imp.get_magic():
                self.msgout(2, "raise ImportError: Bad magic number", pathname)
                raise ImportError, "Bad magic number in %s" % pathname
            fp.read(4)  # skip the .pyc timestamp
            co = marshal.load(fp)
        else:
            co = None  # e.g. C extension: nothing to scan
        m = self.add_module(fqname)
        m.__file__ = pathname
        if co:
            if self.replace_paths:
                co = self.replace_paths_in_code(co)
            m.__code__ = co
            self.scan_code(co, m)
        self.msgout(2, "load_module ->", m)
        return m

    def _add_badmodule(self, name, caller):
        # Record a failed import, keyed by who tried to import it
        # ("-" when there was no identifiable caller).
        if name not in self.badmodules:
            self.badmodules[name] = {}
        if caller:
            self.badmodules[name][caller.__name__] = 1
        else:
            self.badmodules[name]["-"] = 1

    def _safe_import_hook(self, name, caller, fromlist, level=-1):
        # wrapper for self.import_hook() that won't raise ImportError
        if name in self.badmodules:
            self._add_badmodule(name, caller)
            return
        try:
            self.import_hook(name, caller, level=level)
        except ImportError, msg:
            self.msg(2, "ImportError:", str(msg))
            self._add_badmodule(name, caller)
        else:
            if fromlist:
                for sub in fromlist:
                    if sub in self.badmodules:
                        self._add_badmodule(sub, caller)
                        continue
                    try:
                        self.import_hook(name, caller, [sub], level=level)
                    except ImportError, msg:
                        self.msg(2, "ImportError:", str(msg))
                        fullname = name + "." + sub
                        self._add_badmodule(fullname, caller)

    def scan_opcodes(self, co,
                     unpack = struct.unpack):
        # Scan the code, and yield 'interesting' opcode combinations
        # Version for Python 2.4 and older
        code = co.co_code
        names = co.co_names
        consts = co.co_consts
        while code:
            c = code[0]
            if c in STORE_OPS:
                oparg, = unpack('<H', code[1:3])
                yield "store", (names[oparg],)
                code = code[3:]
                continue
            # LOAD_CONST <fromlist> ; IMPORT_NAME <name> is an import statement.
            if c == LOAD_CONST and code[3] == IMPORT_NAME:
                oparg_1, oparg_2 = unpack('<xHxH', code[:6])
                yield "import", (consts[oparg_1], names[oparg_2])
                code = code[6:]
                continue
            if c >= HAVE_ARGUMENT:
                code = code[3:]
            else:
                code = code[1:]

    def scan_opcodes_25(self, co,
                        unpack = struct.unpack):
        # Scan the code, and yield 'interesting' opcode combinations
        # Python 2.5 version (has absolute and relative imports)
        code = co.co_code
        names = co.co_names
        consts = co.co_consts
        # Imports compile to LOAD_CONST <level>; LOAD_CONST <fromlist>;
        # IMPORT_NAME <name> from 2.5 on.
        LOAD_LOAD_AND_IMPORT = LOAD_CONST + LOAD_CONST + IMPORT_NAME
        while code:
            c = code[0]
            if c in STORE_OPS:
                oparg, = unpack('<H', code[1:3])
                yield "store", (names[oparg],)
                code = code[3:]
                continue
            if code[:9:3] == LOAD_LOAD_AND_IMPORT:
                oparg_1, oparg_2, oparg_3 = unpack('<xHxHxH', code[:9])
                level = consts[oparg_1]
                if level == -1: # normal import
                    yield "import", (consts[oparg_2], names[oparg_3])
                elif level == 0: # absolute import
                    yield "absolute_import", (consts[oparg_2], names[oparg_3])
                else: # relative import
                    yield "relative_import", (level, consts[oparg_2], names[oparg_3])
                code = code[9:]
                continue
            if c >= HAVE_ARGUMENT:
                code = code[3:]
            else:
                code = code[1:]

    def scan_code(self, co, m):
        # Walk a code object (and, recursively, code objects in its consts)
        # acting on the store/import events produced by the opcode scanner.
        if m.__name__ in self.skip_scan:
            return
        code = co.co_code
        if sys.version_info >= (2, 5):
            scanner = self.scan_opcodes_25
        else:
            scanner = self.scan_opcodes
        for what, args in scanner(co):
            if what == "store":
                name, = args
                m.globalnames[name] = 1
            elif what in ("import", "absolute_import"):
                fromlist, name = args
                have_star = 0
                if fromlist is not None:
                    if "*" in fromlist:
                        have_star = 1
                    fromlist = [f for f in fromlist if f != "*"]
                if what == "absolute_import": level = 0
                else: level = -1
                self._safe_import_hook(name, m, fromlist, level=level)
                if have_star:
                    # We've encountered an "import *". If it is a Python module,
                    # the code has already been parsed and we can suck out the
                    # global names.
                    mm = None
                    if m.__path__:
                        # At this point we don't know whether 'name' is a
                        # submodule of 'm' or a global module. Let's just try
                        # the full name first.
                        mm = self.modules.get(m.__name__ + "." + name)
                    if mm is None:
                        mm = self.modules.get(name)
                    if mm is not None:
                        m.globalnames.update(mm.globalnames)
                        m.starimports.update(mm.starimports)
                        if mm.__code__ is None:
                            m.starimports[name] = 1
                    else:
                        m.starimports[name] = 1
            elif what == "relative_import":
                level, fromlist, name = args
                if name:
                    self._safe_import_hook(name, m, fromlist, level=level)
                else:
                    parent = self.determine_parent(m, level=level)
                    self._safe_import_hook(parent.__name__, None, fromlist, level=0)
            else:
                # We don't expect anything else from the generator.
                raise RuntimeError(what)

        for c in co.co_consts:
            if isinstance(c, type(co)):
                self.scan_code(c, m)

    def load_package(self, fqname, pathname):
        # Load a package directory: honor ReplacePackage renames, simulate
        # runtime __path__ additions, then load its __init__.
        self.msgin(2, "load_package", fqname, pathname)
        newname = replacePackageMap.get(fqname)
        if newname:
            fqname = newname
        m = self.add_module(fqname)
        m.__file__ = pathname
        m.__path__ = [pathname]

        # As per comment at top of file, simulate runtime __path__ additions.
        m.__path__ = m.__path__ + packagePathMap.get(fqname, [])

        fp, buf, stuff = self.find_module("__init__", m.__path__)
        self.load_module(fqname, fp, buf, stuff)
        self.msgout(2, "load_package ->", m)
        return m

    def add_module(self, fqname):
        # Get-or-create the Module record for a fully qualified name.
        if self.modules.has_key(fqname):
            return self.modules[fqname]
        self.modules[fqname] = m = Module(fqname)
        return m

    def find_module(self, name, path, parent=None):
        # Locate a module on disk via imp.find_module, honoring the excludes
        # list and treating builtins specially when no package path is given.
        if parent is not None:
            # assert path is not None
            fullname = parent.__name__+'.'+name
        else:
            fullname = name
        if fullname in self.excludes:
            self.msgout(3, "find_module -> Excluded", fullname)
            raise ImportError, name

        if path is None:
            if name in sys.builtin_module_names:
                return (None, None, ("", "", imp.C_BUILTIN))

            path = self.path
        return imp.find_module(name, path)

    def report(self):
        """Print a report to stdout, listing the found modules with their
        paths, as well as modules that are missing, or seem to be missing.
        """
        print
        print "  %-25s %s" % ("Name", "File")
        print "  %-25s %s" % ("----", "----")
        # Print modules found
        keys = self.modules.keys()
        keys.sort()
        for key in keys:
            m = self.modules[key]
            # 'P' marks a package, 'm' a plain module.
            if m.__path__:
                print "P",
            else:
                print "m",
            print "%-25s" % key, m.__file__ or ""

        # Print missing modules
        missing, maybe = self.any_missing_maybe()
        if missing:
            print
            print "Missing modules:"
            for name in missing:
                mods = self.badmodules[name].keys()
                mods.sort()
                print "?", name, "imported from", ', '.join(mods)
        # Print modules that may be missing, but then again, maybe not...
        if maybe:
            print
            print "Submodules thay appear to be missing, but could also be",
            print "global names in the parent package:"
            for name in maybe:
                mods = self.badmodules[name].keys()
                mods.sort()
                print "?", name, "imported from", ', '.join(mods)

    def any_missing(self):
        """Return a list of modules that appear to be missing. Use
        any_missing_maybe() if you want to know which modules are
        certain to be missing, and which *may* be missing.
        """
        missing, maybe = self.any_missing_maybe()
        return missing + maybe

    def any_missing_maybe(self):
        """Return two lists, one with modules that are certainly missing
        and one with modules that *may* be missing. The latter names could
        either be submodules *or* just global names in the package.

        The reason it can't always be determined is that it's impossible to
        tell which names are imported when "from module import *" is done
        with an extension module, short of actually importing it.
        """
        missing = []
        maybe = []
        for name in self.badmodules:
            if name in self.excludes:
                continue
            i = name.rfind(".")
            if i < 0:
                missing.append(name)
                continue
            subname = name[i+1:]
            pkgname = name[:i]
            pkg = self.modules.get(pkgname)
            if pkg is not None:
                if pkgname in self.badmodules[name]:
                    # The package tried to import this module itself and
                    # failed. It's definitely missing.
                    missing.append(name)
                elif subname in pkg.globalnames:
                    # It's a global in the package: definitely not missing.
                    pass
                elif pkg.starimports:
                    # It could be missing, but the package did an "import *"
                    # from a non-Python module, so we simply can't be sure.
                    maybe.append(name)
                else:
                    # It's not a global in the package, the package didn't
                    # do funny star imports, it's very likely to be missing.
                    # The symbol could be inserted into the package from the
                    # outside, but since that's not good style we simply list
                    # it missing.
                    missing.append(name)
            else:
                missing.append(name)
        missing.sort()
        maybe.sort()
        return missing, maybe

    def replace_paths_in_code(self, co):
        # Rewrite co_filename according to self.replace_paths (prefix swaps),
        # recursing into nested code objects.
        new_filename = original_filename = os.path.normpath(co.co_filename)
        for f, r in self.replace_paths:
            if original_filename.startswith(f):
                new_filename = r + original_filename[len(f):]
                break

        if self.debug and original_filename not in self.processed_paths:
            if new_filename != original_filename:
                self.msgout(2, "co_filename %r changed to %r" \
                                    % (original_filename,new_filename,))
            else:
                self.msgout(2, "co_filename %r remains unchanged" \
                                    % (original_filename,))
            self.processed_paths.append(original_filename)

        consts = list(co.co_consts)
        for i in range(len(consts)):
            if isinstance(consts[i], type(co)):
                consts[i] = self.replace_paths_in_code(consts[i])

        # Rebuild the code object with the (possibly) rewritten filename.
        return types.CodeType(co.co_argcount, co.co_nlocals, co.co_stacksize,
                         co.co_flags, co.co_code, tuple(consts), co.co_names,
                         co.co_varnames, new_filename, co.co_name,
                         co.co_firstlineno, co.co_lnotab,
                         co.co_freevars, co.co_cellvars)
def test():
    """Command-line driver: parse options, run a ModuleFinder over the
    given script(s), and print the resulting report.

    Options: -d (increase debug), -m (treat remaining args as modules),
    -p PATH (prepend to search path), -q (quiet), -x NAME (exclude module).
    Python 2 only (uses the old except/print syntax).
    """
    # Parse command line
    import getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], "dmp:qx:")
    except getopt.error, msg:
        print msg
        return
    # Process options
    debug = 1
    domods = 0
    addpath = []
    exclude = []
    for o, a in opts:
        if o == '-d':
            debug = debug + 1
        if o == '-m':
            domods = 1
        if o == '-p':
            addpath = addpath + a.split(os.pathsep)
        if o == '-q':
            debug = 0
        if o == '-x':
            exclude.append(a)
    # Provide default arguments
    if not args:
        script = "hello.py"
    else:
        script = args[0]
    # Set the path based on sys.path and the script directory
    path = sys.path[:]
    path[0] = os.path.dirname(script)
    path = addpath + path
    if debug > 1:
        print "path:"
        for item in path:
            print "   ", repr(item)
    # Create the module finder and turn its crank
    mf = ModuleFinder(path, debug, exclude)
    # A '-m' appearing among the trailing args switches into module mode
    # for the remaining arguments.
    for arg in args[1:]:
        if arg == '-m':
            domods = 1
            continue
        if domods:
            if arg[-2:] == '.*':
                mf.import_hook(arg[:-2], None, ["*"])
            else:
                mf.import_hook(arg)
        else:
            mf.load_file(arg)
    mf.run_script(script)
    mf.report()
    return mf  # for -i debugging
# Script entry point: run the driver and swallow Ctrl-C with a short marker
# instead of a traceback.
if __name__ == '__main__':
    try:
        mf = test()
    except KeyboardInterrupt:
        print "\n[interrupt]"
# py2exe specific portion - this should be removed before inclusion in the
# Python distribution
import tempfile
import urllib
# Provide a `set` builtin on pre-2.4 interpreters via the legacy `sets` module.
try:
    set
except NameError:
    from sets import Set as set
# Keep a reference to the stock ModuleFinder under the name `Base`, then
# free the name so the py2exe subclass below can reuse it.
Base = ModuleFinder
del ModuleFinder
# Much inspired by Toby Dickenson's code:
# http://www.tarind.com/depgraph.html
class ModuleFinder(Base):
    """py2exe extension of the stock ModuleFinder.

    Records a dependency graph (who imports whom), the `imp` type of each
    loaded module, and every script pathname, so that create_xref() can emit
    an HTML cross-reference report.  Python 2 only (tuple parameters,
    `print >>` syntax).
    """
    def __init__(self, *args, **kw):
        # module name -> set of module names it imports
        self._depgraph = {}
        # module name -> imp type constant (see TYPES below)
        self._types = {}
        # module object currently performing an import, if any
        self._last_caller = None
        # all script pathnames passed to run_script()
        self._scripts = set()
        Base.__init__(self, *args, **kw)
    def run_script(self, pathname):
        """Record the script pathname, then delegate to the base class."""
        # Scripts always end in the __main__ module, but we possibly
        # have more than one script in py2exe, so we want to keep
        # *all* the pathnames.
        self._scripts.add(pathname)
        Base.run_script(self, pathname)
    def import_hook(self, name, caller=None, fromlist=None, level=-1):
        """Track the importing module for the duration of the import."""
        old_last_caller = self._last_caller
        try:
            self._last_caller = caller
            return Base.import_hook(self,name,caller,fromlist,level)
        finally:
            # Restore on all paths so nested imports stay correctly attributed.
            self._last_caller = old_last_caller
    def import_module(self,partnam,fqname,parent):
        """Delegate to the base class and record the dependency edge."""
        r = Base.import_module(self,partnam,fqname,parent)
        if r is not None and self._last_caller:
            self._depgraph.setdefault(self._last_caller.__name__, set()).add(r.__name__)
        return r
    def load_module(self, fqname, fp, pathname, (suffix, mode, typ)):
        """Delegate to the base class and remember the module's imp type."""
        r = Base.load_module(self, fqname, fp, pathname, (suffix, mode, typ))
        if r is not None:
            self._types[r.__name__] = typ
        return r
    def create_xref(self):
        """Write an HTML cross-reference of the dependency graph to a temp
        file and open it in the default browser via os.startfile (Windows).
        """
        # this code probably needs cleanup
        depgraph = {}
        importedby = {}
        # Invert the dependency graph so each module also knows its importers.
        for name, value in self._depgraph.items():
            depgraph[name] = list(value)
            for needs in value:
                importedby.setdefault(needs, set()).add(name)
        names = self._types.keys()
        names.sort()
        fd, htmlfile = tempfile.mkstemp(".html")
        ofi = open(htmlfile, "w")
        # Close the low-level fd from mkstemp; `ofi` holds its own handle.
        os.close(fd)
        print >> ofi, "<html><title>py2exe cross reference for %s</title><body>" % sys.argv[0]
        print >> ofi, "<h1>py2exe cross reference for %s</h1>" % sys.argv[0]
        for name in names:
            if self._types[name] in (imp.PY_SOURCE, imp.PKG_DIRECTORY):
                print >> ofi, '<a name="%s"><b><tt>%s</tt></b></a>' % (name, name)
                if name == "__main__":
                    # __main__ may correspond to several scripts; link them all.
                    for fname in self._scripts:
                        path = urllib.pathname2url(os.path.abspath(fname))
                        print >> ofi, '<a target="code" href="%s" type="text/plain"><tt>%s</tt></a> ' \
                              % (path, fname)
                    print >> ofi, '<br>imports:'
                else:
                    fname = urllib.pathname2url(self.modules[name].__file__)
                    print >> ofi, '<a target="code" href="%s" type="text/plain"><tt>%s</tt></a><br>imports:' \
                          % (fname, self.modules[name].__file__)
            else:
                # Non-source modules: show the file if known, else the type label.
                fname = self.modules[name].__file__
                if fname:
                    print >> ofi, '<a name="%s"><b><tt>%s</tt></b></a> <tt>%s</tt><br>imports:' \
                          % (name, name, fname)
                else:
                    print >> ofi, '<a name="%s"><b><tt>%s</tt></b></a> <i>%s</i><br>imports:' \
                          % (name, name, TYPES[self._types[name]])
            if name in depgraph:
                needs = depgraph[name]
                for n in needs:
                    print >> ofi, '<a href="#%s"><tt>%s</tt></a> ' % (n, n)
            print >> ofi, "<br>\n"
            print >> ofi, 'imported by:'
            if name in importedby:
                for i in importedby[name]:
                    print >> ofi, '<a href="#%s"><tt>%s</tt></a> ' % (i, i)
            print >> ofi, "<br>\n"
            print >> ofi, "<br>\n"
        print >> ofi, "</body></html>"
        ofi.close()
        os.startfile(htmlfile)
        # how long does it take to start the browser?
        import threading
        threading.Timer(5, os.remove, args=[htmlfile])
# Human-readable labels for the `imp` module-type constants, used when the
# cross-reference report has no filename to display for a module.
TYPES = {imp.C_BUILTIN: "(builtin module)",
         imp.C_EXTENSION: "extension module",
         imp.IMP_HOOK: "IMP_HOOK",
         imp.PKG_DIRECTORY: "package directory",
         imp.PY_CODERESOURCE: "PY_CODERESOURCE",
         imp.PY_COMPILED: "compiled python module",
         imp.PY_FROZEN: "frozen module",
         imp.PY_RESOURCE: "PY_RESOURCE",
         imp.PY_SOURCE: "python module",
         imp.SEARCH_ERROR: "SEARCH_ERROR"
         }
| 37.338253 | 111 | 0.51713 |
acfa355a4f5c7c5745cf43bc8e6a031f1716a3b9 | 4,031 | py | Python | tests/test_new_api.py | RonaldinhoL/logzero | 868d0b57fc3fa4efbcb9b9829f14aece46ee5541 | [
"MIT"
] | 1,091 | 2017-06-27T12:48:13.000Z | 2022-03-16T10:20:42.000Z | tests/test_new_api.py | RonaldinhoL/logzero | 868d0b57fc3fa4efbcb9b9829f14aece46ee5541 | [
"MIT"
] | 344 | 2017-06-30T14:49:27.000Z | 2022-03-28T02:47:25.000Z | tests/test_new_api.py | RonaldinhoL/logzero | 868d0b57fc3fa4efbcb9b9829f14aece46ee5541 | [
"MIT"
] | 88 | 2017-07-03T18:44:48.000Z | 2022-01-09T12:56:34.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_logzero
----------------------------------
Tests for `logzero` module.
"""
import os
import tempfile
import logzero
def test_api_logfile(capsys):
    """
    logzero.logfile(..) should work as expected
    """
    logzero.reset_default_logger()
    temp = tempfile.NamedTemporaryFile()
    try:
        # Logged before any logfile is configured -> must never reach the file.
        logzero.logger.info("info1")

        # Attach the logfile, detach it, then attach it again; only messages
        # emitted while the handler was attached may appear in the file.
        logzero.logfile(temp.name)
        logzero.logger.info("info2")
        logzero.logfile(None)
        logzero.logger.info("info3")
        logzero.logfile(temp.name)
        logzero.logger.info("info4")

        with open(temp.name) as f:
            content = f.read()
        expectations = [("info1", False), ("info2", True), ("info3", False), ("info4", True)]
        for marker, should_be_present in expectations:
            assert ("] %s" % marker in content) == should_be_present
    finally:
        temp.close()
def test_api_loglevel(capsys):
    """
    Should reconfigure the internal logger loglevel
    """
    logzero.reset_default_logger()
    temp = tempfile.NamedTemporaryFile()
    try:
        logzero.logfile(temp.name)
        # INFO is accepted before the level is raised ...
        logzero.logger.info("info1")
        logzero.loglevel(logzero.WARN)
        # ... but suppressed afterwards, while WARN still passes through.
        logzero.logger.info("info2")
        logzero.logger.warning("warn1")

        with open(temp.name) as f:
            content = f.read()
        assert "] info1" in content
        assert "] info2" not in content
        assert "] warn1" in content
    finally:
        temp.close()
def test_api_loglevel_custom_handlers(capsys):
    """
    Should reconfigure the internal logger loglevel and custom handlers
    """
    logzero.reset_default_logger()

    # TODO: once loglevel() supports custom handlers, exercise it here.
    # The intended flow mirrors test_api_loglevel, but with a user-supplied
    # handler attached: write info1, raise the level to WARN, then verify
    # that info2 is filtered while warn1 still reaches the handler.
    pass
def test_api_rotating_logfile(capsys):
    """
    logzero.rotating_logfile(..) should work as expected
    """
    logzero.reset_default_logger()
    temp = tempfile.NamedTemporaryFile()
    try:
        # Emitted before the logfile exists -> goes nowhere.
        logzero.logger.info("info1")

        # With maxBytes=10 every record forces a rollover, so the live file
        # only ever holds the newest message.
        logzero.logfile(temp.name, maxBytes=10, backupCount=3)
        logzero.logger.info("info2")
        logzero.logger.info("info3")

        with open(temp.name) as f:
            content = f.read()
        assert "] info1" not in content  # logged before setting up logfile
        assert "] info2" not in content  # already rotated out
        assert "] info3" in content

        rotated_name = temp.name + ".1"
        assert os.path.exists(rotated_name)
        with open(rotated_name) as f:
            assert "] info2" in f.read()
    finally:
        temp.close()
def test_api_logfile_custom_loglevel():
    """
    logzero.logfile(..) should be able to use a custom loglevel
    """
    logzero.reset_default_logger()
    temp = tempfile.NamedTemporaryFile()
    try:
        # The file handler filters at WARN regardless of the logger level.
        logzero.logfile(temp.name, loglevel=logzero.WARN)
        logzero.logger.info("info1")
        logzero.logger.warning("warn1")

        # A later logzero.loglevel(..) call must not overwrite the custom
        # loglevel of the file handler.
        logzero.loglevel(logzero.INFO)
        logzero.logger.info("info2")
        logzero.logger.warning("warn2")

        with open(temp.name) as f:
            content = f.read()
        for marker, should_be_present in [("info1", False), ("warn1", True), ("info2", False), ("warn2", True)]:
            assert ("] %s" % marker in content) == should_be_present
    finally:
        temp.close()
acfa359b06136956469bcd45ab7d1a8d7f1f3801 | 306,434 | py | Python | src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_managed_cluster_decorator.py | warren-jones/azure-cli | a7d5d6d53f1b8ecd8a28752ea06e3f5499f20856 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_managed_cluster_decorator.py | warren-jones/azure-cli | a7d5d6d53f1b8ecd8a28752ea06e3f5499f20856 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_managed_cluster_decorator.py | warren-jones/azure-cli | a7d5d6d53f1b8ecd8a28752ea06e3f5499f20856 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import importlib
import unittest
from unittest.mock import Mock, call, patch, ANY
from azure.cli.command_modules.acs._consts import (
ADDONS,
CONST_ACC_SGX_QUOTE_HELPER_ENABLED,
CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME,
CONST_AZURE_POLICY_ADDON_NAME,
CONST_CONFCOM_ADDON_NAME,
CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME,
CONST_INGRESS_APPGW_ADDON_NAME,
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID,
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME,
CONST_INGRESS_APPGW_SUBNET_CIDR,
CONST_INGRESS_APPGW_SUBNET_ID,
CONST_INGRESS_APPGW_WATCH_NAMESPACE,
CONST_KUBE_DASHBOARD_ADDON_NAME,
CONST_MONITORING_ADDON_NAME,
CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID,
CONST_OPEN_SERVICE_MESH_ADDON_NAME,
CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING,
CONST_PRIVATE_DNS_ZONE_NONE,
CONST_PRIVATE_DNS_ZONE_SYSTEM,
CONST_ROTATION_POLL_INTERVAL,
CONST_SECRET_ROTATION_ENABLED,
CONST_VIRTUAL_NODE_ADDON_NAME,
CONST_VIRTUAL_NODE_SUBNET_NAME,
CONST_MONITORING_USING_AAD_MSI_AUTH,
CONST_LOAD_BALANCER_SKU_STANDARD,
CONST_LOAD_BALANCER_SKU_BASIC,
CONST_DEFAULT_NODE_OS_TYPE,
CONST_VIRTUAL_MACHINE_SCALE_SETS,
CONST_NODEPOOL_MODE_SYSTEM,
CONST_DEFAULT_NODE_VM_SIZE,
DecoratorEarlyExitException,
DecoratorMode,
AgentPoolDecoratorMode,
)
from azure.cli.command_modules.acs.agentpool_decorator import AKSAgentPoolContext, AKSAgentPoolParamDict
from azure.cli.command_modules.acs.managed_cluster_decorator import (
AKSManagedClusterContext,
AKSManagedClusterCreateDecorator,
AKSManagedClusterModels,
AKSManagedClusterParamDict,
AKSManagedClusterUpdateDecorator,
)
from azure.cli.command_modules.acs.tests.latest.mocks import (
MockCLI,
MockClient,
MockCmd,
)
from azure.cli.command_modules.acs.tests.latest.utils import get_test_data_file_path
from azure.cli.core.azclierror import (
AzureInternalError,
AzCLIError,
CLIInternalError,
InvalidArgumentValueError,
MutuallyExclusiveArgumentError,
NoTTYError,
RequiredArgumentMissingError,
UnknownError,
)
from azure.cli.core.profiles import ResourceType
from azure.core.exceptions import HttpResponseError
from knack.prompting import NoTTYException
from knack.util import CLIError
class AKSManagedClusterModelsTestCase(unittest.TestCase):
    def setUp(self):
        # minimal mocked cli/cmd pair shared by the tests in this case
        self.cli_ctx = MockCLI()
        self.cmd = MockCmd(self.cli_ctx)

    def test_models(self):
        """Every model exposed by AKSManagedClusterModels must be exactly the
        class defined in the versioned containerservice SDK module."""
        models = AKSManagedClusterModels(self.cmd, ResourceType.MGMT_CONTAINERSERVICE)

        # load models directly (instead of through the `get_sdk` method provided by the cli component)
        from azure.cli.core.profiles._shared import AZURE_API_PROFILES

        sdk_profile = AZURE_API_PROFILES["latest"][ResourceType.MGMT_CONTAINERSERVICE]
        api_version = sdk_profile.default_api_version
        module_name = "azure.mgmt.containerservice.v{}.models".format(api_version.replace("-", "_"))
        module = importlib.import_module(module_name)

        # load balancer models
        load_balancer_model_names = [
            "ManagedClusterLoadBalancerProfile",
            "ManagedClusterLoadBalancerProfileManagedOutboundIPs",
            "ManagedClusterLoadBalancerProfileOutboundIPs",
            "ManagedClusterLoadBalancerProfileOutboundIPPrefixes",
            "ResourceReference",
        ]
        for model_name in load_balancer_model_names:
            self.assertEqual(
                getattr(models.load_balancer_models, model_name),
                getattr(module, model_name),
            )

        # nat gateway models
        nat_gateway_model_names = [
            "ManagedClusterNATGatewayProfile",
            "ManagedClusterManagedOutboundIPProfile",
        ]
        for model_name in nat_gateway_model_names:
            self.assertEqual(
                getattr(models.nat_gateway_models, model_name),
                getattr(module, model_name),
            )
class AKSManagedClusterContextTestCase(unittest.TestCase):
    def setUp(self):
        # Build the mocked cmd object and the versioned model container that
        # every test in this case shares.
        self.cli_ctx = MockCLI()
        self.cmd = MockCmd(self.cli_ctx)
        self.models = AKSManagedClusterModels(self.cmd, ResourceType.MGMT_CONTAINERSERVICE)
def test__init__(self):
# fail on not passing dictionary-like parameters
with self.assertRaises(CLIInternalError):
AKSManagedClusterContext(self.cmd, [], self.models, DecoratorMode.CREATE)
# fail on not passing decorator_mode with Enum type DecoratorMode
with self.assertRaises(CLIInternalError):
AKSManagedClusterContext(self.cmd, AKSManagedClusterParamDict({}), self.models, 1)
def test_attach_mc(self):
ctx_1 = AKSManagedClusterContext(self.cmd, AKSManagedClusterParamDict({}), self.models, DecoratorMode.CREATE)
mc = self.models.ManagedCluster(location="test_location")
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.mc, mc)
# fail on attach again
with self.assertRaises(CLIInternalError):
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.existing_mc, None)
def test_attach_existing_mc(self):
ctx_1 = AKSManagedClusterContext(self.cmd, AKSManagedClusterParamDict({}), self.models, DecoratorMode.UPDATE)
mc = self.models.ManagedCluster(location="test_location")
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.existing_mc, mc)
# fail on attach again
with self.assertRaises(CLIInternalError):
ctx_1.attach_existing_mc(mc)
def test_attach_agentpool_context(self):
ctx_1 = AKSManagedClusterContext(self.cmd, AKSManagedClusterParamDict({}), self.models, DecoratorMode.CREATE)
agentpool_ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({}),
self.models,
DecoratorMode.CREATE,
AgentPoolDecoratorMode.MANAGED_CLUSTER,
)
ctx_1.attach_agentpool_context(agentpool_ctx_1)
self.assertEqual(ctx_1.agentpool_context, agentpool_ctx_1)
# fail on attach again
with self.assertRaises(CLIInternalError):
ctx_1.attach_agentpool_context(agentpool_ctx_1)
def test_validate_cluster_autoscaler_profile(self):
ctx = AKSManagedClusterContext(self.cmd, AKSManagedClusterParamDict({}), self.models, DecoratorMode.CREATE)
# default
s1 = None
t1 = ctx._AKSManagedClusterContext__validate_cluster_autoscaler_profile(s1)
g1 = None
self.assertEqual(t1, g1)
# invalid type
s2 = set()
# fail on invalid type
with self.assertRaises(InvalidArgumentValueError):
ctx._AKSManagedClusterContext__validate_cluster_autoscaler_profile(s2)
# empty list
s3 = []
t3 = ctx._AKSManagedClusterContext__validate_cluster_autoscaler_profile(s3)
g3 = {}
self.assertEqual(t3, g3)
# empty dict
s4 = {}
t4 = ctx._AKSManagedClusterContext__validate_cluster_autoscaler_profile(s4)
g4 = {}
self.assertEqual(t4, g4)
# empty key & empty value
s5 = ["="]
# fail on empty key
with self.assertRaises(InvalidArgumentValueError):
ctx._AKSManagedClusterContext__validate_cluster_autoscaler_profile(s5)
# non-empty key & empty value
s6 = ["scan-interval="]
t6 = ctx._AKSManagedClusterContext__validate_cluster_autoscaler_profile(s6)
g6 = {"scan-interval": ""}
self.assertEqual(t6, g6)
# invalid key
s7 = ["bad-key=val"]
# fail on invalid key
with self.assertRaises(InvalidArgumentValueError):
ctx._AKSManagedClusterContext__validate_cluster_autoscaler_profile(s7)
# valid key
s8 = ["scan-interval=20s", "scale-down-delay-after-add=15m"]
t8 = ctx._AKSManagedClusterContext__validate_cluster_autoscaler_profile(s8)
g8 = {"scan-interval": "20s", "scale-down-delay-after-add": "15m"}
self.assertEqual(t8, g8)
# two pairs of empty key & empty value
s9 = ["=", "="]
# fail on empty key
with self.assertRaises(InvalidArgumentValueError):
ctx._AKSManagedClusterContext__validate_cluster_autoscaler_profile(s9)
# additional empty key & empty value
s10 = ["scan-interval=20s", "="]
# fail on empty key
with self.assertRaises(InvalidArgumentValueError):
ctx._AKSManagedClusterContext__validate_cluster_autoscaler_profile(s10)
def test_validate_gmsa_options(self):
# default
ctx = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict({}),
self.models,
DecoratorMode.CREATE,
)
ctx._AKSManagedClusterContext__validate_gmsa_options(False, None, None, False)
ctx._AKSManagedClusterContext__validate_gmsa_options(True, None, None, True)
# fail on yes & prompt_y_n not specified
with patch(
"azure.cli.command_modules.acs.managed_cluster_decorator.prompt_y_n",
return_value=False,
), self.assertRaises(DecoratorEarlyExitException):
ctx._AKSManagedClusterContext__validate_gmsa_options(True, None, None, False)
# fail on gmsa_root_domain_name not specified
with self.assertRaises(RequiredArgumentMissingError):
ctx._AKSManagedClusterContext__validate_gmsa_options(True, "test_gmsa_dns_server", None, False)
# fail on enable_windows_gmsa not specified
with self.assertRaises(RequiredArgumentMissingError):
ctx._AKSManagedClusterContext__validate_gmsa_options(False, None, "test_gmsa_root_domain_name", False)
# fail on enable_windows_gmsa not specified
with self.assertRaises(RequiredArgumentMissingError):
ctx._AKSManagedClusterContext__validate_gmsa_options(
False, "test_gmsa_dns_server", "test_gmsa_root_domain_name", False
)
def test_get_subscription_id(self):
ctx_1 = AKSManagedClusterContext(self.cmd, AKSManagedClusterParamDict({}), self.models, DecoratorMode.CREATE)
ctx_1.set_intermediate("subscription_id", "test_subscription_id")
self.assertEqual(
ctx_1.get_subscription_id(),
"test_subscription_id",
)
ctx_1.remove_intermediate("subscription_id")
self.assertEqual(ctx_1.get_intermediate("subscription_id"), None)
mock_profile = Mock(get_subscription_id=Mock(return_value="test_subscription_id"))
with patch(
"azure.cli.command_modules.acs.managed_cluster_decorator.Profile",
return_value=mock_profile,
):
self.assertEqual(
ctx_1.get_subscription_id(),
"test_subscription_id",
)
mock_profile.get_subscription_id.assert_called_once()
def test_get_resource_group_name(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict({"resource_group_name": "test_rg_name"}),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_resource_group_name(), "test_rg_name")
def test_get_name(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict({"name": "test_name"}),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_name(), "test_name")
def test_get_location(self):
# default & dynamic completion
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict({"location": None}),
self.models,
DecoratorMode.CREATE,
)
with patch(
"azure.cli.command_modules.acs.managed_cluster_decorator.get_rg_location",
return_value="test_location",
) as mock_get_rg_location:
self.assertEqual(ctx_1._get_location(read_only=True), None)
self.assertEqual(ctx_1.get_intermediate("location"), None)
self.assertEqual(ctx_1.get_location(), "test_location")
self.assertEqual(ctx_1.get_intermediate("location"), "test_location")
self.assertEqual(ctx_1.get_location(), "test_location")
mock_get_rg_location.assert_called_once()
mc = self.models.ManagedCluster(location="test_mc_location")
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_location(), "test_mc_location")
def test_get_tags(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"tags": None,
}
),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_tags(), None)
mc = self.models.ManagedCluster(
location="test_location",
tags={},
)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_tags(), {})
# custom value
ctx_2 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"tags": {"xyz": "100"},
}
),
self.models,
DecoratorMode.UPDATE,
)
mc_2 = self.models.ManagedCluster(
location="test_location",
tags={},
)
ctx_2.attach_mc(mc_2)
self.assertEqual(ctx_2.get_tags(), {"xyz": "100"})
def test_get_kubernetes_version(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict({"kubernetes_version": ""}),
self.models,
DecoratorMode.CREATE,
)
agentpool_ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"kubernetes_version": ""}),
self.models,
DecoratorMode.CREATE,
AgentPoolDecoratorMode.MANAGED_CLUSTER,
)
ctx_1.attach_agentpool_context(agentpool_ctx_1)
self.assertEqual(ctx_1.get_kubernetes_version(), "")
def test_get_dns_name_prefix(self):
# default & dynamic completion
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"dns_name_prefix": None,
"fqdn_subdomain": None,
"name": "1234_test_name",
"resource_group_name": "test_rg_name",
}
),
self.models,
DecoratorMode.CREATE,
)
ctx_1.set_intermediate("subscription_id", "1234-5678")
self.assertEqual(ctx_1._get_dns_name_prefix(read_only=True), None)
self.assertEqual(ctx_1.get_dns_name_prefix(), "a1234testn-testrgname-1234-5")
mc = self.models.ManagedCluster(location="test_location", dns_prefix="test_mc_dns_name_prefix")
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_dns_name_prefix(), "test_mc_dns_name_prefix")
# custom value
ctx_2 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"dns_name_prefix": "test_dns_name_prefix",
"fqdn_subdomain": "test_fqdn_subdomain",
}
),
self.models,
DecoratorMode.CREATE,
)
# fail on mutually exclusive dns_name_prefix and fqdn_subdomain
with self.assertRaises(MutuallyExclusiveArgumentError):
ctx_2.get_dns_name_prefix()
def test_get_node_osdisk_diskencryptionset_id(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"node_osdisk_diskencryptionset_id": None,
}
),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_node_osdisk_diskencryptionset_id(), None)
mc = self.models.ManagedCluster(
location="test_location",
disk_encryption_set_id="test_node_osdisk_diskencryptionset_id",
)
ctx_1.attach_mc(mc)
self.assertEqual(
ctx_1.get_node_osdisk_diskencryptionset_id(),
"test_node_osdisk_diskencryptionset_id",
)
def test_get_ssh_key_value_and_no_ssh_key(self):
import paramiko
key = paramiko.RSAKey.generate(2048)
public_key = "{} {}".format(key.get_name(), key.get_base64())
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict({"ssh_key_value": public_key, "no_ssh_key": False}),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_ssh_key_value_and_no_ssh_key(), (public_key, False))
ssh_config = self.models.ContainerServiceSshConfiguration(
public_keys=[self.models.ContainerServiceSshPublicKey(key_data="test_mc_ssh_key_value")]
)
linux_profile = self.models.ContainerServiceLinuxProfile(admin_username="test_user", ssh=ssh_config)
mc = self.models.ManagedCluster(location="test_location", linux_profile=linux_profile)
ctx_1.attach_mc(mc)
# fail on invalid key
with self.assertRaises(InvalidArgumentValueError):
self.assertEqual(
ctx_1.get_ssh_key_value_and_no_ssh_key(),
"test_mc_ssh_key_value",
)
# custom value
ctx_2 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict({"ssh_key_value": "fake-key", "no_ssh_key": False}),
self.models,
DecoratorMode.CREATE,
)
# fail on invalid key
with self.assertRaises(InvalidArgumentValueError):
ctx_2.get_ssh_key_value_and_no_ssh_key()
# custom value
ctx_3 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict({"ssh_key_value": "fake-key", "no_ssh_key": True}),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_3.get_ssh_key_value_and_no_ssh_key(), ("fake-key", True))
ssh_config_3 = self.models.ContainerServiceSshConfiguration(
public_keys=[self.models.ContainerServiceSshPublicKey(key_data="test_mc_ssh_key_value")]
)
linux_profile_3 = self.models.ContainerServiceLinuxProfile(admin_username="test_user", ssh=ssh_config_3)
mc_3 = self.models.ManagedCluster(location="test_location", linux_profile=linux_profile_3)
ctx_3.attach_mc(mc_3)
# fail on inconsistent state
with self.assertRaises(CLIInternalError):
self.assertEqual(
ctx_3.get_ssh_key_value_and_no_ssh_key(),
"test_mc_ssh_key_value",
)
def test_get_admin_username(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict({"admin_username": "azureuser"}),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_admin_username(), "azureuser")
ssh_config = self.models.ContainerServiceSshConfiguration(public_keys=[])
linux_profile = self.models.ContainerServiceLinuxProfile(admin_username="test_mc_user", ssh=ssh_config)
mc = self.models.ManagedCluster(location="test_location", linux_profile=linux_profile)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_admin_username(), "test_mc_user")
    def test_get_windows_admin_username_and_password(self):
        """Verify the interaction between raw parameters, the attached mc and
        the interactive prompts (username via prompt, password via
        prompt_pass), including the no-tty and missing-argument failure modes.
        """
        # default
        ctx_1 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict({"windows_admin_username": None, "windows_admin_password": None}),
            self.models,
            DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_windows_admin_username_and_password(), (None, None))
        windows_profile = self.models.ManagedClusterWindowsProfile(
            # [SuppressMessage("Microsoft.Security", "CS002:SecretInNextLine", Justification="fake secrets in unit test")]
            admin_username="test_mc_win_admin",
        )
        mc = self.models.ManagedCluster(location="test_location", windows_profile=windows_profile)
        ctx_1.attach_mc(mc)
        # fail on inconsistent state (username on the mc without a password)
        with self.assertRaises(CLIInternalError):
            self.assertEqual(
                ctx_1.get_windows_admin_username_and_password(),
                ("test_mc_win_admin", "test_mc_win_admin_password"),
            )
        # dynamic completion: password given, username prompted for
        ctx_2 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "windows_admin_username": None,
                    "windows_admin_password": "test_win_admin_pd",
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        # fail on no tty
        with patch(
            "knack.prompting.verify_is_a_tty",
            side_effect=NoTTYException,
        ), self.assertRaises(NoTTYError):
            ctx_2.get_windows_admin_username_and_password()
        with patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.prompt",
            return_value="test_win_admin_name",
        ):
            # read-only access must not trigger the prompt
            self.assertEqual(
                ctx_2._get_windows_admin_username_and_password(read_only=True),
                (None, "test_win_admin_pd"),
            )
            self.assertEqual(
                ctx_2.get_windows_admin_username_and_password(),
                ("test_win_admin_name", "test_win_admin_pd"),
            )
        windows_profile = self.models.ManagedClusterWindowsProfile(
            # [SuppressMessage("Microsoft.Security", "CS002:SecretInNextLine", Justification="fake secrets in unit test")]
            admin_username="test_mc_win_admin_name",
            admin_password="test_mc_win_admin_pd",
        )
        mc = self.models.ManagedCluster(location="test_location", windows_profile=windows_profile)
        ctx_2.attach_mc(mc)
        # the complete profile on the mc takes precedence
        self.assertEqual(
            ctx_2.get_windows_admin_username_and_password(),
            ("test_mc_win_admin_name", "test_mc_win_admin_pd"),
        )
        # dynamic completion: username given, password prompted for
        ctx_3 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "windows_admin_username": "test_win_admin_name",
                    "windows_admin_password": None,
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        # fail on no tty
        with patch(
            "knack.prompting.verify_is_a_tty",
            side_effect=NoTTYException,
        ), self.assertRaises(NoTTYError):
            ctx_3.get_windows_admin_username_and_password()
        with patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.prompt_pass",
            return_value="test_win_admin_pd",
        ):
            self.assertEqual(
                ctx_3.get_windows_admin_username_and_password(),
                ("test_win_admin_name", "test_win_admin_pd"),
            )
        # custom value: gMSA root domain name given requires the credentials
        ctx_4 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "windows_admin_username": None,
                    "windows_admin_password": None,
                    "enable_windows_gmsa": False,
                    "gmsa_dns_server": None,
                    "gmsa_root_domain_name": "test_gmsa_root_domain_name",
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        # fail on windows admin username/password not specified
        with self.assertRaises(RequiredArgumentMissingError):
            ctx_4.get_windows_admin_username_and_password()
def test_get_windows_admin_password(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict({"windows_admin_password": None}),
self.models,
DecoratorMode.UPDATE,
)
self.assertEqual(ctx_1.get_windows_admin_password(), None)
def test_get_enable_ahub(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict({"enable_ahub": False}),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_enable_ahub(), False)
windows_profile = self.models.ManagedClusterWindowsProfile(
# [SuppressMessage("Microsoft.Security", "CS002:SecretInNextLine", Justification="fake secrets in unit test")]
admin_username="test_mc_win_admin",
admin_password="test_mc_win_admin_password",
license_type="Windows_Server",
)
mc = self.models.ManagedCluster(location="test_location", windows_profile=windows_profile)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_enable_ahub(), True)
# custom value
ctx_2 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict({"enable_ahub": True, "disable_ahub": True}),
self.models,
DecoratorMode.UPDATE,
)
# fail on mutually exclusive disable_ahub and enable_ahub
with self.assertRaises(MutuallyExclusiveArgumentError):
self.assertEqual(ctx_2.get_enable_ahub(), True)
def test_get_disable_ahub(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict({"disable_ahub": False}),
self.models,
DecoratorMode.UPDATE,
)
self.assertEqual(ctx_1.get_disable_ahub(), False)
# custom value
ctx_2 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict({"enable_ahub": True, "disable_ahub": True}),
self.models,
DecoratorMode.UPDATE,
)
# fail on mutually exclusive disable_ahub and enable_ahub
with self.assertRaises(MutuallyExclusiveArgumentError):
self.assertEqual(ctx_2.get_disable_ahub(), True)
def test_get_enable_windows_gmsa(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"enable_windows_gmsa": False,
}
),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_enable_windows_gmsa(), False)
windows_gmsa_profile_1 = self.models.WindowsGmsaProfile(enabled=True)
windows_profile_1 = self.models.ManagedClusterWindowsProfile(
admin_username="test_admin_username",
gmsa_profile=windows_gmsa_profile_1,
)
mc = self.models.ManagedCluster(location="test_location", windows_profile=windows_profile_1)
ctx_1.attach_mc(mc)
with patch(
"azure.cli.command_modules.acs.managed_cluster_decorator.prompt_y_n",
return_value=True,
):
self.assertEqual(ctx_1.get_enable_windows_gmsa(), True)
    def test_get_gmsa_dns_server_and_root_domain_name(self):
        """get_gmsa_dns_server_and_root_domain_name returns (None, None) by default,
        reads both values from an attached mc, and raises CLIInternalError when the
        parameters and the attached mc's gmsa profile disagree.
        """
        # default
        ctx_1 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "enable_windows_gmsa": False,
                    "gmsa_dns_server": None,
                    "gmsa_root_domain_name": None,
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_gmsa_dns_server_and_root_domain_name(), (None, None))
        windows_gmsa_profile_1 = self.models.WindowsGmsaProfile(
            enabled=True,
            dns_server="test_dns_server",
            root_domain_name="test_root_domain_name",
        )
        windows_profile_1 = self.models.ManagedClusterWindowsProfile(
            admin_username="test_admin_username",
            gmsa_profile=windows_gmsa_profile_1,
        )
        mc = self.models.ManagedCluster(location="test_location", windows_profile=windows_profile_1)
        ctx_1.attach_mc(mc)
        self.assertEqual(
            ctx_1.get_gmsa_dns_server_and_root_domain_name(),
            ("test_dns_server", "test_root_domain_name"),
        )
        # custom value
        ctx_2 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "enable_windows_gmsa": True,
                    "gmsa_dns_server": "test_gmsa_dns_server",
                    "gmsa_root_domain_name": "test_gmsa_root_domain_name",
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        # mc carries a dns_server but root_domain_name=None, conflicting with the parameters above
        windows_gmsa_profile_2 = self.models.WindowsGmsaProfile(
            enabled=True,
            dns_server="test_dns_server",
            root_domain_name=None,
        )
        windows_profile_2 = self.models.ManagedClusterWindowsProfile(
            admin_username="test_admin_username",
            gmsa_profile=windows_gmsa_profile_2,
        )
        mc = self.models.ManagedCluster(location="test_location", windows_profile=windows_profile_2)
        ctx_2.attach_mc(mc)
        # fail on inconsistent state
        with self.assertRaises(CLIInternalError):
            ctx_2.get_gmsa_dns_server_and_root_domain_name()
    def test_get_service_principal_and_client_secret(
        self,
    ):
        """get_service_principal_and_client_secret: (None, None) by default; with both
        values given it returns them as-is; with only client_secret given, dynamic
        completion builds the service principal (build_service_principal patched);
        it raises CLIInternalError when the attached mc disagrees with the parameters
        and CLIError when service_principal is given without client_secret.
        """
        # default
        ctx_1 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "enable_managed_identity": True,
                    "service_principal": None,
                    "client_secret": None,
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        self.assertEqual(
            ctx_1.get_service_principal_and_client_secret(),
            (None, None),
        )
        # dynamic completion
        ctx_2 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "name": "test_name",
                    "resource_group_name": "test_rg_name",
                    "enable_managed_identity": True,
                    "service_principal": "test_service_principal",
                    "client_secret": "test_client_secret",
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        ctx_2.set_intermediate("subscription_id", "1234-5678", overwrite_exists=True)
        with patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.get_rg_location",
            return_value="test_location",
        ), patch(
            "azure.cli.command_modules.acs._graph.get_graph_rbac_management_client",
            return_value=None,
        ):
            self.assertEqual(
                ctx_2.get_service_principal_and_client_secret(),
                ("test_service_principal", "test_client_secret"),
            )
        # dynamic completion
        ctx_3 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "name": "test_name",
                    "resource_group_name": "test_rg_name",
                    "enable_managed_identity": True,
                    "service_principal": None,
                    "client_secret": "test_client_secret",
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        ctx_3.set_intermediate("subscription_id", "1234-5678", overwrite_exists=True)
        # build_service_principal is patched so no real AAD call is made
        with patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.get_rg_location",
            return_value="test_location",
        ), patch("azure.cli.command_modules.acs._graph.get_graph_rbac_management_client", return_value=None), patch(
            "azure.cli.command_modules.acs._graph.build_service_principal",
            return_value=("test_service_principal", "test_aad_session_key"),
        ):
            self.assertEqual(
                ctx_3.get_service_principal_and_client_secret(),
                ("test_service_principal", "test_client_secret"),
            )
        # mc's client_id differs from the already-completed parameter above
        service_principal_profile = self.models.ManagedClusterServicePrincipalProfile(
            client_id="test_mc_service_principal",
        )
        mc = self.models.ManagedCluster(
            location="test_location",
            service_principal_profile=service_principal_profile,
        )
        ctx_3.attach_mc(mc)
        # fail on inconsistent state
        with self.assertRaises(CLIInternalError):
            self.assertEqual(
                ctx_3.get_service_principal_and_client_secret(),
                ("test_mc_service_principal", "test_mc_client_secret"),
            )
        # dynamic completion
        ctx_4 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "name": "test_name",
                    "resource_group_name": "test_rg_name",
                    "enable_managed_identity": True,
                    "service_principal": "test_service_principal",
                    "client_secret": None,
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        ctx_4.set_intermediate("subscription_id", "1234-5678", overwrite_exists=True)
        with patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.get_rg_location",
            return_value="test_location",
        ), patch(
            "azure.cli.command_modules.acs._graph.get_graph_rbac_management_client",
            return_value=None,
        ):
            # fail on client_secret not specified
            with self.assertRaises(CLIError):
                ctx_4.get_service_principal_and_client_secret()
    def test_get_enable_managed_identity(self):
        """get_enable_managed_identity: True from the parameter by default, False once
        an mc with an (empty) identity is attached or when a service principal is
        supplied; raises RequiredArgumentMissingError when assign_identity is given
        without enable_managed_identity.
        """
        # default
        ctx_1 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "enable_managed_identity": True,
                    "service_principal": None,
                    "client_secret": None,
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_enable_managed_identity(), True)
        identity = self.models.ManagedClusterIdentity()
        mc = self.models.ManagedCluster(location="test_location", identity=identity)
        ctx_1.attach_mc(mc)
        self.assertEqual(ctx_1.get_enable_managed_identity(), False)
        # dynamic completion
        ctx_2 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "enable_managed_identity": True,
                    "service_principal": "test_service_principal",
                    "client_secret": "test_client_secret",
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_2.get_enable_managed_identity(), False)
        # custom value
        ctx_3 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "enable_managed_identity": False,
                    "assign_identity": "test_assign_identity",
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        # fail on enable_managed_identity not specified
        with self.assertRaises(RequiredArgumentMissingError):
            ctx_3.get_enable_managed_identity()
def test_get_skip_subnet_role_assignment(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict({"skip_subnet_role_assignment": False}),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_skip_subnet_role_assignment(), False)
    def test_get_assign_identity(self):
        """get_assign_identity: None by default, read back from an attached mc with a
        UserAssigned identity; raises RequiredArgumentMissingError when combined with
        enable_managed_identity=False or when assign_kubelet_identity is given
        without assign_identity.
        """
        # default
        ctx_1 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict({"assign_identity": None}),
            self.models,
            DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_assign_identity(), None)
        user_assigned_identity = {
            "test_assign_identity": self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()
        }
        identity = self.models.ManagedClusterIdentity(
            type="UserAssigned", user_assigned_identities=user_assigned_identity
        )
        mc = self.models.ManagedCluster(location="test_location", identity=identity)
        ctx_1.attach_mc(mc)
        self.assertEqual(ctx_1.get_assign_identity(), "test_assign_identity")
        # custom value
        ctx_2 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "enable_managed_identity": False,
                    "assign_identity": "test_assign_identity",
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        # fail on enable_managed_identity not specified
        with self.assertRaises(RequiredArgumentMissingError):
            ctx_2.get_assign_identity()
        # custom value
        ctx_3 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "assign_identity": None,
                    "assign_kubelet_identity": "test_assign_kubelet_identity",
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        # fail on assign_identity not specified
        with self.assertRaises(RequiredArgumentMissingError):
            ctx_3.get_assign_identity()
    def test_get_identity_by_msi_client(self):
        """get_identity_by_msi_client parses the resource id into subscription,
        resource group and resource name, and fetches the identity via the (mocked)
        msi client.
        """
        # custom value
        ctx_1 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "assign_identity": "/subscriptions/1234/resourcegroups/test_rg/providers/microsoft.managedidentity/userassignedidentities/5678",
                    "enable_managed_identity": True,
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        identity_obj = Mock(client_id="1234-5678", principal_id="8765-4321")
        msi_client = Mock(user_assigned_identities=Mock(get=Mock(return_value=identity_obj)))
        with patch(
            "azure.cli.command_modules.acs._helpers.get_msi_client",
            return_value=msi_client,
        ) as get_msi_client:
            identity = ctx_1.get_identity_by_msi_client(ctx_1.get_assign_identity())
            self.assertEqual(identity.client_id, "1234-5678")
            self.assertEqual(identity.principal_id, "8765-4321")
            # subscription id "1234" and rg/name "test_rg"/"5678" come from the resource id above
            get_msi_client.assert_called_once_with(self.cmd.cli_ctx, "1234")
            msi_client.user_assigned_identities.get.assert_called_once_with(
                resource_group_name="test_rg", resource_name="5678"
            )
    def test_get_user_assigned_identity_client_id(self):
        """get_user_assigned_identity_client_id requires assign_identity (raises
        RequiredArgumentMissingError otherwise) and resolves the client id via
        get_identity_by_msi_client; an explicit identity argument overrides the
        parameter.
        """
        ctx_1 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict({"assign_identity": None}),
            self.models,
            DecoratorMode.CREATE,
        )
        # fail on assign_identity not provided
        with self.assertRaises(RequiredArgumentMissingError):
            ctx_1.get_user_assigned_identity_client_id()
        # custom value
        identity_obj = Mock(
            client_id="test_client_id",
        )
        with patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.AKSManagedClusterContext.get_identity_by_msi_client",
            return_value=identity_obj,
        ) as get_identity_helper:
            ctx_2 = AKSManagedClusterContext(
                self.cmd,
                AKSManagedClusterParamDict(
                    {
                        "assign_identity": "test_assign_identity",
                        "enable_managed_identity": True,
                    }
                ),
                self.models,
                DecoratorMode.CREATE,
            )
            self.assertEqual(ctx_2.get_user_assigned_identity_client_id(), "test_client_id")
            get_identity_helper.assert_called_with("test_assign_identity")
            # passing an identity explicitly takes precedence over the parameter
            self.assertEqual(ctx_2.get_user_assigned_identity_client_id("custom_assign_identity"), "test_client_id")
            get_identity_helper.assert_called_with("custom_assign_identity")
    def test_get_user_assigned_identity_object_id(self):
        """get_user_assigned_identity_object_id requires assign_identity (raises
        RequiredArgumentMissingError otherwise) and resolves the principal id via
        get_identity_by_msi_client; an explicit identity argument overrides the
        parameter.
        """
        ctx_1 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict({"assign_identity": None}),
            self.models,
            DecoratorMode.CREATE,
        )
        # fail on assign_identity not provided
        with self.assertRaises(RequiredArgumentMissingError):
            ctx_1.get_user_assigned_identity_object_id()
        # custom value
        identity_obj = Mock(
            principal_id="test_principal_id",
        )
        with patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.AKSManagedClusterContext.get_identity_by_msi_client",
            return_value=identity_obj,
        ) as get_identity_helper:
            ctx_2 = AKSManagedClusterContext(
                self.cmd,
                AKSManagedClusterParamDict(
                    {
                        "assign_identity": "test_assign_identity",
                        "enable_managed_identity": True,
                    }
                ),
                self.models,
                DecoratorMode.CREATE,
            )
            self.assertEqual(ctx_2.get_user_assigned_identity_object_id(), "test_principal_id")
            get_identity_helper.assert_called_with("test_assign_identity")
            # passing an identity explicitly takes precedence over the parameter
            self.assertEqual(ctx_2.get_user_assigned_identity_object_id("custom_assign_identity"), "test_principal_id")
            get_identity_helper.assert_called_with("custom_assign_identity")
    def test_get_attach_acr(self):
        """get_attach_acr: None by default; in CREATE mode it rejects the combination
        with no_wait (MutuallyExclusiveArgumentError) and requires managed identity
        or a service principal (RequiredArgumentMissingError); in UPDATE mode those
        checks do not apply and the raw value is returned.
        """
        # default
        ctx_1 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict({"attach_acr": None}),
            self.models,
            DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_attach_acr(), None)
        # invalid parameter
        ctx_2 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "attach_acr": "test_attach_acr",
                    "enable_managed_identity": True,
                    "no_wait": True,
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        # fail on mutually exclusive enable_managed_identity and no_wait
        with self.assertRaises(MutuallyExclusiveArgumentError):
            ctx_2.get_attach_acr()
        # invalid parameter
        ctx_3 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "attach_acr": "test_attach_acr",
                    "enable_managed_identity": False,
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        # fail on service_principal/client_secret not specified
        with self.assertRaises(RequiredArgumentMissingError):
            ctx_3.get_attach_acr()
        # custom value (update mode)
        ctx_4 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "attach_acr": "test_attach_acr",
                    "enable_managed_identity": True,
                    "no_wait": True,
                }
            ),
            self.models,
            DecoratorMode.UPDATE,
        )
        self.assertEqual(ctx_4.get_attach_acr(), "test_attach_acr")
        # custom value
        ctx_5 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "attach_acr": "test_attach_acr",
                    "enable_managed_identity": True,
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_5.get_attach_acr(), "test_attach_acr")
def test_get_detach_acr(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict({"detach_acr": None}),
self.models,
DecoratorMode.UPDATE,
)
self.assertEqual(ctx_1.get_detach_acr(), None)
    def test_get_assignee_from_identity_or_sp_profile(self):
        """get_assignee_from_identity_or_sp_profile returns (assignee,
        is_service_principal): the kubelet identity's object id with False for a
        UserAssigned mc, the sp profile's client id with True otherwise; raises
        UnknownError when neither source is available.
        """
        # default
        ctx_1 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict({}),
            self.models,
            DecoratorMode.UPDATE,
        )
        # fail on no mc attached and no client id found
        with self.assertRaises(UnknownError):
            ctx_1.get_assignee_from_identity_or_sp_profile()
        # custom value
        ctx_2 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict({}),
            self.models,
            DecoratorMode.UPDATE,
        )
        mc_2 = self.models.ManagedCluster(
            location="test_location",
            identity=self.models.ManagedClusterIdentity(type="SystemAssigned"),
        )
        ctx_2.attach_mc(mc_2)
        # fail on kubelet identity not found
        with self.assertRaises(UnknownError):
            ctx_2.get_assignee_from_identity_or_sp_profile()
        # custom value
        ctx_3 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict({}),
            self.models,
            DecoratorMode.UPDATE,
        )
        mc_3 = self.models.ManagedCluster(
            location="test_location",
            identity=self.models.ManagedClusterIdentity(type="UserAssigned"),
            identity_profile={
                "kubeletidentity": self.models.UserAssignedIdentity(
                    client_id="test_client_id", object_id="test_object_id"
                )
            },
        )
        ctx_3.attach_mc(mc_3)
        self.assertEqual(
            ctx_3.get_assignee_from_identity_or_sp_profile(),
            ("test_object_id", False),
        )
        # custom value
        ctx_4 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict({}),
            self.models,
            DecoratorMode.UPDATE,
        )
        mc_4 = self.models.ManagedCluster(
            location="test_location",
            service_principal_profile=self.models.ManagedClusterServicePrincipalProfile(client_id="test_client_id"),
        )
        ctx_4.attach_mc(mc_4)
        self.assertEqual(
            ctx_4.get_assignee_from_identity_or_sp_profile(),
            ("test_client_id", True),
        )
    def test_get_load_balancer_sku(self):
        """get_load_balancer_sku defaults to standard, reads (lower-cased) values from
        the attached mc or parameters, and rejects the basic sku when combined with
        api_server_authorized_ip_ranges or enable_private_cluster.
        """
        # default
        ctx_1 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict({"load_balancer_sku": None}),
            self.models,
            DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_load_balancer_sku(), CONST_LOAD_BALANCER_SKU_STANDARD)
        network_profile = self.models.ContainerServiceNetworkProfile(load_balancer_sku="test_mc_load_balancer_SKU")
        mc = self.models.ManagedCluster(location="test_location", network_profile=network_profile)
        ctx_1.attach_mc(mc)
        # the mixed-case value from the mc is returned lower-cased
        self.assertEqual(ctx_1.get_load_balancer_sku(), "test_mc_load_balancer_sku")
        # custom value
        ctx_2 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict({"load_balancer_sku": CONST_LOAD_BALANCER_SKU_BASIC}),
            self.models,
            DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_2.get_load_balancer_sku(), CONST_LOAD_BALANCER_SKU_BASIC)
        # invalid parameter with validation
        ctx_3 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "load_balancer_sku": CONST_LOAD_BALANCER_SKU_BASIC,
                    "api_server_authorized_ip_ranges": "test_api_server_authorized_ip_ranges",
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        # fail on invalid load_balancer_sku (basic) when api_server_authorized_ip_ranges is assigned
        with self.assertRaises(InvalidArgumentValueError):
            ctx_3.get_load_balancer_sku()
        # invalid parameter with validation
        ctx_4 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "load_balancer_sku": CONST_LOAD_BALANCER_SKU_BASIC,
                    "enable_private_cluster": True,
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        # fail on invalid load_balancer_sku (basic) when enable_private_cluster is specified
        with self.assertRaises(InvalidArgumentValueError):
            ctx_4.get_load_balancer_sku()
        # custom value (lower case)
        ctx_5 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict({"load_balancer_sku": "STANDARD"}),
            self.models,
            DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_5.get_load_balancer_sku(), "standard")
    def test_get_load_balancer_managed_outbound_ip_count(self):
        """get_load_balancer_managed_outbound_ip_count reads the attached mc's value
        in CREATE mode but ignores it (returns the raw parameter, None) in UPDATE
        mode.
        """
        # default
        ctx_1 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "load_balancer_managed_outbound_ip_count": None,
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_load_balancer_managed_outbound_ip_count(), None)
        load_balancer_profile = self.models.load_balancer_models.ManagedClusterLoadBalancerProfile(
            managed_outbound_i_ps=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileManagedOutboundIPs(
                count=10
            )
        )
        network_profile = self.models.ContainerServiceNetworkProfile(load_balancer_profile=load_balancer_profile)
        mc = self.models.ManagedCluster(location="test_location", network_profile=network_profile)
        ctx_1.attach_mc(mc)
        self.assertEqual(ctx_1.get_load_balancer_managed_outbound_ip_count(), 10)
        # custom value
        ctx_2 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "load_balancer_managed_outbound_ip_count": None,
                }
            ),
            self.models,
            DecoratorMode.UPDATE,
        )
        load_balancer_profile_2 = self.models.load_balancer_models.ManagedClusterLoadBalancerProfile(
            managed_outbound_i_ps=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileManagedOutboundIPs(
                count=10
            )
        )
        network_profile_2 = self.models.ContainerServiceNetworkProfile(load_balancer_profile=load_balancer_profile_2)
        mc = self.models.ManagedCluster(location="test_location", network_profile=network_profile_2)
        ctx_2.attach_mc(mc)
        # UPDATE mode: the mc's count is not read back
        self.assertEqual(ctx_2.get_load_balancer_managed_outbound_ip_count(), None)
    def test_get_load_balancer_outbound_ips(self):
        """get_load_balancer_outbound_ips reads the attached mc's outbound IPs in
        CREATE mode but ignores them (returns the raw parameter, None) in UPDATE
        mode.
        """
        # default
        ctx_1 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "load_balancer_outbound_ips": None,
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_load_balancer_outbound_ips(), None)
        load_balancer_profile = self.models.load_balancer_models.ManagedClusterLoadBalancerProfile(
            outbound_i_ps=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileOutboundIPs(
                public_i_ps=[self.models.load_balancer_models.ResourceReference(id="test_public_ip")]
            )
        )
        network_profile = self.models.ContainerServiceNetworkProfile(load_balancer_profile=load_balancer_profile)
        mc = self.models.ManagedCluster(location="test_location", network_profile=network_profile)
        ctx_1.attach_mc(mc)
        self.assertEqual(
            ctx_1.get_load_balancer_outbound_ips(),
            [self.models.load_balancer_models.ResourceReference(id="test_public_ip")],
        )
        # custom value
        ctx_2 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "load_balancer_outbound_ips": None,
                }
            ),
            self.models,
            DecoratorMode.UPDATE,
        )
        load_balancer_profile_2 = self.models.load_balancer_models.ManagedClusterLoadBalancerProfile(
            outbound_i_ps=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileOutboundIPs(
                public_i_ps=[self.models.load_balancer_models.ResourceReference(id="test_public_ip")]
            )
        )
        network_profile_2 = self.models.ContainerServiceNetworkProfile(load_balancer_profile=load_balancer_profile_2)
        mc = self.models.ManagedCluster(location="test_location", network_profile=network_profile_2)
        ctx_2.attach_mc(mc)
        # UPDATE mode: the mc's outbound IPs are not read back
        self.assertEqual(ctx_2.get_load_balancer_outbound_ips(), None)
    def test_get_load_balancer_outbound_ip_prefixes(self):
        """get_load_balancer_outbound_ip_prefixes reads the attached mc's prefixes in
        CREATE mode but ignores them (returns the raw parameter, None) in UPDATE
        mode.
        """
        # default
        ctx_1 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "load_balancer_outbound_ip_prefixes": None,
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_load_balancer_outbound_ip_prefixes(), None)
        load_balancer_profile = self.models.load_balancer_models.ManagedClusterLoadBalancerProfile(
            outbound_ip_prefixes=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileOutboundIPPrefixes(
                public_ip_prefixes=[self.models.load_balancer_models.ResourceReference(id="test_public_ip_prefix")]
            )
        )
        network_profile = self.models.ContainerServiceNetworkProfile(load_balancer_profile=load_balancer_profile)
        mc = self.models.ManagedCluster(location="test_location", network_profile=network_profile)
        ctx_1.attach_mc(mc)
        self.assertEqual(
            ctx_1.get_load_balancer_outbound_ip_prefixes(),
            [self.models.load_balancer_models.ResourceReference(id="test_public_ip_prefix")],
        )
        # custom value
        ctx_2 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "load_balancer_outbound_ip_prefixes": None,
                }
            ),
            self.models,
            DecoratorMode.UPDATE,
        )
        self.assertEqual(ctx_2.get_load_balancer_outbound_ip_prefixes(), None)
        load_balancer_profile_2 = self.models.load_balancer_models.ManagedClusterLoadBalancerProfile(
            outbound_ip_prefixes=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileOutboundIPPrefixes(
                public_ip_prefixes=[self.models.load_balancer_models.ResourceReference(id="test_public_ip_prefix")]
            )
        )
        network_profile_2 = self.models.ContainerServiceNetworkProfile(load_balancer_profile=load_balancer_profile_2)
        mc = self.models.ManagedCluster(location="test_location", network_profile=network_profile_2)
        ctx_2.attach_mc(mc)
        # UPDATE mode: the mc's prefixes are not read back
        self.assertEqual(ctx_2.get_load_balancer_outbound_ip_prefixes(), None)
    def test_get_load_balancer_outbound_ports(self):
        """get_load_balancer_outbound_ports reads the attached mc's allocated
        outbound ports in CREATE mode but ignores them (returns the raw parameter,
        None) in UPDATE mode.
        """
        # default
        ctx_1 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "load_balancer_outbound_ports": None,
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_load_balancer_outbound_ports(), None)
        load_balancer_profile = self.models.load_balancer_models.ManagedClusterLoadBalancerProfile(
            allocated_outbound_ports=10
        )
        network_profile = self.models.ContainerServiceNetworkProfile(load_balancer_profile=load_balancer_profile)
        mc = self.models.ManagedCluster(location="test_location", network_profile=network_profile)
        ctx_1.attach_mc(mc)
        self.assertEqual(ctx_1.get_load_balancer_outbound_ports(), 10)
        # custom value
        ctx_2 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "load_balancer_outbound_ports": None,
                }
            ),
            self.models,
            DecoratorMode.UPDATE,
        )
        self.assertEqual(ctx_2.get_load_balancer_outbound_ports(), None)
        load_balancer_profile_2 = self.models.load_balancer_models.ManagedClusterLoadBalancerProfile(
            allocated_outbound_ports=10
        )
        network_profile_2 = self.models.ContainerServiceNetworkProfile(load_balancer_profile=load_balancer_profile_2)
        mc = self.models.ManagedCluster(location="test_location", network_profile=network_profile_2)
        ctx_2.attach_mc(mc)
        # UPDATE mode: the mc's value is not read back
        self.assertEqual(ctx_2.get_load_balancer_outbound_ports(), None)
    def test_get_load_balancer_idle_timeout(self):
        """get_load_balancer_idle_timeout reads the attached mc's idle timeout in
        CREATE mode but ignores it (returns the raw parameter, None) in UPDATE mode.
        """
        # default
        ctx_1 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "load_balancer_idle_timeout": None,
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_load_balancer_idle_timeout(), None)
        load_balancer_profile = self.models.load_balancer_models.ManagedClusterLoadBalancerProfile(
            idle_timeout_in_minutes=10
        )
        network_profile = self.models.ContainerServiceNetworkProfile(load_balancer_profile=load_balancer_profile)
        mc = self.models.ManagedCluster(location="test_location", network_profile=network_profile)
        ctx_1.attach_mc(mc)
        self.assertEqual(ctx_1.get_load_balancer_idle_timeout(), 10)
        # custom value
        ctx_2 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "load_balancer_idle_timeout": None,
                }
            ),
            self.models,
            DecoratorMode.UPDATE,
        )
        self.assertEqual(ctx_2.get_load_balancer_idle_timeout(), None)
        load_balancer_profile_2 = self.models.load_balancer_models.ManagedClusterLoadBalancerProfile(
            idle_timeout_in_minutes=10
        )
        network_profile_2 = self.models.ContainerServiceNetworkProfile(load_balancer_profile=load_balancer_profile_2)
        mc = self.models.ManagedCluster(location="test_location", network_profile=network_profile_2)
        ctx_2.attach_mc(mc)
        # UPDATE mode: the mc's value is not read back
        self.assertEqual(ctx_2.get_load_balancer_idle_timeout(), None)
def test_get_nat_gateway_managed_outbound_ip_count(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict({"nat_gateway_managed_outbound_ip_count": None}),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_nat_gateway_managed_outbound_ip_count(), None)
nat_gateway_profile = self.models.nat_gateway_models.ManagedClusterNATGatewayProfile(
managed_outbound_ip_profile=self.models.nat_gateway_models.ManagedClusterManagedOutboundIPProfile(count=10)
)
network_profile = self.models.ContainerServiceNetworkProfile(nat_gateway_profile=nat_gateway_profile)
mc = self.models.ManagedCluster(
location="test_location",
network_profile=network_profile,
)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_nat_gateway_managed_outbound_ip_count(), 10)
def test_get_nat_gateway_idle_timeout(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict({"nat_gateway_idle_timeout": None}),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_nat_gateway_idle_timeout(), None)
nat_gateway_profile = self.models.nat_gateway_models.ManagedClusterNATGatewayProfile(
idle_timeout_in_minutes=20,
)
network_profile = self.models.ContainerServiceNetworkProfile(nat_gateway_profile=nat_gateway_profile)
mc = self.models.ManagedCluster(
location="test_location",
network_profile=network_profile,
)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_nat_gateway_idle_timeout(), 20)
    def test_get_outbound_type(self):
        """get_outbound_type defaults to "loadBalancer" (read_only returns the raw
        None), reads the attached mc's value, and validates userDefinedRouting:
        incompatible with a basic load balancer sku, requires vnet_subnet_id, and is
        mutually exclusive with explicit load balancer outbound IP settings.
        """
        # default
        ctx_1 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "outbound_type": None,
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        # read_only skips dynamic completion, so the raw None is returned
        self.assertEqual(ctx_1._get_outbound_type(read_only=True), None)
        self.assertEqual(ctx_1.get_outbound_type(), "loadBalancer")
        network_profile_1 = self.models.ContainerServiceNetworkProfile(outbound_type="test_outbound_type")
        mc = self.models.ManagedCluster(location="test_location", network_profile=network_profile_1)
        ctx_1.attach_mc(mc)
        self.assertEqual(ctx_1.get_outbound_type(), "test_outbound_type")
        # invalid parameter
        ctx_2 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "outbound_type": CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING,
                    "load_balancer_sku": "basic",
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        # fail on invalid load_balancer_sku (basic) when outbound_type is CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING
        with self.assertRaises(InvalidArgumentValueError):
            ctx_2.get_outbound_type()
        # invalid parameter
        ctx_3 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "outbound_type": CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING,
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        # an agentpool context is attached because vnet_subnet_id is resolved through it
        agentpool_ctx_3 = AKSAgentPoolContext(
            self.cmd,
            AKSAgentPoolParamDict({}),
            self.models,
            DecoratorMode.CREATE,
            AgentPoolDecoratorMode.MANAGED_CLUSTER,
        )
        ctx_3.attach_agentpool_context(agentpool_ctx_3)
        # fail on vnet_subnet_id not specified
        with self.assertRaises(RequiredArgumentMissingError):
            ctx_3.get_outbound_type()
        # invalid parameter
        ctx_4 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "outbound_type": CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING,
                    "vnet_subnet_id": "test_vnet_subnet_id",
                    "load_balancer_managed_outbound_ip_count": 10,
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        agentpool_ctx_4 = AKSAgentPoolContext(
            self.cmd,
            AKSAgentPoolParamDict({"vnet_subnet_id": "test_vnet_subnet_id"}),
            self.models,
            DecoratorMode.CREATE,
            AgentPoolDecoratorMode.MANAGED_CLUSTER,
        )
        ctx_4.attach_agentpool_context(agentpool_ctx_4)
        # fail on mutually exclusive outbound_type and managed_outbound_ip_count/outbound_ips/outbound_ip_prefixes of
        # load balancer
        with self.assertRaises(MutuallyExclusiveArgumentError):
            ctx_4.get_outbound_type()
        # invalid parameter
        ctx_5 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "outbound_type": CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING,
                    "vnet_subnet_id": "test_vnet_subnet_id",
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        agentpool_ctx_5 = AKSAgentPoolContext(
            self.cmd,
            AKSAgentPoolParamDict({"vnet_subnet_id": "test_vnet_subnet_id"}),
            self.models,
            DecoratorMode.CREATE,
            AgentPoolDecoratorMode.MANAGED_CLUSTER,
        )
        ctx_5.attach_agentpool_context(agentpool_ctx_5)
        load_balancer_profile = self.models.load_balancer_models.ManagedClusterLoadBalancerProfile(
            outbound_ip_prefixes=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileOutboundIPPrefixes(
                public_ip_prefixes=[self.models.load_balancer_models.ResourceReference(id="test_public_ip_prefix")]
            )
        )
        # fail on mutually exclusive outbound_type and managed_outbound_ip_count/outbound_ips/outbound_ip_prefixes of
        # load balancer
        with self.assertRaises(MutuallyExclusiveArgumentError):
            ctx_5.get_outbound_type(
                load_balancer_profile=load_balancer_profile,
            )
    def test_get_network_plugin(self):
        """get_network_plugin: None by default, read back from the attached mc;
        raises InvalidArgumentValueError for pod_cidr with the azure plugin, and
        RequiredArgumentMissingError for pod_cidr with no plugin at all.
        """
        # default
        ctx_1 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "network_plugin": None,
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_network_plugin(), None)
        network_profile_1 = self.models.ContainerServiceNetworkProfile(network_plugin="test_network_plugin")
        mc = self.models.ManagedCluster(location="test_location", network_profile=network_profile_1)
        ctx_1.attach_mc(mc)
        self.assertEqual(ctx_1.get_network_plugin(), "test_network_plugin")
        # invalid parameter
        ctx_2 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "network_plugin": "azure",
                    "pod_cidr": "test_pod_cidr",
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        # fail on invalid network_plugin (azure) when pod_cidr is specified
        with self.assertRaises(InvalidArgumentValueError):
            ctx_2.get_network_plugin()
        # invalid parameter
        ctx_3 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "pod_cidr": "test_pod_cidr",
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        # fail on network_plugin not specified
        with self.assertRaises(RequiredArgumentMissingError):
            ctx_3.get_network_plugin()
    def test_get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy(
        self,
    ):
        """The 5-tuple getter returns all Nones by default, reads every field from
        the attached mc's network profile, and performs the same validation as
        get_network_plugin: azure + pod_cidr is invalid, and any of these values
        without a network_plugin raises RequiredArgumentMissingError.
        """
        # default
        ctx_1 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "pod_cidr": None,
                    "service_cidr": None,
                    "dns_service_ip": None,
                    "docker_bridge_address": None,
                    "network_policy": None,
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        self.assertEqual(
            ctx_1.get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy(),
            (None, None, None, None, None),
        )
        # note: the docker bridge value lives in the mc model under docker_bridge_cidr
        network_profile_1 = self.models.ContainerServiceNetworkProfile(
            pod_cidr="test_pod_cidr",
            service_cidr="test_service_cidr",
            dns_service_ip="test_dns_service_ip",
            docker_bridge_cidr="test_docker_bridge_address",
            network_policy="test_network_policy",
        )
        mc = self.models.ManagedCluster(location="test_location", network_profile=network_profile_1)
        ctx_1.attach_mc(mc)
        self.assertEqual(
            ctx_1.get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy(),
            (
                "test_pod_cidr",
                "test_service_cidr",
                "test_dns_service_ip",
                "test_docker_bridge_address",
                "test_network_policy",
            ),
        )
        # invalid parameter
        ctx_2 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "network_plugin": "azure",
                    "pod_cidr": "test_pod_cidr",
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        # fail on invalid network_plugin (azure) when pod_cidr is specified
        with self.assertRaises(InvalidArgumentValueError):
            ctx_2.get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy()
        # invalid parameter
        ctx_3 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "pod_cidr": "test_pod_cidr",
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        # fail on network_plugin not specified
        with self.assertRaises(RequiredArgumentMissingError):
            ctx_3.get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy()
        # invalid parameter
        ctx_4 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "service_cidr": "test_service_cidr",
                    "dns_service_ip": "test_dns_service_ip",
                    "docker_bridge_address": "test_docker_bridge_address",
                    "network_policy": "test_network_policy",
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        # fail on network_plugin not specified
        with self.assertRaises(RequiredArgumentMissingError):
            ctx_4.get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy()
def test_get_addon_consts(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict({}),
self.models,
DecoratorMode.CREATE,
)
addon_consts = ctx_1.get_addon_consts()
ground_truth_addon_consts = {
"ADDONS": ADDONS,
"CONST_ACC_SGX_QUOTE_HELPER_ENABLED": CONST_ACC_SGX_QUOTE_HELPER_ENABLED,
"CONST_AZURE_POLICY_ADDON_NAME": CONST_AZURE_POLICY_ADDON_NAME,
"CONST_CONFCOM_ADDON_NAME": CONST_CONFCOM_ADDON_NAME,
"CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME": CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME,
"CONST_INGRESS_APPGW_ADDON_NAME": CONST_INGRESS_APPGW_ADDON_NAME,
"CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID": CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID,
"CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME": CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME,
"CONST_INGRESS_APPGW_SUBNET_CIDR": CONST_INGRESS_APPGW_SUBNET_CIDR,
"CONST_INGRESS_APPGW_SUBNET_ID": CONST_INGRESS_APPGW_SUBNET_ID,
"CONST_INGRESS_APPGW_WATCH_NAMESPACE": CONST_INGRESS_APPGW_WATCH_NAMESPACE,
"CONST_KUBE_DASHBOARD_ADDON_NAME": CONST_KUBE_DASHBOARD_ADDON_NAME,
"CONST_MONITORING_ADDON_NAME": CONST_MONITORING_ADDON_NAME,
"CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID": CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID,
"CONST_OPEN_SERVICE_MESH_ADDON_NAME": CONST_OPEN_SERVICE_MESH_ADDON_NAME,
"CONST_VIRTUAL_NODE_ADDON_NAME": CONST_VIRTUAL_NODE_ADDON_NAME,
"CONST_VIRTUAL_NODE_SUBNET_NAME": CONST_VIRTUAL_NODE_SUBNET_NAME,
"CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME": CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME,
"CONST_SECRET_ROTATION_ENABLED": CONST_SECRET_ROTATION_ENABLED,
"CONST_ROTATION_POLL_INTERVAL": CONST_ROTATION_POLL_INTERVAL,
"CONST_MONITORING_USING_AAD_MSI_AUTH": CONST_MONITORING_USING_AAD_MSI_AUTH,
}
self.assertEqual(addon_consts, ground_truth_addon_consts)
def test_get_enable_addons(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"enable_addons": None,
}
),
self.models,
DecoratorMode.CREATE,
)
agentpool_ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({}),
self.models,
DecoratorMode.CREATE,
AgentPoolDecoratorMode.MANAGED_CLUSTER,
)
ctx_1.attach_agentpool_context(agentpool_ctx_1)
self.assertEqual(ctx_1.get_enable_addons(), [])
# custom value
ctx_2 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"enable_addons": "http_application_routing,monitoring",
}
),
self.models,
DecoratorMode.CREATE,
)
agentpool_ctx_2 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({}),
self.models,
DecoratorMode.CREATE,
AgentPoolDecoratorMode.MANAGED_CLUSTER,
)
ctx_2.attach_agentpool_context(agentpool_ctx_2)
self.assertEqual(
ctx_2.get_enable_addons(),
["http_application_routing", "monitoring"],
)
# invalid parameter
ctx_3 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"enable_addons": "test_addon_1,test_addon_2",
}
),
self.models,
DecoratorMode.CREATE,
)
agentpool_ctx_3 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({}),
self.models,
DecoratorMode.CREATE,
AgentPoolDecoratorMode.MANAGED_CLUSTER,
)
ctx_3.attach_agentpool_context(agentpool_ctx_3)
# fail on invalid enable_addons
with self.assertRaises(InvalidArgumentValueError):
ctx_3.get_enable_addons()
# invalid parameter
ctx_4 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"enable_addons": "test_addon_1,test_addon_2,test_addon_1,test_addon_2",
}
),
self.models,
DecoratorMode.CREATE,
)
agentpool_ctx_4 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({}),
self.models,
DecoratorMode.CREATE,
AgentPoolDecoratorMode.MANAGED_CLUSTER,
)
ctx_4.attach_agentpool_context(agentpool_ctx_4)
# fail on invalid/duplicate enable_addons
with self.assertRaises(InvalidArgumentValueError):
ctx_4.get_enable_addons()
# invalid parameter
ctx_5 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"workspace_resource_id": "/test_workspace_resource_id",
"enable_addons": "",
}
),
self.models,
DecoratorMode.CREATE,
)
agentpool_ctx_5 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({}),
self.models,
DecoratorMode.CREATE,
AgentPoolDecoratorMode.MANAGED_CLUSTER,
)
ctx_5.attach_agentpool_context(agentpool_ctx_5)
# fail on enable_addons (monitoring) not specified
with self.assertRaises(RequiredArgumentMissingError):
ctx_5.get_enable_addons()
# invalid parameter
ctx_6 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"enable_addons": "virtual-node",
}
),
self.models,
DecoratorMode.CREATE,
)
agentpool_ctx_6 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({}),
self.models,
DecoratorMode.CREATE,
AgentPoolDecoratorMode.MANAGED_CLUSTER,
)
ctx_6.attach_agentpool_context(agentpool_ctx_6)
# fail on aci_subnet_name/vnet_subnet_id not specified
with self.assertRaises(RequiredArgumentMissingError):
ctx_6.get_enable_addons()
    def test_get_workspace_resource_id(self):
        """Test get_workspace_resource_id: raw read, value read back from an
        attached mc, normalization of the user value, and dynamic completion
        that creates a default workspace via mocked resource clients."""
        # default: read_only skips validation/completion and returns the raw value
        ctx_1 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "workspace_resource_id": None,
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1._get_workspace_resource_id(read_only=True), None)
        # attach an mc whose monitoring addon already carries a workspace id
        addon_profiles_1 = {
            CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile(
                enabled=True,
                config={CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: "test_workspace_resource_id"},
            )
        }
        mc = self.models.ManagedCluster(location="test_location", addon_profiles=addon_profiles_1)
        ctx_1.attach_mc(mc)
        # fail on enable_addons (monitoring) not specified
        with self.assertRaises(RequiredArgumentMissingError):
            ctx_1.get_workspace_resource_id()
        # custom value & dynamic completion: trailing slash is stripped and a
        # leading slash is prepended
        ctx_2 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "enable_addons": "monitoring",
                    "workspace_resource_id": "test_workspace_resource_id/",
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_2.get_workspace_resource_id(), "/test_workspace_resource_id")
        # dynamic completion: no workspace given, so a default one is created
        ctx_3 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "enable_addons": "monitoring",
                    "resource_group_name": "test_rg_name",
                    "workspace_resource_id": None,
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        ctx_3.set_intermediate("subscription_id", "test_subscription_id")
        # the default resource group does not exist yet, forcing creation
        cf_resource_groups = Mock(check_existence=Mock(return_value=False))
        result = Mock(id="test_workspace_resource_id")
        # poller returned by the mocked workspace creation call
        async_poller = Mock(result=Mock(return_value=result), done=Mock(return_value=True))
        cf_resources = Mock(begin_create_or_update_by_id=Mock(return_value=async_poller))
        with patch(
            "azure.cli.command_modules.acs.addonconfiguration.get_rg_location",
            return_value="test_location",
        ), patch(
            "azure.cli.command_modules.acs.addonconfiguration.cf_resource_groups",
            return_value=cf_resource_groups,
        ), patch(
            "azure.cli.command_modules.acs.addonconfiguration.cf_resources",
            return_value=cf_resources,
        ):
            self.assertEqual(ctx_3.get_workspace_resource_id(), "/test_workspace_resource_id")
        # the default resource group is probed and then created in eastus
        cf_resource_groups.check_existence.assert_called_once_with("DefaultResourceGroup-EUS")
        cf_resource_groups.create_or_update.assert_called_once_with("DefaultResourceGroup-EUS", {"location": "eastus"})
        default_workspace_resource_id = (
            "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights/workspaces/{2}".format(
                "test_subscription_id",
                "DefaultResourceGroup-EUS",
                "DefaultWorkspace-test_subscription_id-EUS",
            )
        )
        # the return values are func_name, args and kwargs
        _, args, _ = cf_resources.begin_create_or_update_by_id.mock_calls[0]
        # not interested in mocking generic_resource, so we only check the first two args
        self.assertEqual(args[:2], (default_workspace_resource_id, "2015-11-01-preview"))
def test_get_enable_msi_auth_for_monitoring(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"enable_msi_auth_for_monitoring": False,
}
),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_enable_msi_auth_for_monitoring(), False)
addon_profiles_1 = {
CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile(
enabled=True,
config={CONST_MONITORING_USING_AAD_MSI_AUTH: "True"},
)
}
mc = self.models.ManagedCluster(location="test_location", addon_profiles=addon_profiles_1)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_enable_msi_auth_for_monitoring(), True)
def test_get_virtual_node_addon_os_type(self):
# default
ctx_1 = AKSManagedClusterContext(self.cmd, AKSManagedClusterParamDict({}), self.models, DecoratorMode.CREATE)
self.assertEqual(ctx_1.get_virtual_node_addon_os_type(), "Linux")
def test_get_aci_subnet_name(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"aci_subnet_name": None,
}
),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_aci_subnet_name(), None)
addon_profiles_1 = {
CONST_VIRTUAL_NODE_ADDON_NAME
+ ctx_1.get_virtual_node_addon_os_type(): self.models.ManagedClusterAddonProfile(
enabled=True,
config={CONST_VIRTUAL_NODE_SUBNET_NAME: "test_aci_subnet_name"},
)
}
mc = self.models.ManagedCluster(location="test_location", addon_profiles=addon_profiles_1)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_aci_subnet_name(), "test_aci_subnet_name")
def test_get_appgw_name(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"appgw_name": None,
}
),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_appgw_name(), None)
addon_profiles_1 = {
CONST_INGRESS_APPGW_ADDON_NAME: self.models.ManagedClusterAddonProfile(
enabled=True,
config={CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME: "test_appgw_name"},
)
}
mc = self.models.ManagedCluster(location="test_location", addon_profiles=addon_profiles_1)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_appgw_name(), "test_appgw_name")
def test_get_appgw_subnet_cidr(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"appgw_subnet_cidr": None,
}
),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_appgw_subnet_cidr(), None)
addon_profiles_1 = {
CONST_INGRESS_APPGW_ADDON_NAME: self.models.ManagedClusterAddonProfile(
enabled=True,
config={CONST_INGRESS_APPGW_SUBNET_CIDR: "test_appgw_subnet_cidr"},
)
}
mc = self.models.ManagedCluster(location="test_location", addon_profiles=addon_profiles_1)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_appgw_subnet_cidr(), "test_appgw_subnet_cidr")
def test_get_appgw_id(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"appgw_id": None,
}
),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_appgw_id(), None)
addon_profiles_1 = {
CONST_INGRESS_APPGW_ADDON_NAME: self.models.ManagedClusterAddonProfile(
enabled=True,
config={CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID: "test_appgw_id"},
)
}
mc = self.models.ManagedCluster(location="test_location", addon_profiles=addon_profiles_1)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_appgw_id(), "test_appgw_id")
def test_get_appgw_subnet_id(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"appgw_subnet_id": None,
}
),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_appgw_subnet_id(), None)
addon_profiles_1 = {
CONST_INGRESS_APPGW_ADDON_NAME: self.models.ManagedClusterAddonProfile(
enabled=True,
config={CONST_INGRESS_APPGW_SUBNET_ID: "test_appgw_subnet_id"},
)
}
mc = self.models.ManagedCluster(location="test_location", addon_profiles=addon_profiles_1)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_appgw_subnet_id(), "test_appgw_subnet_id")
def test_get_appgw_watch_namespace(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"appgw_watch_namespace": None,
}
),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_appgw_watch_namespace(), None)
addon_profiles_1 = {
CONST_INGRESS_APPGW_ADDON_NAME: self.models.ManagedClusterAddonProfile(
enabled=True,
config={CONST_INGRESS_APPGW_WATCH_NAMESPACE: "test_appgw_watch_namespace"},
)
}
mc = self.models.ManagedCluster(location="test_location", addon_profiles=addon_profiles_1)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_appgw_watch_namespace(), "test_appgw_watch_namespace")
def test_get_enable_sgxquotehelper(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"enable_sgxquotehelper": False,
}
),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_enable_sgxquotehelper(), False)
addon_profiles_1 = {
CONST_CONFCOM_ADDON_NAME: self.models.ManagedClusterAddonProfile(
enabled=True,
config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "true"},
)
}
mc = self.models.ManagedCluster(location="test_location", addon_profiles=addon_profiles_1)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_enable_sgxquotehelper(), True)
def test_get_enable_secret_rotation(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"enable_secret_rotation": False,
}
),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_enable_secret_rotation(), False)
addon_profiles_1 = {
CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME: self.models.ManagedClusterAddonProfile(
enabled=True,
config={CONST_SECRET_ROTATION_ENABLED: "true"},
)
}
mc = self.models.ManagedCluster(location="test_location", addon_profiles=addon_profiles_1)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_enable_secret_rotation(), True)
# custom value
ctx_2 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"enable_secret_rotation": True,
}
),
self.models,
DecoratorMode.UPDATE,
)
# fail on azure keyvault secrets provider not enabled
with self.assertRaises(InvalidArgumentValueError):
ctx_2.get_enable_secret_rotation()
def test_get_disable_secret_rotation(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"disable_secret_rotation": False,
}
),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_disable_secret_rotation(), False)
# custom value
ctx_2 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"disable_secret_rotation": True,
}
),
self.models,
DecoratorMode.UPDATE,
)
# fail on azure keyvault secrets provider not enabled
with self.assertRaises(InvalidArgumentValueError):
ctx_2.get_disable_secret_rotation()
def test_get_rotation_poll_interval(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"rotation_poll_interval": None,
}
),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_rotation_poll_interval(), None)
addon_profiles_1 = {
CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME: self.models.ManagedClusterAddonProfile(
enabled=True,
config={CONST_ROTATION_POLL_INTERVAL: "2m"},
)
}
mc = self.models.ManagedCluster(location="test_location", addon_profiles=addon_profiles_1)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_rotation_poll_interval(), "2m")
# custom value
ctx_2 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"rotation_poll_interval": "2m",
}
),
self.models,
DecoratorMode.UPDATE,
)
# fail on azure keyvault secrets provider not enabled
with self.assertRaises(InvalidArgumentValueError):
ctx_2.get_rotation_poll_interval()
def test_get_enable_aad(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"enable_aad": False,
}
),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_enable_aad(), False)
aad_profile_1 = self.models.ManagedClusterAADProfile(
managed=True,
)
mc = self.models.ManagedCluster(location="test_location", aad_profile=aad_profile_1)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_enable_aad(), True)
# invalid parameter
ctx_2 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"enable_aad": True,
"aad_client_app_id": "test_aad_client_app_id",
}
),
self.models,
DecoratorMode.CREATE,
)
# fail on mutually exclusive enable_aad and aad_client_app_id/aad_server_app_id/aad_server_app_secret
with self.assertRaises(MutuallyExclusiveArgumentError):
ctx_2.get_enable_aad()
# invalid parameter
ctx_3 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"enable_aad": False,
"enable_azure_rbac": True,
}
),
self.models,
DecoratorMode.CREATE,
)
# fail on enable_aad not specified
with self.assertRaises(RequiredArgumentMissingError):
ctx_3.get_enable_aad()
# custom value
ctx_4 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"enable_aad": True,
}
),
self.models,
DecoratorMode.UPDATE,
)
aad_profile_4 = self.models.ManagedClusterAADProfile(
managed=True,
)
mc_4 = self.models.ManagedCluster(location="test_location", aad_profile=aad_profile_4)
ctx_4.attach_mc(mc_4)
# fail on managed aad already enabled
with self.assertRaises(InvalidArgumentValueError):
ctx_4.get_enable_aad()
def test_get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret(
self,
):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"aad_client_app_id": None,
"aad_server_app_id": None,
"aad_server_app_secret": None,
}
),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(
ctx_1.get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret(),
(None, None, None),
)
aad_profile_1 = self.models.ManagedClusterAADProfile(
client_app_id="test_aad_client_app_id",
server_app_id="test_aad_server_app_id",
server_app_secret="test_aad_server_app_secret",
)
mc = self.models.ManagedCluster(location="test_location", aad_profile=aad_profile_1)
ctx_1.attach_mc(mc)
self.assertEqual(
ctx_1.get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret(),
(
"test_aad_client_app_id",
"test_aad_server_app_id",
"test_aad_server_app_secret",
),
)
# invalid parameter
ctx_2 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"enable_aad": True,
"aad_client_app_id": "test_aad_client_app_id",
"aad_server_app_id": "test_aad_server_app_id",
"aad_server_app_secret": "test_aad_server_app_secret",
}
),
self.models,
DecoratorMode.CREATE,
)
# fail on mutually exclusive enable_aad and aad_client_app_id/aad_server_app_id/aad_server_app_secret
with self.assertRaises(MutuallyExclusiveArgumentError):
ctx_2.get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret()
def test_get_aad_tenant_id(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"aad_tenant_id": None,
}
),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1._get_aad_tenant_id(read_only=True), None)
self.assertEqual(ctx_1.get_aad_tenant_id(), None)
aad_profile_1 = self.models.ManagedClusterAADProfile(
tenant_id="test_aad_tenant_id",
)
mc = self.models.ManagedCluster(location="test_location", aad_profile=aad_profile_1)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_aad_tenant_id(), "test_aad_tenant_id")
# dynamic completion
ctx_2 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"enable_aad": False,
"aad_client_app_id": "test_aad_client_app_id",
}
),
self.models,
DecoratorMode.CREATE,
)
profile = Mock(get_login_credentials=Mock(return_value=(None, None, "test_aad_tenant_id")))
with patch(
"azure.cli.command_modules.acs.managed_cluster_decorator.Profile",
return_value=profile,
):
self.assertEqual(ctx_2.get_aad_tenant_id(), "test_aad_tenant_id")
# custom value
ctx_3 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"aad_tenant_id": "test_aad_tenant_id",
}
),
self.models,
DecoratorMode.UPDATE,
)
aad_profile_3 = self.models.ManagedClusterAADProfile(
managed=False,
)
mc_3 = self.models.ManagedCluster(location="test_location", aad_profile=aad_profile_3)
ctx_3.attach_mc(mc_3)
# fail on managed aad not enabled
with self.assertRaises(InvalidArgumentValueError):
self.assertEqual(
ctx_3.get_aad_tenant_id(),
"test_aad_tenant_id",
)
def test_get_aad_admin_group_object_ids(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"aad_admin_group_object_ids": None,
}
),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_aad_admin_group_object_ids(), None)
aad_profile_1 = self.models.ManagedClusterAADProfile(
admin_group_object_i_ds="test_aad_admin_group_object_ids",
)
mc = self.models.ManagedCluster(location="test_location", aad_profile=aad_profile_1)
ctx_1.attach_mc(mc)
self.assertEqual(
ctx_1.get_aad_admin_group_object_ids(),
"test_aad_admin_group_object_ids",
)
# custom value
ctx_2 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"aad_admin_group_object_ids": "test_value_1,test_value_2",
}
),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(
ctx_2.get_aad_admin_group_object_ids(),
["test_value_1", "test_value_2"],
)
# custom value
ctx_3 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"aad_admin_group_object_ids": "test_value_1,test_value_2",
}
),
self.models,
DecoratorMode.UPDATE,
)
aad_profile_3 = self.models.ManagedClusterAADProfile(
managed=False,
)
mc_3 = self.models.ManagedCluster(location="test_location", aad_profile=aad_profile_3)
ctx_3.attach_mc(mc_3)
# fail on managed aad not enabled
with self.assertRaises(InvalidArgumentValueError):
self.assertEqual(
ctx_3.get_aad_admin_group_object_ids(),
["test_value_1", "test_value_2"],
)
def test_get_disable_rbac(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"disable_rbac": None,
}
),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_disable_rbac(), None)
mc = self.models.ManagedCluster(location="test_location", enable_rbac=False)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_disable_rbac(), True)
# invalid parameter
ctx_2 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"disable_rbac": True,
"enable_azure_rbac": True,
}
),
self.models,
DecoratorMode.CREATE,
)
# fail on mutually exclusive disable_rbac and enable_azure_rbac
with self.assertRaises(MutuallyExclusiveArgumentError):
ctx_2.get_disable_rbac()
# invalid parameter
ctx_3 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"disable_rbac": True,
"enable_rbac": True,
}
),
self.models,
DecoratorMode.CREATE,
)
# fail on mutually exclusive disable_rbac and enable_rbac
with self.assertRaises(MutuallyExclusiveArgumentError):
ctx_3.get_disable_rbac()
def test_get_enable_rbac(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"enable_rbac": None,
}
),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_enable_rbac(), None)
mc = self.models.ManagedCluster(
location="test_location",
enable_rbac=True,
)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_enable_rbac(), True)
# invalid parameter
ctx_2 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"enable_rbac": True,
"disable_rbac": True,
}
),
self.models,
DecoratorMode.CREATE,
)
# fail on mutually exclusive disable_rbac and enable_rbac
with self.assertRaises(MutuallyExclusiveArgumentError):
ctx_2.get_enable_rbac()
def test_get_enable_azure_rbac(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"enable_azure_rbac": False,
}
),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_enable_azure_rbac(), False)
aad_profile_1 = self.models.ManagedClusterAADProfile(
managed=True,
enable_azure_rbac=True,
)
mc = self.models.ManagedCluster(location="test_location", aad_profile=aad_profile_1)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_enable_azure_rbac(), True)
# invalid parameter
ctx_2 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict({}),
self.models,
DecoratorMode.CREATE,
)
aad_profile_2 = self.models.ManagedClusterAADProfile(
managed=True,
enable_azure_rbac=True,
)
mc_2 = self.models.ManagedCluster(
location="test_location",
enable_rbac=False,
aad_profile=aad_profile_2,
)
ctx_2.attach_mc(mc_2)
# fail on mutually exclusive enable_azure_rbac and disable_rbac
with self.assertRaises(MutuallyExclusiveArgumentError):
ctx_2.get_enable_azure_rbac()
# invalid parameter
ctx_3 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"enable_azure_rbac": True,
"enable_aad": False,
}
),
self.models,
DecoratorMode.CREATE,
)
# fail on enable_aad not specified
with self.assertRaises(RequiredArgumentMissingError):
ctx_3.get_enable_azure_rbac()
# custom value
ctx_4 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"enable_azure_rbac": True,
}
),
self.models,
DecoratorMode.UPDATE,
)
mc_4 = self.models.ManagedCluster(location="test_location")
ctx_4.attach_mc(mc_4)
# fail on managed aad not enabled
with self.assertRaises(InvalidArgumentValueError):
ctx_4.get_enable_azure_rbac()
# custom value
ctx_5 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"enable_azure_rbac": True,
"disable_azure_rbac": True,
}
),
self.models,
DecoratorMode.UPDATE,
)
aad_profile_5 = self.models.ManagedClusterAADProfile(
managed=True,
)
mc_5 = self.models.ManagedCluster(location="test_location", aad_profile=aad_profile_5)
ctx_5.attach_mc(mc_5)
# fail on mutually exclusive enable_azure_rbac and disable_azure_rbac
with self.assertRaises(MutuallyExclusiveArgumentError):
ctx_5.get_enable_azure_rbac()
def test_get_disable_azure_rbac(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"disable_azure_rbac": False,
}
),
self.models,
DecoratorMode.UPDATE,
)
self.assertEqual(ctx_1.get_disable_azure_rbac(), False)
aad_profile_1 = self.models.ManagedClusterAADProfile(
managed=True,
enable_azure_rbac=False,
)
mc = self.models.ManagedCluster(location="test_location", aad_profile=aad_profile_1)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_disable_azure_rbac(), False)
# custom value
ctx_2 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"disable_azure_rbac": True,
}
),
self.models,
DecoratorMode.UPDATE,
)
aad_profile_2 = self.models.ManagedClusterAADProfile(
managed=False,
)
mc_2 = self.models.ManagedCluster(location="test_location", aad_profile=aad_profile_2)
ctx_2.attach_mc(mc_2)
# fail on managed aad not enabled
with self.assertRaises(InvalidArgumentValueError):
self.assertEqual(ctx_2.get_disable_azure_rbac(), True)
# custom value
ctx_3 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"enable_azure_rbac": True,
"disable_azure_rbac": True,
}
),
self.models,
DecoratorMode.UPDATE,
)
aad_profile_3 = self.models.ManagedClusterAADProfile(
managed=True,
)
mc_3 = self.models.ManagedCluster(location="test_location", aad_profile=aad_profile_3)
ctx_3.attach_mc(mc_3)
# fail on mutually exclusive enable_azure_rbac and disable_azure_rbac
with self.assertRaises(MutuallyExclusiveArgumentError):
ctx_3.get_disable_azure_rbac()
def test_get_api_server_authorized_ip_ranges(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict({"api_server_authorized_ip_ranges": None}),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(
ctx_1.get_api_server_authorized_ip_ranges(),
[],
)
api_server_access_profile = self.models.ManagedClusterAPIServerAccessProfile(
authorized_ip_ranges=["test_mc_api_server_authorized_ip_ranges"]
)
mc = self.models.ManagedCluster(
location="test_location",
api_server_access_profile=api_server_access_profile,
)
ctx_1.attach_mc(mc)
self.assertEqual(
ctx_1.get_api_server_authorized_ip_ranges(),
["test_mc_api_server_authorized_ip_ranges"],
)
# valid parameter with validation
ctx_2 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"load_balancer_sku": "standard",
"api_server_authorized_ip_ranges": "test_ip_range_1 , test_ip_range_2",
}
),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(
ctx_2.get_api_server_authorized_ip_ranges(),
["test_ip_range_1", "test_ip_range_2"],
)
# invalid parameter
ctx_3 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"load_balancer_sku": "basic",
"api_server_authorized_ip_ranges": "test_api_server_authorized_ip_ranges",
}
),
self.models,
DecoratorMode.CREATE,
)
# fail on invalid load_balancer_sku (basic) when api_server_authorized_ip_ranges is assigned
with self.assertRaises(InvalidArgumentValueError):
ctx_3.get_api_server_authorized_ip_ranges()
# invalid parameter
ctx_4 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"enable_private_cluster": True,
"api_server_authorized_ip_ranges": "test_api_server_authorized_ip_ranges",
}
),
self.models,
DecoratorMode.CREATE,
)
# fail on mutually exclusive enable_private_cluster and api_server_authorized_ip_ranges
with self.assertRaises(MutuallyExclusiveArgumentError):
ctx_4.get_api_server_authorized_ip_ranges()
# default (update mode)
ctx_5 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"api_server_authorized_ip_ranges": None,
}
),
self.models,
DecoratorMode.UPDATE,
)
self.assertEqual(ctx_5.get_api_server_authorized_ip_ranges(), None)
# custom value (update mode)
ctx_6 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"api_server_authorized_ip_ranges": "",
}
),
self.models,
DecoratorMode.UPDATE,
)
self.assertEqual(ctx_6.get_api_server_authorized_ip_ranges(), [])
# custom value (update mode)
ctx_7 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"api_server_authorized_ip_ranges": "test_api_server_authorized_ip_ranges",
}
),
self.models,
DecoratorMode.UPDATE,
)
api_server_access_profile_7 = self.models.ManagedClusterAPIServerAccessProfile(
enable_private_cluster=True,
)
mc_7 = self.models.ManagedCluster(
location="test_location",
api_server_access_profile=api_server_access_profile_7,
)
ctx_7.attach_mc(mc_7)
# fail on mutually exclusive api_server_authorized_ip_ranges and private cluster
with self.assertRaises(MutuallyExclusiveArgumentError):
ctx_7.get_api_server_authorized_ip_ranges()
def test_get_fqdn_subdomain(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict({"fqdn_subdomain": None}),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_fqdn_subdomain(), None)
mc = self.models.ManagedCluster(location="test_location", fqdn_subdomain="test_mc_fqdn_subdomain")
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_fqdn_subdomain(), "test_mc_fqdn_subdomain")
# custom value
ctx_2 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"dns_name_prefix": "test_dns_name_prefix",
"fqdn_subdomain": "test_fqdn_subdomain",
}
),
self.models,
DecoratorMode.CREATE,
)
# fail on mutually exclusive dns_name_prefix and fqdn_subdomain
with self.assertRaises(MutuallyExclusiveArgumentError):
ctx_2.get_fqdn_subdomain()
# custom value
ctx_3 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"enable_private_cluster": True,
"fqdn_subdomain": "test_fqdn_subdomain",
"private_dns_zone": "system",
}
),
self.models,
DecoratorMode.CREATE,
)
# fail on fqdn_subdomain specified and private_dns_zone equals to CONST_PRIVATE_DNS_ZONE_SYSTEM
with self.assertRaises(InvalidArgumentValueError):
ctx_3.get_fqdn_subdomain()
# custom value
ctx_4 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"enable_private_cluster": True,
"fqdn_subdomain": "test_fqdn_subdomain",
"private_dns_zone": "test_private_dns_zone",
}
),
self.models,
DecoratorMode.CREATE,
)
# fail on invalid private_dns_zone when fqdn_subdomain is specified
with self.assertRaises(InvalidArgumentValueError):
ctx_4.get_fqdn_subdomain()
    def test_get_enable_private_cluster(self):
        """Verify enable_private_cluster falls back to the attached mc and that all
        conflicting combinations (basic LB SKU, authorized IP ranges, public-fqdn
        flags, private_dns_zone without a private cluster) raise the expected errors."""
        # default
        ctx_1 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "enable_private_cluster": False,
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_enable_private_cluster(), False)
        api_server_access_profile = self.models.ManagedClusterAPIServerAccessProfile(
            enable_private_cluster=True,
        )
        mc = self.models.ManagedCluster(
            location="test_location",
            api_server_access_profile=api_server_access_profile,
        )
        ctx_1.attach_mc(mc)
        self.assertEqual(ctx_1.get_enable_private_cluster(), True)
        # invalid parameter
        ctx_2 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "enable_private_cluster": True,
                    "load_balancer_sku": "basic",
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        # fail on invalid load_balancer_sku (basic) when enable_private_cluster is specified
        with self.assertRaises(InvalidArgumentValueError):
            ctx_2.get_enable_private_cluster()
        # invalid parameter
        ctx_3 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "enable_private_cluster": True,
                    "api_server_authorized_ip_ranges": "test_api_server_authorized_ip_ranges",
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        # fail on mutually exclusive enable_private_cluster and api_server_authorized_ip_ranges
        with self.assertRaises(MutuallyExclusiveArgumentError):
            ctx_3.get_enable_private_cluster()
        # invalid parameter
        ctx_4 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "enable_private_cluster": False,
                    "disable_public_fqdn": True,
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        # fail on disable_public_fqdn specified when enable_private_cluster is not specified
        with self.assertRaises(InvalidArgumentValueError):
            ctx_4.get_enable_private_cluster()
        # invalid parameter
        ctx_5 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "enable_private_cluster": False,
                    "private_dns_zone": "system",
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        # fail on private_dns_zone specified when enable_private_cluster is not specified
        with self.assertRaises(InvalidArgumentValueError):
            ctx_5.get_enable_private_cluster()
        # custom value
        ctx_6 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "disable_public_fqdn": True,
                }
            ),
            self.models,
            DecoratorMode.UPDATE,
        )
        api_server_access_profile_6 = self.models.ManagedClusterAPIServerAccessProfile(
            enable_private_cluster=False,
        )
        mc_6 = self.models.ManagedCluster(
            location="test_location",
            api_server_access_profile=api_server_access_profile_6,
        )
        ctx_6.attach_mc(mc_6)
        # fail on disable_public_fqdn specified when enable_private_cluster is not specified
        with self.assertRaises(InvalidArgumentValueError):
            ctx_6.get_enable_private_cluster()
        # custom value
        ctx_7 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "enable_public_fqdn": True,
                }
            ),
            self.models,
            DecoratorMode.UPDATE,
        )
        api_server_access_profile_7 = self.models.ManagedClusterAPIServerAccessProfile(
            enable_private_cluster=False,
        )
        mc_7 = self.models.ManagedCluster(
            location="test_location",
            api_server_access_profile=api_server_access_profile_7,
        )
        ctx_7.attach_mc(mc_7)
        # fail on enable_public_fqdn specified when private cluster is not enabled
        with self.assertRaises(InvalidArgumentValueError):
            ctx_7.get_enable_private_cluster()
        # custom value (update mode)
        ctx_8 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "api_server_authorized_ip_ranges": "test_api_server_authorized_ip_ranges",
                }
            ),
            self.models,
            DecoratorMode.UPDATE,
        )
        api_server_access_profile_8 = self.models.ManagedClusterAPIServerAccessProfile(
            enable_private_cluster=True,
        )
        mc_8 = self.models.ManagedCluster(
            location="test_location",
            api_server_access_profile=api_server_access_profile_8,
        )
        ctx_8.attach_mc(mc_8)
        # fail on mutually exclusive api_server_authorized_ip_ranges and private cluster
        with self.assertRaises(MutuallyExclusiveArgumentError):
            ctx_8.get_enable_private_cluster()
    def test_get_disable_public_fqdn(self):
        """Verify disable_public_fqdn validation: requires a private cluster, conflicts
        with enable_public_fqdn, and rejects private_dns_zone 'none' in update mode."""
        # default
        ctx_1 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "disable_public_fqdn": False,
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_disable_public_fqdn(), False)
        api_server_access_profile = self.models.ManagedClusterAPIServerAccessProfile(
            enable_private_cluster_public_fqdn=False,
        )
        mc = self.models.ManagedCluster(
            location="test_location",
            api_server_access_profile=api_server_access_profile,
        )
        ctx_1.attach_mc(mc)
        # fail on disable_public_fqdn specified when enable_private_cluster is not specified
        with self.assertRaises(InvalidArgumentValueError):
            self.assertEqual(ctx_1.get_disable_public_fqdn(), True)
        # custom value
        ctx_2 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "disable_public_fqdn": True,
                    "enable_public_fqdn": True,
                }
            ),
            self.models,
            DecoratorMode.UPDATE,
        )
        # fail on mutually exclusive disable_public_fqdn and enable_public_fqdn
        with self.assertRaises(MutuallyExclusiveArgumentError):
            self.assertEqual(ctx_2.get_disable_public_fqdn(), True)
        # custom value
        ctx_3 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "disable_public_fqdn": True,
                }
            ),
            self.models,
            DecoratorMode.UPDATE,
        )
        # fail on private cluster not enabled in update mode
        with self.assertRaises(InvalidArgumentValueError):
            self.assertEqual(ctx_3.get_disable_public_fqdn(), True)
        # custom value
        ctx_4 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "disable_public_fqdn": True,
                }
            ),
            self.models,
            DecoratorMode.UPDATE,
        )
        api_server_access_profile_4 = self.models.ManagedClusterAPIServerAccessProfile(
            enable_private_cluster=True,
            private_dns_zone=CONST_PRIVATE_DNS_ZONE_NONE,
        )
        mc_4 = self.models.ManagedCluster(
            location="test_location",
            api_server_access_profile=api_server_access_profile_4,
        )
        ctx_4.attach_mc(mc_4)
        # fail on invalid private_dns_zone (none) when disable_public_fqdn is specified
        with self.assertRaises(InvalidArgumentValueError):
            self.assertEqual(ctx_4.get_disable_public_fqdn(), True)
    def test_get_enable_public_fqdn(self):
        """Verify enable_public_fqdn validation: conflicts with disable_public_fqdn
        and requires the attached mc to have a private cluster in update mode."""
        # default
        ctx_1 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "enable_public_fqdn": False,
                }
            ),
            self.models,
            DecoratorMode.UPDATE,
        )
        self.assertEqual(ctx_1.get_enable_public_fqdn(), False)
        # custom value
        ctx_2 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "disable_public_fqdn": True,
                    "enable_public_fqdn": True,
                }
            ),
            self.models,
            DecoratorMode.UPDATE,
        )
        # fail on mutually exclusive disable_public_fqdn and enable_public_fqdn
        with self.assertRaises(MutuallyExclusiveArgumentError):
            self.assertEqual(ctx_2.get_enable_public_fqdn(), True)
        # custom value
        ctx_3 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "enable_public_fqdn": True,
                }
            ),
            self.models,
            DecoratorMode.UPDATE,
        )
        api_server_access_profile_3 = self.models.ManagedClusterAPIServerAccessProfile(
            enable_private_cluster=False,
        )
        mc_3 = self.models.ManagedCluster(
            location="test_location",
            api_server_access_profile=api_server_access_profile_3,
        )
        ctx_3.attach_mc(mc_3)
        # fail on private cluster not enabled in update mode
        with self.assertRaises(InvalidArgumentValueError):
            self.assertEqual(ctx_3.get_enable_public_fqdn(), True)
    def test_get_private_dns_zone(self):
        """Verify private_dns_zone retrieval and validation: requires a private cluster,
        rejects invalid zone values, and conflicts with fqdn_subdomain or 'none' zone
        when public-fqdn flags are involved."""
        # default
        ctx_1 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "private_dns_zone": None,
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_private_dns_zone(), None)
        api_server_access_profile = self.models.ManagedClusterAPIServerAccessProfile(
            private_dns_zone="test_private_dns_zone",
        )
        mc = self.models.ManagedCluster(
            location="test_location",
            api_server_access_profile=api_server_access_profile,
        )
        ctx_1.attach_mc(mc)
        # fail on private_dns_zone specified when enable_private_cluster is not specified
        with self.assertRaises(InvalidArgumentValueError):
            self.assertEqual(ctx_1.get_private_dns_zone(), "test_private_dns_zone")
        # invalid parameter
        ctx_2 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "enable_private_cluster": True,
                    "private_dns_zone": "test_private_dns_zone",
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        # fail on invalid private_dns_zone
        with self.assertRaises(InvalidArgumentValueError):
            self.assertEqual(ctx_2.get_private_dns_zone(), "test_private_dns_zone")
        # invalid parameter
        ctx_3 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "enable_private_cluster": True,
                    "private_dns_zone": CONST_PRIVATE_DNS_ZONE_SYSTEM,
                    "fqdn_subdomain": "test_fqdn_subdomain",
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        # fail on invalid private_dns_zone when fqdn_subdomain is specified
        with self.assertRaises(InvalidArgumentValueError):
            self.assertEqual(ctx_3.get_private_dns_zone(), CONST_PRIVATE_DNS_ZONE_SYSTEM)
        # custom value
        ctx_4 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "disable_public_fqdn": True,
                }
            ),
            self.models,
            DecoratorMode.UPDATE,
        )
        api_server_access_profile_4 = self.models.ManagedClusterAPIServerAccessProfile(
            enable_private_cluster=True,
            private_dns_zone=CONST_PRIVATE_DNS_ZONE_NONE,
        )
        mc_4 = self.models.ManagedCluster(
            location="test_location",
            api_server_access_profile=api_server_access_profile_4,
        )
        ctx_4.attach_mc(mc_4)
        # fail on invalid private_dns_zone (none) when disable_public_fqdn is specified
        with self.assertRaises(InvalidArgumentValueError):
            self.assertEqual(ctx_4.get_private_dns_zone(), CONST_PRIVATE_DNS_ZONE_NONE)
def test_get_user_assignd_identity_from_mc(self):
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict({}),
self.models,
decorator_mode=DecoratorMode.CREATE,
)
user_assigned_identity_1 = {
"test_assign_identity": self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()
}
identity_1 = self.models.ManagedClusterIdentity(
type="UserAssigned",
user_assigned_identities=user_assigned_identity_1,
)
mc_1 = self.models.ManagedCluster(
location="test_location",
identity=identity_1,
)
ctx_1.attach_mc(mc_1)
self.assertEqual(ctx_1.get_user_assignd_identity_from_mc(), "test_assign_identity")
    def test_get_assign_kubelet_identity(self):
        """Verify assign_kubelet_identity handling: falls back to the mc's identity
        profile, requires assign_identity in create mode, and in update mode prompts
        for confirmation (exiting early when the user declines)."""
        # default
        ctx_1 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "assign_identity": "test_assign_identity",
                    "assign_kubelet_identity": None,
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_assign_kubelet_identity(), None)
        identity_profile = {
            "kubeletidentity": self.models.UserAssignedIdentity(
                resource_id="test_assign_kubelet_identity",
            )
        }
        mc = self.models.ManagedCluster(
            location="test_location",
            identity_profile=identity_profile,
        )
        ctx_1.attach_mc(mc)
        self.assertEqual(ctx_1.get_assign_kubelet_identity(), "test_assign_kubelet_identity")
        # invalid parameter
        ctx_2 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "assign_identity": None,
                    "assign_kubelet_identity": "test_assign_kubelet_identity",
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        # fail on assign_identity not specified
        with self.assertRaises(RequiredArgumentMissingError):
            self.assertEqual(
                ctx_2.get_assign_kubelet_identity(),
                "test_assign_kubelet_identity",
            )
        # update
        ctx_3 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "assign_identity": None,
                    "assign_kubelet_identity": "test_assign_kubelet_identity",
                }
            ),
            self.models,
            DecoratorMode.UPDATE,
        )
        user_assigned_identity_3 = {
            "test_assign_identity": self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()
        }
        identity_3 = self.models.ManagedClusterIdentity(
            type="UserAssigned",
            user_assigned_identities=user_assigned_identity_3,
        )
        mc_3 = self.models.ManagedCluster(
            location="test_location",
            identity=identity_3,
        )
        ctx_3.attach_mc(mc_3)
        # user confirms the prompt, and the mc already has a user-assigned identity
        with patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.prompt_y_n",
            return_value=True,
        ):
            self.assertEqual(
                ctx_3.get_assign_kubelet_identity(),
                "test_assign_kubelet_identity",
            )
        # update
        ctx_4 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "assign_identity": None,
                    "assign_kubelet_identity": "test_assign_kubelet_identity",
                }
            ),
            self.models,
            DecoratorMode.UPDATE,
        )
        # fail on assign_identity not specified and not existed in mc
        with patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.prompt_y_n",
            return_value=True,
        ), self.assertRaises(RequiredArgumentMissingError):
            ctx_4.get_assign_kubelet_identity()
        # update
        ctx_5 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "assign_identity": None,
                    "assign_kubelet_identity": "test_assign_kubelet_identity",
                }
            ),
            self.models,
            DecoratorMode.UPDATE,
        )
        # fail on no confirm
        with patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.prompt_y_n",
            return_value=False,
        ), self.assertRaises(DecoratorEarlyExitException):
            ctx_5.get_assign_kubelet_identity()
def test_get_auto_upgrade_channel(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"auto_upgrade_channel": None,
}
),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_auto_upgrade_channel(), None)
auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile(upgrade_channel="test_auto_upgrade_channel")
mc = self.models.ManagedCluster(
location="test_location",
auto_upgrade_profile=auto_upgrade_profile,
)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_auto_upgrade_channel(), "test_auto_upgrade_channel")
    def test_get_cluster_autoscaler_profile(self):
        """Verify cluster_autoscaler_profile retrieval: falls back to the attached mc,
        and in update mode the raw dict (read_only) differs from the merged profile
        object returned by the normal getter."""
        # default
        ctx_1 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "cluster_autoscaler_profile": None,
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_cluster_autoscaler_profile(), None)
        mc = self.models.ManagedCluster(
            location="test_location",
            auto_scaler_profile="test_cluster_autoscaler_profile",
        )
        ctx_1.attach_mc(mc)
        self.assertEqual(
            ctx_1.get_cluster_autoscaler_profile(),
            "test_cluster_autoscaler_profile",
        )
        # custom value (update mode)
        ctx_2 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "cluster_autoscaler_profile": {
                        "scan-interval": "30s",
                        "expander": "least-waste",
                    },
                }
            ),
            self.models,
            DecoratorMode.UPDATE,
        )
        auto_scaler_profile_2 = self.models.ManagedClusterPropertiesAutoScalerProfile(
            scan_interval="10s",
            expander="random",
        )
        mc_2 = self.models.ManagedCluster(
            location="test_location",
            auto_scaler_profile=auto_scaler_profile_2,
        )
        ctx_2.attach_mc(mc_2)
        # read_only returns the raw user-supplied dict without merging into the mc profile
        self.assertEqual(
            ctx_2._get_cluster_autoscaler_profile(read_only=True),
            {
                "scan-interval": "30s",
                "expander": "least-waste",
            },
        )
        # the normal getter merges user values over the existing mc profile fields
        self.assertEqual(
            ctx_2.get_cluster_autoscaler_profile(),
            {
                "additional_properties": {},
                "balance_similar_node_groups": None,
                "expander": "least-waste",
                "max_empty_bulk_delete": None,
                "max_graceful_termination_sec": None,
                "max_node_provision_time": None,
                "max_total_unready_percentage": None,
                "new_pod_scale_up_delay": None,
                "ok_total_unready_count": None,
                "scan_interval": "30s",
                "scale_down_delay_after_add": None,
                "scale_down_delay_after_delete": None,
                "scale_down_delay_after_failure": None,
                "scale_down_unneeded_time": None,
                "scale_down_unready_time": None,
                "scale_down_utilization_threshold": None,
                "skip_nodes_with_local_storage": None,
                "skip_nodes_with_system_pods": None,
            },
        )
    def test_get_uptime_sla(self):
        """Verify uptime_sla: reads the Paid SKU tier from the attached mc in create
        mode, is not overridden by the mc in update mode, and is suppressed when
        no_uptime_sla is also set."""
        # default
        ctx_1 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "uptime_sla": False,
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_uptime_sla(), False)
        sku = self.models.ManagedClusterSKU(
            name="Basic",
            tier="Paid",
        )
        mc = self.models.ManagedCluster(
            location="test_location",
            sku=sku,
        )
        ctx_1.attach_mc(mc)
        self.assertEqual(ctx_1.get_uptime_sla(), True)
        # custom value
        ctx_2 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "uptime_sla": False,
                }
            ),
            self.models,
            DecoratorMode.UPDATE,
        )
        sku_2 = self.models.ManagedClusterSKU(
            name="Basic",
            tier="Paid",
        )
        mc_2 = self.models.ManagedCluster(
            location="test_location",
            sku=sku_2,
        )
        ctx_2.attach_mc(mc_2)
        self.assertEqual(ctx_2.get_uptime_sla(), False)
        # custom value
        ctx_3 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "uptime_sla": True,
                    "no_uptime_sla": True,
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        sku_3 = self.models.ManagedClusterSKU(
            name="Basic",
            tier="Free",
        )
        mc_3 = self.models.ManagedCluster(
            location="test_location",
            sku=sku_3,
        )
        ctx_3.attach_mc(mc_3)
        self.assertEqual(ctx_3.get_uptime_sla(), False)
    def test_get_no_uptime_sla(self):
        """Verify no_uptime_sla: not overridden by the mc's SKU, and mutually
        exclusive with uptime_sla in update mode."""
        # default
        ctx_1 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "no_uptime_sla": False,
                }
            ),
            self.models,
            DecoratorMode.UPDATE,
        )
        self.assertEqual(ctx_1.get_no_uptime_sla(), False)
        sku = self.models.ManagedClusterSKU(
            name="Basic",
            tier="Paid",
        )
        mc = self.models.ManagedCluster(
            location="test_location",
            sku=sku,
        )
        ctx_1.attach_mc(mc)
        self.assertEqual(ctx_1.get_no_uptime_sla(), False)
        # custom value
        ctx_2 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "uptime_sla": True,
                    "no_uptime_sla": True,
                }
            ),
            self.models,
            DecoratorMode.UPDATE,
        )
        sku_2 = self.models.ManagedClusterSKU(
            name="Basic",
            tier="Free",
        )
        mc_2 = self.models.ManagedCluster(
            location="test_location",
            sku=sku_2,
        )
        ctx_2.attach_mc(mc_2)
        # fail on mutually exclusive uptime_sla and no_uptime_sla
        with self.assertRaises(MutuallyExclusiveArgumentError):
            ctx_2.get_uptime_sla()
        # fail on mutually exclusive uptime_sla and no_uptime_sla
        with self.assertRaises(MutuallyExclusiveArgumentError):
            ctx_2.get_no_uptime_sla()
    def test_get_disable_local_accounts(self):
        """Verify disable_local_accounts: falls back to the mc in create mode, and is
        mutually exclusive with enable_local_accounts only in update mode."""
        # default
        ctx_1 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict({"disable_local_accounts": False}),
            self.models,
            DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_disable_local_accounts(), False)
        mc_1 = self.models.ManagedCluster(
            location="test_location",
            disable_local_accounts=True,
        )
        ctx_1.attach_mc(mc_1)
        self.assertEqual(ctx_1.get_disable_local_accounts(), True)
        # custom value
        ctx_2 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict({"disable_local_accounts": True, "enable_local_accounts": True}),
            self.models,
            DecoratorMode.CREATE,
        )
        # in create mode enable_local_accounts is ignored, so no conflict is raised
        self.assertEqual(ctx_2.get_disable_local_accounts(), True)
        # custom value
        ctx_3 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict({"disable_local_accounts": True, "enable_local_accounts": True}),
            self.models,
            DecoratorMode.UPDATE,
        )
        # fail on mutually exclusive disable_local_accounts and enable_local_accounts
        with self.assertRaises(MutuallyExclusiveArgumentError):
            ctx_3.get_disable_local_accounts()
def test_get_enable_local_accounts(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict({"enable_local_accounts": False}),
self.models,
DecoratorMode.UPDATE,
)
self.assertEqual(ctx_1.get_enable_local_accounts(), False)
# custom value
ctx_2 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict({"enable_local_accounts": True, "disable_local_accounts": True}),
self.models,
DecoratorMode.UPDATE,
)
# fail on mutually exclusive disable_local_accounts and enable_local_accounts
with self.assertRaises(MutuallyExclusiveArgumentError):
ctx_2.get_enable_local_accounts()
def test_get_edge_zone(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{
"edge_zone": None,
}
),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_edge_zone(), None)
extended_location = self.models.ExtendedLocation(
name="test_edge_zone",
type=self.models.ExtendedLocationTypes.EDGE_ZONE,
)
mc = self.models.ManagedCluster(
location="test_location",
extended_location=extended_location,
)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_edge_zone(), "test_edge_zone")
def test_get_node_resource_group(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict({"node_resource_group": None}),
self.models,
decorator_mode=DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_node_resource_group(), None)
mc = self.models.ManagedCluster(
location="test_location",
node_resource_group="test_node_resource_group",
)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_node_resource_group(), "test_node_resource_group")
def test_get_defender_config(self):
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict(
{"enable_defender": True, "defender_config": get_test_data_file_path("defenderconfig.json")}
),
self.models,
DecoratorMode.CREATE,
)
defender_config_1 = ctx_1.get_defender_config()
ground_truth_defender_config_1 = self.models.ManagedClusterSecurityProfileAzureDefender(
enabled=True, log_analytics_workspace_resource_id="test_workspace_resource_id"
)
self.assertEqual(defender_config_1, ground_truth_defender_config_1)
# custom value
ctx_2 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict({"enable_defender": True, "defender_config": "fake-path"}),
self.models,
DecoratorMode.CREATE,
)
# fail on invalid file path
with self.assertRaises(InvalidArgumentValueError):
ctx_2.get_defender_config()
def test_get_yes(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict({"yes": False}),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_yes(), False)
def test_get_no_wait(self):
# default
ctx_1 = AKSManagedClusterContext(
self.cmd,
AKSManagedClusterParamDict({"no_wait": False}),
self.models,
DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_no_wait(), False)
    def test_get_aks_custom_headers(self):
        """Verify aks_custom_headers parsing: empty dict when unset, a comma-separated
        key=value string is parsed into a dict, and attaching an mc does not change it."""
        # default
        ctx_1 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "aks_custom_headers": None,
                }
            ),
            self.models,
            DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_aks_custom_headers(), {})
        # custom value
        ctx_2 = AKSManagedClusterContext(
            self.cmd,
            AKSManagedClusterParamDict(
                {
                    "aks_custom_headers": "abc=def,xyz=123",
                }
            ),
            self.models,
            DecoratorMode.UPDATE,
        )
        self.assertEqual(ctx_2.get_aks_custom_headers(), {"abc": "def", "xyz": "123"})
        service_principal_profile_2 = self.models.ManagedClusterServicePrincipalProfile(
            client_id="test_service_principal", secret="test_client_secret"
        )
        mc = self.models.ManagedCluster(
            location="test_location",
            service_principal_profile=service_principal_profile_2,
        )
        ctx_2.attach_mc(mc)
        # headers come solely from the parameter dict; the attached mc has no effect
        self.assertEqual(ctx_2.get_aks_custom_headers(), {"abc": "def", "xyz": "123"})
class AKSManagedClusterCreateDecoratorTestCase(unittest.TestCase):
def setUp(self):
self.cli_ctx = MockCLI()
self.cmd = MockCmd(self.cli_ctx)
self.models = AKSManagedClusterModels(self.cmd, ResourceType.MGMT_CONTAINERSERVICE)
self.client = MockClient()
def test_init(self):
dec_1 = AKSManagedClusterCreateDecorator(
self.cmd,
self.client,
{},
ResourceType.MGMT_CONTAINERSERVICE,
)
self.assertIsNotNone(dec_1.models)
self.assertIsNotNone(dec_1.context)
self.assertIsNotNone(dec_1.agentpool_decorator)
self.assertIsNotNone(dec_1.agentpool_context)
def test_ensure_mc(self):
dec_1 = AKSManagedClusterCreateDecorator(
self.cmd,
self.client,
{},
ResourceType.MGMT_CONTAINERSERVICE,
)
# fail on passing the wrong mc object
with self.assertRaises(CLIInternalError):
dec_1._ensure_mc(None)
mc_1 = self.models.ManagedCluster(location="test_location")
# fail on inconsistent mc with internal context
with self.assertRaises(CLIInternalError):
dec_1._ensure_mc(mc_1)
    def test_remove_restore_defaults_in_mc(self):
        """Verify that _remove_defaults_in_mc strips default values (stashing them as
        an intermediate) and _restore_defaults_in_mc puts them back, round-tripping
        the mc object; both reject a wrong/None mc."""
        dec_1 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {},
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        # fail on passing the wrong mc object
        with self.assertRaises(CLIInternalError):
            dec_1._remove_defaults_in_mc(None)
        # fail on passing the wrong mc object
        with self.assertRaises(CLIInternalError):
            dec_1._restore_defaults_in_mc(None)
        mc_1 = self.models.ManagedCluster(location="test_location")
        dec_1.context.attach_mc(mc_1)
        dec_mc_1 = dec_1._remove_defaults_in_mc(mc_1)
        ground_truth_mc_1 = self.models.ManagedCluster(location="test_location")
        # removal clears additional_properties to None on the returned mc
        ground_truth_mc_1.additional_properties = None
        self.assertEqual(dec_mc_1, ground_truth_mc_1)
        # the removed defaults are stashed in the context as an intermediate
        self.assertEqual(dec_1.context.get_intermediate("defaults_in_mc"), {"additional_properties": {}})
        dec_mc_2 = dec_1._restore_defaults_in_mc(dec_mc_1)
        ground_truth_mc_2 = self.models.ManagedCluster(location="test_location")
        self.assertEqual(dec_mc_2, ground_truth_mc_2)
def test_init_mc(self):
dec_1 = AKSManagedClusterCreateDecorator(
self.cmd,
self.client,
{
"location": "test_location",
},
ResourceType.MGMT_CONTAINERSERVICE,
)
dec_mc = dec_1.init_mc()
ground_truth_mc = self.models.ManagedCluster(
location="test_location",
)
self.assertEqual(dec_mc, ground_truth_mc)
self.assertEqual(dec_mc, dec_1.context.mc)
    def test_set_up_agentpool_profile(self):
        """Verify set_up_agentpool_profile builds the system-mode agent pool profile
        from raw parameters, filling os_sku from the (mocked) snapshot referenced by
        snapshot_id."""
        dec_1 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "nodepool_name": "test_np_name",
                "node_vm_size": "Standard_DSx_vy",
                "os_sku": None,
                "snapshot_id": "test_snapshot_id",
                "vnet_subnet_id": "test_vnet_subnet_id",
                "pod_subnet_id": "test_pod_subnet_id",
                "enable_node_public_ip": True,
                "node_public_ip_prefix_id": "test_node_public_ip_prefix_id",
                "enable_cluster_autoscaler": True,
                "min_count": 5,
                "max_count": 20,
                "node_count": 10,
                "nodepool_tags": {"k1": "v1"},
                "nodepool_labels": {"k1": "v1", "k2": "v2"},
                "node_osdisk_size": 100,
                "node_osdisk_type": "test_os_disk_type",
                "vm_set_type": None,
                "zones": ["tz1", "tz2"],
                "ppg": "test_ppg_id",
                "max_pods": 50,
                "enable_encryption_at_host": True,
                "enable_ultra_ssd": True,
                "enable_fips_image": True,
                "kubelet_config": None,
                "linux_os_config": None,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        dec_1.context.attach_mc(mc_1)
        # snapshot lookup is mocked; os_sku/vm_size may be inherited from it
        mock_snapshot = Mock(
            kubernetes_version="",
            os_sku="snapshot_os_sku",
            os_type=None,
            vm_size="snapshot_vm_size",
        )
        with patch(
            "azure.cli.command_modules.acs.agentpool_decorator.get_snapshot_by_snapshot_id",
            return_value=mock_snapshot,
        ):
            dec_mc_1 = dec_1.set_up_agentpool_profile(mc_1)
        ground_truth_agentpool_profile_1 = self.models.ManagedClusterAgentPoolProfile(
            name="test_np_name",
            orchestrator_version="",
            vm_size="Standard_DSx_vy",
            os_type=CONST_DEFAULT_NODE_OS_TYPE,
            os_sku="snapshot_os_sku",
            creation_data=self.models.CreationData(source_resource_id="test_snapshot_id"),
            vnet_subnet_id="test_vnet_subnet_id",
            pod_subnet_id="test_pod_subnet_id",
            enable_node_public_ip=True,
            node_public_ip_prefix_id="test_node_public_ip_prefix_id",
            enable_auto_scaling=True,
            min_count=5,
            max_count=20,
            count=10,
            node_labels={"k1": "v1", "k2": "v2"},
            tags={"k1": "v1"},
            node_taints=[],
            os_disk_size_gb=100,
            os_disk_type="test_os_disk_type",
            upgrade_settings=self.models.AgentPoolUpgradeSettings(),
            type=CONST_VIRTUAL_MACHINE_SCALE_SETS,
            availability_zones=["tz1", "tz2"],
            proximity_placement_group_id="test_ppg_id",
            max_pods=50,
            enable_encryption_at_host=True,
            enable_ultra_ssd=True,
            enable_fips=True,
            mode=CONST_NODEPOOL_MODE_SYSTEM,
        )
        ground_truth_mc_1 = self.models.ManagedCluster(location="test_location")
        ground_truth_mc_1.agent_pool_profiles = [ground_truth_agentpool_profile_1]
        self.assertEqual(dec_mc_1, ground_truth_mc_1)
    def test_set_up_mc_properties(self):
        """Verify set_up_mc_properties maps raw flags to mc fields (tags, version,
        derived dns_prefix from cluster/rg/subscription, disk encryption set,
        local-accounts and RBAC toggles)."""
        dec_1 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "name": "test_cluster",
                "resource_group_name": "test_rg_name",
                "tags": {"t1": "v1"},
                "kubernetes_version": "test_kubernetes_version",
                "dns_name_prefix": None,
                "node_osdisk_diskencryptionset_id": "test_node_osdisk_diskencryptionset_id",
                "disable_local_accounts": True,
                "disable_rbac": True,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        dec_1.context.attach_mc(mc_1)
        # fail on passing the wrong mc object
        with self.assertRaises(CLIInternalError):
            dec_1.set_up_mc_properties(None)
        # subscription id is mocked to make the derived dns_prefix deterministic
        mock_profile = Mock(get_subscription_id=Mock(return_value="1234-5678-9012"))
        with patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.Profile",
            return_value=mock_profile,
        ):
            dec_mc_1 = dec_1.set_up_mc_properties(mc_1)
        ground_truth_mc_1 = self.models.ManagedCluster(
            location="test_location",
            tags={"t1": "v1"},
            kubernetes_version="test_kubernetes_version",
            dns_prefix="testcluste-testrgname-1234-5",
            disk_encryption_set_id="test_node_osdisk_diskencryptionset_id",
            disable_local_accounts=True,
            enable_rbac=False,
        )
        self.assertEqual(dec_mc_1, ground_truth_mc_1)
    def test_set_up_linux_profile(self):
        """Verify set_up_linux_profile: with an SSH key a linux profile is populated
        from admin_username and the key; with no_ssh_key the profile is omitted."""
        import paramiko

        # generate a real key pair so the public key passes validation
        key = paramiko.RSAKey.generate(2048)
        public_key = "{} {}".format(key.get_name(), key.get_base64())
        # default value in `aks_create`
        dec_1 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "admin_username": "azureuser",
                "no_ssh_key": False,
                "ssh_key_value": public_key,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        dec_1.context.attach_mc(mc_1)
        # fail on passing the wrong mc object
        with self.assertRaises(CLIInternalError):
            dec_1.set_up_linux_profile(None)
        dec_mc_1 = dec_1.set_up_linux_profile(mc_1)
        ssh_config_1 = self.models.ContainerServiceSshConfiguration(
            public_keys=[self.models.ContainerServiceSshPublicKey(key_data=public_key)]
        )
        linux_profile_1 = self.models.ContainerServiceLinuxProfile(admin_username="azureuser", ssh=ssh_config_1)
        ground_truth_mc_1 = self.models.ManagedCluster(location="test_location", linux_profile=linux_profile_1)
        self.assertEqual(dec_mc_1, ground_truth_mc_1)
        # custom value
        dec_2 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "admin_username": "test_user",
                "no_ssh_key": True,
                "ssh_key_value": "test_key",
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_2 = self.models.ManagedCluster(location="test_location")
        dec_2.context.attach_mc(mc_2)
        dec_mc_2 = dec_2.set_up_linux_profile(mc_2)
        # no_ssh_key=True means no linux profile is set on the mc
        ground_truth_mc_2 = self.models.ManagedCluster(location="test_location")
        self.assertEqual(dec_mc_2, ground_truth_mc_2)
    def test_set_up_windows_profile(self):
        """Verify set_up_windows_profile: no profile when unset; a full profile with
        AHUB license and gMSA settings when configured (password prompted via mock);
        and gMSA without a windows admin name raises."""
        # default value in `aks_create`
        dec_1 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "windows_admin_username": None,
                "windows_admin_password": None,
                "enable_ahub": False,
                "enable_windows_gmsa": False,
                "gmsa_dns_server": None,
                "gmsa_root_domain_name": None,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        dec_1.context.attach_mc(mc_1)
        # fail on passing the wrong mc object
        with self.assertRaises(CLIInternalError):
            dec_1.set_up_windows_profile(None)
        dec_mc_1 = dec_1.set_up_windows_profile(mc_1)
        ground_truth_mc_1 = self.models.ManagedCluster(location="test_location")
        self.assertEqual(dec_mc_1, ground_truth_mc_1)
        # custom value
        dec_2 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "windows_admin_username": "test_win_admin_name",
                "windows_admin_password": None,
                "enable_ahub": True,
                "enable_windows_gmsa": True,
                "gmsa_dns_server": "test_gmsa_dns_server",
                "gmsa_root_domain_name": "test_gmsa_root_domain_name",
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_2 = self.models.ManagedCluster(location="test_location")
        dec_2.context.attach_mc(mc_2)
        # the missing admin password is obtained through the mocked prompt
        with patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.prompt_pass",
            return_value="test_win_admin_pd",
        ):
            dec_mc_2 = dec_2.set_up_windows_profile(mc_2)
        gmsa_profile_2 = self.models.WindowsGmsaProfile(
            enabled=True,
            dns_server="test_gmsa_dns_server",
            root_domain_name="test_gmsa_root_domain_name",
        )
        windows_profile_2 = self.models.ManagedClusterWindowsProfile(
            # [SuppressMessage("Microsoft.Security", "CS002:SecretInNextLine", Justification="fake secrets in unit test")]
            admin_username="test_win_admin_name",
            admin_password="test_win_admin_pd",
            license_type="Windows_Server",
            gmsa_profile=gmsa_profile_2,
        )
        ground_truth_mc_2 = self.models.ManagedCluster(location="test_location", windows_profile=windows_profile_2)
        self.assertEqual(dec_mc_2, ground_truth_mc_2)
        # custom value
        dec_3 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "windows_admin_username": None,
                "windows_admin_password": None,
                "enable_ahub": True,
                "enable_windows_gmsa": True,
                "gmsa_dns_server": None,
                "gmsa_root_domain_name": None,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_3 = self.models.ManagedCluster(location="test_location")
        dec_3.context.attach_mc(mc_3)
        # fail on gMSA enabled without a windows admin username
        with self.assertRaises(RequiredArgumentMissingError):
            dec_3.set_up_windows_profile(mc_3)
def test_set_up_service_principal_profile(self):
# default value in `aks_create`
dec_1 = AKSManagedClusterCreateDecorator(
self.cmd,
self.client,
{
"enable_managed_identity": True,
"service_principal": None,
"client_secret": None,
},
ResourceType.MGMT_CONTAINERSERVICE,
)
mc_1 = self.models.ManagedCluster(location="test_location")
dec_1.context.attach_mc(mc_1)
# fail on passing the wrong mc object
with self.assertRaises(CLIInternalError):
dec_1.set_up_service_principal_profile(None)
dec_mc_1 = dec_1.set_up_service_principal_profile(mc_1)
ground_truth_mc_1 = self.models.ManagedCluster(location="test_location")
self.assertEqual(dec_mc_1, ground_truth_mc_1)
# custom value
dec_2 = AKSManagedClusterCreateDecorator(
self.cmd,
self.client,
{
"name": "test_name",
"resource_group_name": "test_rg_name",
"enable_managed_identity": True,
"service_principal": "test_service_principal",
"client_secret": "test_client_secret",
},
ResourceType.MGMT_CONTAINERSERVICE,
)
mc_2 = self.models.ManagedCluster(location="test_location")
dec_2.context.attach_mc(mc_2)
dec_2.context.set_intermediate("subscription_id", "1234-5678", overwrite_exists=True)
with patch(
"azure.cli.command_modules.acs.managed_cluster_decorator.get_rg_location",
return_value="test_location",
), patch(
"azure.cli.command_modules.acs._graph.get_graph_rbac_management_client",
return_value=None,
):
dec_mc_2 = dec_2.set_up_service_principal_profile(mc_2)
service_principal_profile_2 = self.models.ManagedClusterServicePrincipalProfile(
client_id="test_service_principal", secret="test_client_secret"
)
ground_truth_mc_2 = self.models.ManagedCluster(
location="test_location",
service_principal_profile=service_principal_profile_2,
)
self.assertEqual(dec_mc_2, ground_truth_mc_2)
    def test_process_add_role_assignment_for_vnet_subnet(self):
        """Verify the "Network Contributor" role assignment flow for a user-supplied vnet subnet.

        Scenarios covered:
        - no vnet subnet: no-op, post-creation granting flag is False;
        - subnet given before a service principal exists, user confirms (yes=True):
          granting is deferred, flag is True;
        - same but user declines the prompt: DecoratorEarlyExitException, flag unset;
        - service-principal cluster: role assigned to the SP client id immediately;
        - user-assigned managed identity: role assigned to the identity's client id.
        """
        # default value in `aks_create`
        dec_1 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "vnet_subnet_id": None,
                "skip_subnet_role_assignment": False,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        dec_1.context.attach_mc(mc_1)
        # fail on passing the wrong mc object
        with self.assertRaises(CLIInternalError):
            dec_1.process_add_role_assignment_for_vnet_subnet(None)
        dec_1.process_add_role_assignment_for_vnet_subnet(mc_1)
        # no subnet -> nothing deferred to post-creation
        self.assertEqual(
            dec_1.context.get_intermediate("need_post_creation_vnet_permission_granting"),
            False,
        )
        # custom value
        dec_2 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "vnet_subnet_id": "test_vnet_subnet_id",
                "skip_subnet_role_assignment": False,
                "assign_identity": None,
                "yes": True,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_2 = self.models.ManagedCluster(location="test_location")
        dec_2.context.attach_mc(mc_2)
        with patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.subnet_role_assignment_exists",
            return_value=False,
        ):
            dec_2.process_add_role_assignment_for_vnet_subnet(mc_2)
        # no identity/SP yet and user confirmed -> granting happens after creation
        self.assertEqual(
            dec_2.context.get_intermediate("need_post_creation_vnet_permission_granting"),
            True,
        )
        # custom value
        dec_3 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "vnet_subnet_id": "test_vnet_subnet_id",
                "skip_subnet_role_assignment": False,
                "assign_identity": None,
                "yes": False,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_3 = self.models.ManagedCluster(location="test_location")
        dec_3.context.attach_mc(mc_3)
        with patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.subnet_role_assignment_exists",
            return_value=False,
        ), patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.prompt_y_n",
            return_value=False,
        ):
            # fail on user does not confirm
            with self.assertRaises(DecoratorEarlyExitException):
                dec_3.process_add_role_assignment_for_vnet_subnet(mc_3)
        # early exit leaves the intermediate untouched (None)
        self.assertEqual(
            dec_3.context.get_intermediate("need_post_creation_vnet_permission_granting"),
            None,
        )
        # custom value
        dec_4 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "vnet_subnet_id": "test_vnet_subnet_id",
                "skip_subnet_role_assignment": False,
                "assign_identity": None,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        service_principal_profile_4 = self.models.ManagedClusterServicePrincipalProfile(
            client_id="test_service_principal", secret="test_client_secret"
        )
        mc_4 = self.models.ManagedCluster(
            location="test_location",
            service_principal_profile=service_principal_profile_4,
        )
        dec_4.context.attach_mc(mc_4)
        with patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.subnet_role_assignment_exists",
            return_value=False,
        ), patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.add_role_assignment",
            return_value=True,
        ) as add_role_assignment:
            dec_4.process_add_role_assignment_for_vnet_subnet(mc_4)
        # SP cluster -> the role is assigned immediately to the SP's client id
        add_role_assignment.assert_called_once_with(
            self.cmd,
            "Network Contributor",
            "test_service_principal",
            scope="test_vnet_subnet_id",
        )
        self.assertEqual(
            dec_4.context.get_intermediate("need_post_creation_vnet_permission_granting"),
            False,
        )
        # custom value
        identity_obj = Mock(
            client_id="test_client_id",
        )
        with patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.AKSManagedClusterContext.get_identity_by_msi_client",
            return_value=identity_obj,
        ):
            dec_5 = AKSManagedClusterCreateDecorator(
                self.cmd,
                self.client,
                {
                    "enable_managed_identity": True,
                    "vnet_subnet_id": "test_vnet_subnet_id",
                    "skip_subnet_role_assignment": False,
                    "assign_identity": "test_assign_identity",
                },
                ResourceType.MGMT_CONTAINERSERVICE,
            )
            mc_5 = self.models.ManagedCluster(
                location="test_location",
            )
            dec_5.context.attach_mc(mc_5)
            with patch(
                "azure.cli.command_modules.acs.managed_cluster_decorator.subnet_role_assignment_exists",
                return_value=False,
            ), patch(
                "azure.cli.command_modules.acs.managed_cluster_decorator.add_role_assignment",
                return_value=False,
            ) as add_role_assignment:
                dec_5.process_add_role_assignment_for_vnet_subnet(mc_5)
            # user-assigned identity -> the role goes to the identity's client id
            add_role_assignment.assert_called_once_with(
                self.cmd,
                "Network Contributor",
                "test_client_id",
                scope="test_vnet_subnet_id",
            )
            self.assertEqual(
                dec_5.context.get_intermediate("need_post_creation_vnet_permission_granting"),
                False,
            )
def test_process_attach_acr(self):
# default value in `aks_create`
dec_1 = AKSManagedClusterCreateDecorator(
self.cmd,
self.client,
{
"attach_acr": None,
},
ResourceType.MGMT_CONTAINERSERVICE,
)
mc_1 = self.models.ManagedCluster(location="test_location")
dec_1.context.attach_mc(mc_1)
# fail on passing the wrong mc object
with self.assertRaises(CLIInternalError):
dec_1.process_attach_acr(None)
dec_1.process_attach_acr(mc_1)
# custom value
dec_2 = AKSManagedClusterCreateDecorator(
self.cmd,
self.client,
{
"attach_acr": "test_attach_acr",
"enable_managed_identity": True,
"no_wait": True,
},
ResourceType.MGMT_CONTAINERSERVICE,
)
mc_2 = self.models.ManagedCluster(location="test_location")
dec_2.context.attach_mc(mc_2)
# fail on mutually exclusive attach_acr, enable_managed_identity and no_wait
with self.assertRaises(MutuallyExclusiveArgumentError):
dec_2.process_attach_acr(mc_2)
# custom value
dec_3 = AKSManagedClusterCreateDecorator(
self.cmd,
self.client,
{
"attach_acr": "test_attach_acr",
"enable_managed_identity": False,
},
ResourceType.MGMT_CONTAINERSERVICE,
)
mc_3 = self.models.ManagedCluster(location="test_location")
dec_3.context.attach_mc(mc_3)
# fail on service_principal/client_secret not specified
with self.assertRaises(RequiredArgumentMissingError):
dec_3.process_attach_acr(mc_3)
service_principal_profile_3 = self.models.ManagedClusterServicePrincipalProfile(
client_id="test_service_principal", secret="test_client_secret"
)
mc_3.service_principal_profile = service_principal_profile_3
dec_3.context.set_intermediate("subscription_id", "test_subscription_id")
registry = Mock(id="test_registry_id")
with patch(
"azure.cli.command_modules.acs._roleassignments.get_resource_by_name",
return_value=registry,
), patch("azure.cli.command_modules.acs._roleassignments.ensure_aks_acr_role_assignment") as ensure_assignment:
dec_3.process_attach_acr(mc_3)
ensure_assignment.assert_called_once_with(self.cmd, "test_service_principal", "test_registry_id", False, True)
    def test_set_up_network_profile(self):
        """Verify set_up_network_profile against SDK defaults, a standard-LB cluster with
        outbound IP / NAT gateway customization, and a basic-LB cluster.

        NOTE(review): the odd kwarg names (managed_outbound_i_ps, outbound_i_ps,
        public_i_ps) come from the autogenerated SDK models.
        """
        # default value in `aks_create`
        dec_1 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "load_balancer_sku": None,
                "load_balancer_managed_outbound_ip_count": None,
                "load_balancer_outbound_ips": None,
                "load_balancer_outbound_ip_prefixes": None,
                "load_balancer_outbound_ports": None,
                "load_balancer_idle_timeout": None,
                "outbound_type": None,
                "network_plugin": None,
                "pod_cidr": None,
                "service_cidr": None,
                "dns_service_ip": None,
                "docker_bridge_cidr": None,
                "network_policy": None,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        dec_1.context.attach_mc(mc_1)
        # fail on passing the wrong mc object
        with self.assertRaises(CLIInternalError):
            dec_1.set_up_network_profile(None)
        dec_mc_1 = dec_1.set_up_network_profile(mc_1)
        network_profile_1 = self.models.ContainerServiceNetworkProfile(
            network_plugin="kubenet",  # default value in SDK
            pod_cidr="10.244.0.0/16",  # default value in SDK
            service_cidr="10.0.0.0/16",  # default value in SDK
            dns_service_ip="10.0.0.10",  # default value in SDK
            docker_bridge_cidr="172.17.0.1/16",  # default value in SDK
            load_balancer_sku="standard",
            outbound_type="loadBalancer",
        )
        ground_truth_mc_1 = self.models.ManagedCluster(location="test_location", network_profile=network_profile_1)
        self.assertEqual(dec_mc_1, ground_truth_mc_1)
        # custom value: managed outbound IPs + explicit outbound IPs + NAT gateway settings
        dec_2 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "load_balancer_sku": None,
                "load_balancer_managed_outbound_ip_count": 3,
                "load_balancer_outbound_ips": "test_ip_1,test_ip_2",
                "load_balancer_outbound_ip_prefixes": None,
                "load_balancer_outbound_ports": 5,
                "load_balancer_idle_timeout": None,
                "outbound_type": None,
                "network_plugin": "kubenet",
                "pod_cidr": "10.246.0.0/16",
                "service_cidr": None,
                "dns_service_ip": None,
                "docker_bridge_cidr": None,
                "network_policy": None,
                "nat_gateway_managed_outbound_ip_count": 10,
                "nat_gateway_idle_timeout": 20,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_2 = self.models.ManagedCluster(location="test_location")
        dec_2.context.attach_mc(mc_2)
        dec_mc_2 = dec_2.set_up_network_profile(mc_2)
        load_balancer_profile_2 = self.models.load_balancer_models.ManagedClusterLoadBalancerProfile(
            managed_outbound_i_ps=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileManagedOutboundIPs(
                count=3
            ),
            outbound_i_ps=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileOutboundIPs(
                public_i_ps=[
                    self.models.load_balancer_models.ResourceReference(id="test_ip_1"),
                    self.models.load_balancer_models.ResourceReference(id="test_ip_2"),
                ]
            ),
            allocated_outbound_ports=5,
        )
        nat_gateway_profile_2 = self.models.nat_gateway_models.ManagedClusterNATGatewayProfile(
            managed_outbound_ip_profile=self.models.nat_gateway_models.ManagedClusterManagedOutboundIPProfile(count=10),
            idle_timeout_in_minutes=20,
        )
        network_profile_2 = self.models.ContainerServiceNetworkProfile(
            network_plugin="kubenet",
            pod_cidr="10.246.0.0/16",
            service_cidr=None,  # overwritten to None
            dns_service_ip=None,  # overwritten to None
            docker_bridge_cidr=None,  # overwritten to None
            load_balancer_sku="standard",
            outbound_type="loadBalancer",
            load_balancer_profile=load_balancer_profile_2,
            nat_gateway_profile=nat_gateway_profile_2,
        )
        ground_truth_mc_2 = self.models.ManagedCluster(location="test_location", network_profile=network_profile_2)
        self.assertEqual(dec_mc_2, ground_truth_mc_2)
        # custom value: basic LB sku silently drops the load balancer profile
        dec_3 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "load_balancer_sku": "basic",
                "load_balancer_managed_outbound_ip_count": 5,
                "load_balancer_outbound_ips": None,
                "load_balancer_outbound_ip_prefixes": "test_ip_prefix_1,test_ip_prefix_2",
                "load_balancer_outbound_ports": None,
                "load_balancer_idle_timeout": 20,
                "outbound_type": None,
                "network_plugin": None,
                "pod_cidr": None,
                "service_cidr": None,
                "dns_service_ip": None,
                "docker_bridge_cidr": None,
                "network_policy": None,
                "nat_gateway_managed_outbound_ip_count": None,
                "nat_gateway_idle_timeout": None,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_3 = self.models.ManagedCluster(location="test_location")
        dec_3.context.attach_mc(mc_3)
        dec_mc_3 = dec_3.set_up_network_profile(mc_3)
        network_profile_3 = self.models.ContainerServiceNetworkProfile(
            network_plugin="kubenet",  # default value in SDK
            pod_cidr="10.244.0.0/16",  # default value in SDK
            service_cidr="10.0.0.0/16",  # default value in SDK
            dns_service_ip="10.0.0.10",  # default value in SDK
            docker_bridge_cidr="172.17.0.1/16",  # default value in SDK
            load_balancer_sku="basic",
            outbound_type="loadBalancer",
            load_balancer_profile=None,  # profile dropped when lb sku is basic
        )
        ground_truth_mc_3 = self.models.ManagedCluster(location="test_location", network_profile=network_profile_3)
        self.assertEqual(dec_mc_3, ground_truth_mc_3)
def test_build_http_application_routing_addon_profile(self):
# default
dec_1 = AKSManagedClusterCreateDecorator(
self.cmd,
self.client,
{},
ResourceType.MGMT_CONTAINERSERVICE,
)
http_application_routing_addon_profile = dec_1.build_http_application_routing_addon_profile()
ground_truth_http_application_routing_addon_profile = self.models.ManagedClusterAddonProfile(
enabled=True,
)
self.assertEqual(
http_application_routing_addon_profile,
ground_truth_http_application_routing_addon_profile,
)
def test_build_kube_dashboard_addon_profile(self):
# default
dec_1 = AKSManagedClusterCreateDecorator(
self.cmd,
self.client,
{},
ResourceType.MGMT_CONTAINERSERVICE,
)
kube_dashboard_addon_profile = dec_1.build_kube_dashboard_addon_profile()
ground_truth_kube_dashboard_addon_profile = self.models.ManagedClusterAddonProfile(
enabled=True,
)
self.assertEqual(
kube_dashboard_addon_profile,
ground_truth_kube_dashboard_addon_profile,
)
def test_build_monitoring_addon_profile(self):
# default
dec_1 = AKSManagedClusterCreateDecorator(
self.cmd,
self.client,
{
"location": "test_location",
"enable_addons": "monitoring",
"workspace_resource_id": "test_workspace_resource_id",
"enable_msi_auth_for_monitoring": False,
},
ResourceType.MGMT_CONTAINERSERVICE,
)
mock_profile = Mock(get_subscription_id=Mock(return_value="1234-5678-9012"))
with patch(
"azure.cli.command_modules.acs.managed_cluster_decorator.Profile",
return_value=mock_profile,
), patch(
"azure.cli.command_modules.acs.managed_cluster_decorator.ensure_container_insights_for_monitoring",
return_value=None,
):
self.assertEqual(dec_1.context.get_intermediate("monitoring_addon_enabled"), None)
monitoring_addon_profile = dec_1.build_monitoring_addon_profile()
ground_truth_monitoring_addon_profile = self.models.ManagedClusterAddonProfile(
enabled=True,
config={
CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: "/test_workspace_resource_id",
CONST_MONITORING_USING_AAD_MSI_AUTH: "False",
},
)
self.assertEqual(monitoring_addon_profile, ground_truth_monitoring_addon_profile)
self.assertEqual(dec_1.context.get_intermediate("monitoring_addon_enabled"), True)
# custom value
dec_2 = AKSManagedClusterCreateDecorator(
self.cmd,
self.client,
{
"enable_addons": "",
"workspace_resource_id": "test_workspace_resource_id",
},
ResourceType.MGMT_CONTAINERSERVICE,
)
# fail on enable_addons (monitoring) not specified
with self.assertRaises(RequiredArgumentMissingError):
dec_2.build_monitoring_addon_profile()
def test_build_azure_policy_addon_profile(self):
# default
dec_1 = AKSManagedClusterCreateDecorator(
self.cmd,
self.client,
{},
ResourceType.MGMT_CONTAINERSERVICE,
)
azure_policy_addon_profile = dec_1.build_azure_policy_addon_profile()
ground_truth_azure_policy_addon_profile = self.models.ManagedClusterAddonProfile(
enabled=True,
)
self.assertEqual(azure_policy_addon_profile, ground_truth_azure_policy_addon_profile)
def test_build_virtual_node_addon_profile(self):
# default
dec_1 = AKSManagedClusterCreateDecorator(
self.cmd,
self.client,
{"aci_subnet_name": "test_aci_subnet_name"},
ResourceType.MGMT_CONTAINERSERVICE,
)
self.assertEqual(dec_1.context.get_intermediate("virtual_node_addon_enabled"), None)
virtual_node_addon_profile = dec_1.build_virtual_node_addon_profile()
ground_truth_virtual_node_addon_profile = self.models.ManagedClusterAddonProfile(
enabled=True,
config={CONST_VIRTUAL_NODE_SUBNET_NAME: "test_aci_subnet_name"},
)
self.assertEqual(virtual_node_addon_profile, ground_truth_virtual_node_addon_profile)
self.assertEqual(dec_1.context.get_intermediate("virtual_node_addon_enabled"), True)
def test_build_ingress_appgw_addon_profile(self):
# default
dec_1 = AKSManagedClusterCreateDecorator(
self.cmd,
self.client,
{},
ResourceType.MGMT_CONTAINERSERVICE,
)
self.assertEqual(dec_1.context.get_intermediate("ingress_appgw_addon_enabled"), None)
ingress_appgw_addon_profile = dec_1.build_ingress_appgw_addon_profile()
ground_truth_ingress_appgw_addon_profile = self.models.ManagedClusterAddonProfile(
enabled=True,
config={},
)
self.assertEqual(
ingress_appgw_addon_profile,
ground_truth_ingress_appgw_addon_profile,
)
self.assertEqual(dec_1.context.get_intermediate("ingress_appgw_addon_enabled"), True)
# custom value
dec_2 = AKSManagedClusterCreateDecorator(
self.cmd,
self.client,
{
"appgw_name": "test_appgw_name",
"appgw_subnet_cidr": "test_appgw_subnet_cidr",
"appgw_id": "test_appgw_id",
"appgw_subnet_id": "test_appgw_subnet_id",
"appgw_watch_namespace": "test_appgw_watch_namespace",
},
ResourceType.MGMT_CONTAINERSERVICE,
)
self.assertEqual(dec_2.context.get_intermediate("ingress_appgw_addon_enabled"), None)
ingress_appgw_addon_profile = dec_2.build_ingress_appgw_addon_profile()
ground_truth_ingress_appgw_addon_profile = self.models.ManagedClusterAddonProfile(
enabled=True,
config={
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME: "test_appgw_name",
CONST_INGRESS_APPGW_SUBNET_CIDR: "test_appgw_subnet_cidr",
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID: "test_appgw_id",
CONST_INGRESS_APPGW_SUBNET_ID: "test_appgw_subnet_id",
CONST_INGRESS_APPGW_WATCH_NAMESPACE: "test_appgw_watch_namespace",
},
)
self.assertEqual(
ingress_appgw_addon_profile,
ground_truth_ingress_appgw_addon_profile,
)
self.assertEqual(dec_2.context.get_intermediate("ingress_appgw_addon_enabled"), True)
def test_build_confcom_addon_profile(self):
# default
dec_1 = AKSManagedClusterCreateDecorator(
self.cmd,
self.client,
{},
ResourceType.MGMT_CONTAINERSERVICE,
)
confcom_addon_profile = dec_1.build_confcom_addon_profile()
ground_truth_confcom_addon_profile = self.models.ManagedClusterAddonProfile(
enabled=True,
config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"},
)
self.assertEqual(confcom_addon_profile, ground_truth_confcom_addon_profile)
# custom value
dec_2 = AKSManagedClusterCreateDecorator(
self.cmd,
self.client,
{"enable_sgxquotehelper": True},
ResourceType.MGMT_CONTAINERSERVICE,
)
confcom_addon_profile = dec_2.build_confcom_addon_profile()
ground_truth_confcom_addon_profile = self.models.ManagedClusterAddonProfile(
enabled=True,
config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "true"},
)
self.assertEqual(confcom_addon_profile, ground_truth_confcom_addon_profile)
def test_build_open_service_mesh_addon_profile(self):
# default
dec_1 = AKSManagedClusterCreateDecorator(
self.cmd,
self.client,
{},
ResourceType.MGMT_CONTAINERSERVICE,
)
open_service_mesh_addon_profile = dec_1.build_open_service_mesh_addon_profile()
ground_truth_open_service_mesh_addon_profile = self.models.ManagedClusterAddonProfile(
enabled=True,
config={},
)
self.assertEqual(
open_service_mesh_addon_profile,
ground_truth_open_service_mesh_addon_profile,
)
def test_build_azure_keyvault_secrets_provider_addon_profile(self):
# default
dec_1 = AKSManagedClusterCreateDecorator(
self.cmd,
self.client,
{},
ResourceType.MGMT_CONTAINERSERVICE,
)
azure_keyvault_secrets_provider_addon_profile = dec_1.build_azure_keyvault_secrets_provider_addon_profile()
ground_truth_azure_keyvault_secrets_provider_addon_profile = self.models.ManagedClusterAddonProfile(
enabled=True,
config={
CONST_SECRET_ROTATION_ENABLED: "false",
CONST_ROTATION_POLL_INTERVAL: "2m",
},
)
self.assertEqual(
azure_keyvault_secrets_provider_addon_profile,
ground_truth_azure_keyvault_secrets_provider_addon_profile,
)
# custom value
dec_2 = AKSManagedClusterCreateDecorator(
self.cmd,
self.client,
{"enable_secret_rotation": True, "rotation_poll_interval": "30m"},
ResourceType.MGMT_CONTAINERSERVICE,
)
azure_keyvault_secrets_provider_addon_profile = dec_2.build_azure_keyvault_secrets_provider_addon_profile()
ground_truth_azure_keyvault_secrets_provider_addon_profile = self.models.ManagedClusterAddonProfile(
enabled=True,
config={
CONST_SECRET_ROTATION_ENABLED: "true",
CONST_ROTATION_POLL_INTERVAL: "30m",
},
)
self.assertEqual(
azure_keyvault_secrets_provider_addon_profile,
ground_truth_azure_keyvault_secrets_provider_addon_profile,
)
    def test_set_up_addon_profiles(self):
        """Verify set_up_addon_profiles end to end: no addons, every supported addon at
        once, an invalid addon name, and virtual-node without its required subnet."""
        # default value in `aks_create`
        dec_1 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "enable_addons": None,
                "workspace_resource_id": None,
                "enable_msi_auth_for_monitoring": None,
                "aci_subnet_name": None,
                "appgw_name": None,
                "appgw_subnet_cidr": None,
                "appgw_id": None,
                "appgw_subnet_id": None,
                "appgw_watch_namespace": None,
                "enable_sgxquotehelper": False,
                "enable_secret_rotation": False,
                "rotation_poll_interval": None,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        dec_1.context.attach_mc(mc_1)
        # fail on passing the wrong mc object
        with self.assertRaises(CLIInternalError):
            dec_1.set_up_addon_profiles(None)
        dec_mc_1 = dec_1.set_up_addon_profiles(mc_1)
        # no addons requested -> empty addon_profiles dict, no intermediates recorded
        ground_truth_mc_1 = self.models.ManagedCluster(location="test_location", addon_profiles={})
        self.assertEqual(dec_mc_1, ground_truth_mc_1)
        self.assertEqual(dec_1.context.get_intermediate("monitoring_addon_enabled"), None)
        self.assertEqual(dec_1.context.get_intermediate("virtual_node_addon_enabled"), None)
        self.assertEqual(dec_1.context.get_intermediate("ingress_appgw_addon_enabled"), None)
        # custom value: every supported addon enabled simultaneously
        dec_2 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "location": "test_location",
                "vnet_subnet_id": "test_vnet_subnet_id",
                "enable_addons": "http_application_routing,monitoring,virtual-node,kube-dashboard,azure-policy,ingress-appgw,confcom,open-service-mesh,azure-keyvault-secrets-provider",
                "workspace_resource_id": "test_workspace_resource_id",
                "enable_msi_auth_for_monitoring": False,
                "aci_subnet_name": "test_aci_subnet_name",
                "appgw_name": "test_appgw_name",
                "appgw_subnet_cidr": "test_appgw_subnet_cidr",
                "appgw_id": "test_appgw_id",
                "appgw_subnet_id": "test_appgw_subnet_id",
                "appgw_watch_namespace": "test_appgw_watch_namespace",
                "enable_sgxquotehelper": True,
                "enable_secret_rotation": True,
                "rotation_poll_interval": "30m",
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_2 = self.models.ManagedCluster(location="test_location")
        dec_2.context.attach_mc(mc_2)
        mock_profile = Mock(get_subscription_id=Mock(return_value="1234-5678-9012"))
        with patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.Profile",
            return_value=mock_profile,
        ), patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.ensure_container_insights_for_monitoring",
            return_value=None,
        ):
            dec_mc_2 = dec_2.set_up_addon_profiles(mc_2)
        addon_profiles_2 = {
            CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME: self.models.ManagedClusterAddonProfile(
                enabled=True,
            ),
            CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile(
                enabled=True,
                config={
                    CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: "/test_workspace_resource_id",
                    CONST_MONITORING_USING_AAD_MSI_AUTH: "False",
                },
            ),
            # virtual node addon key is suffixed with the os type
            CONST_VIRTUAL_NODE_ADDON_NAME
            + dec_2.context.get_virtual_node_addon_os_type(): self.models.ManagedClusterAddonProfile(
                enabled=True,
                config={CONST_VIRTUAL_NODE_SUBNET_NAME: "test_aci_subnet_name"},
            ),
            CONST_KUBE_DASHBOARD_ADDON_NAME: self.models.ManagedClusterAddonProfile(
                enabled=True,
            ),
            CONST_AZURE_POLICY_ADDON_NAME: self.models.ManagedClusterAddonProfile(
                enabled=True,
            ),
            CONST_INGRESS_APPGW_ADDON_NAME: self.models.ManagedClusterAddonProfile(
                enabled=True,
                config={
                    CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME: "test_appgw_name",
                    CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID: "test_appgw_id",
                    CONST_INGRESS_APPGW_SUBNET_ID: "test_appgw_subnet_id",
                    CONST_INGRESS_APPGW_SUBNET_CIDR: "test_appgw_subnet_cidr",
                    CONST_INGRESS_APPGW_WATCH_NAMESPACE: "test_appgw_watch_namespace",
                },
            ),
            CONST_CONFCOM_ADDON_NAME: self.models.ManagedClusterAddonProfile(
                enabled=True,
                config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "true"},
            ),
            CONST_OPEN_SERVICE_MESH_ADDON_NAME: self.models.ManagedClusterAddonProfile(
                enabled=True,
                config={},
            ),
            CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME: self.models.ManagedClusterAddonProfile(
                enabled=True,
                config={
                    CONST_SECRET_ROTATION_ENABLED: "true",
                    CONST_ROTATION_POLL_INTERVAL: "30m",
                },
            ),
        }
        ground_truth_mc_2 = self.models.ManagedCluster(location="test_location", addon_profiles=addon_profiles_2)
        self.assertEqual(dec_mc_2, ground_truth_mc_2)
        self.assertEqual(dec_2.context.get_intermediate("monitoring_addon_enabled"), True)
        self.assertEqual(dec_2.context.get_intermediate("virtual_node_addon_enabled"), True)
        self.assertEqual(dec_2.context.get_intermediate("ingress_appgw_addon_enabled"), True)
        # custom value: unrecognized addon name
        dec_3 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "enable_addons": "test_enable_addons",
                "workspace_resource_id": None,
                "aci_subnet_name": None,
                "appgw_name": None,
                "appgw_subnet_cidr": None,
                "appgw_id": None,
                "appgw_subnet_id": None,
                "appgw_watch_namespace": None,
                "enable_sgxquotehelper": False,
                "useAADAuth": None,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_3 = self.models.ManagedCluster(location="test_location")
        dec_3.context.attach_mc(mc_3)
        # fail on invalid enable_addons
        with self.assertRaises(InvalidArgumentValueError):
            dec_3.set_up_addon_profiles(mc_3)
        # custom value: virtual-node requires an aci subnet and a vnet subnet
        dec_4 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "enable_addons": "virtual-node",
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_4 = self.models.ManagedCluster(location="test_location")
        dec_4.context.attach_mc(mc_4)
        # fail on aci_subnet_name/vnet_subnet_id not specified
        with self.assertRaises(RequiredArgumentMissingError):
            dec_4.set_up_addon_profiles(mc_4)
    def test_set_up_aad_profile(self):
        """Verify set_up_aad_profile: no AAD, managed AAD with azure RBAC, legacy
        (client-app based) AAD with tenant lookup, and the enable_azure_rbac /
        disable_rbac conflict."""
        # default value in `aks_create`
        dec_1 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "enable_aad": False,
                "aad_client_app_id": None,
                "aad_server_app_id": None,
                "aad_server_app_secret": None,
                "aad_tenant_id": None,
                "aad_admin_group_object_ids": None,
                "enable_azure_rbac": False,
                "disable_rbac": None,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        dec_1.context.attach_mc(mc_1)
        # fail on passing the wrong mc object
        with self.assertRaises(CLIInternalError):
            dec_1.set_up_aad_profile(None)
        dec_mc_1 = dec_1.set_up_aad_profile(mc_1)
        ground_truth_mc_1 = self.models.ManagedCluster(
            location="test_location",
        )
        self.assertEqual(dec_mc_1, ground_truth_mc_1)
        # custom value: managed AAD + azure RBAC
        dec_2 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "enable_aad": True,
                "aad_client_app_id": None,
                "aad_server_app_id": None,
                "aad_server_app_secret": None,
                "aad_tenant_id": None,
                # deliberately comma-free: parsed as a single admin group id
                "aad_admin_group_object_ids": "test_value_1test_value_2",
                "enable_azure_rbac": True,
                "disable_rbac": None,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_2 = self.models.ManagedCluster(location="test_location")
        dec_2.context.attach_mc(mc_2)
        dec_mc_2 = dec_2.set_up_aad_profile(mc_2)
        aad_profile_2 = self.models.ManagedClusterAADProfile(
            managed=True,
            enable_azure_rbac=True,
            admin_group_object_i_ds=["test_value_1test_value_2"],
        )
        ground_truth_mc_2 = self.models.ManagedCluster(location="test_location", aad_profile=aad_profile_2)
        self.assertEqual(dec_mc_2, ground_truth_mc_2)
        # custom value: legacy AAD via client app id; tenant id resolved from login credentials
        dec_3 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "enable_aad": False,
                "aad_client_app_id": "test_aad_client_app_id",
                "aad_server_app_id": None,
                "aad_server_app_secret": None,
                "aad_tenant_id": None,
                "aad_admin_group_object_ids": None,
                "enable_azure_rbac": False,
                "disable_rbac": None,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_3 = self.models.ManagedCluster(location="test_location")
        dec_3.context.attach_mc(mc_3)
        profile = Mock(get_login_credentials=Mock(return_value=(None, None, "test_aad_tenant_id")))
        with patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.Profile",
            return_value=profile,
        ):
            dec_mc_3 = dec_3.set_up_aad_profile(mc_3)
        aad_profile_3 = self.models.ManagedClusterAADProfile(
            client_app_id="test_aad_client_app_id",
            tenant_id="test_aad_tenant_id",
        )
        ground_truth_mc_3 = self.models.ManagedCluster(location="test_location", aad_profile=aad_profile_3)
        self.assertEqual(dec_mc_3, ground_truth_mc_3)
        # custom value
        dec_4 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "enable_aad": True,
                "aad_client_app_id": None,
                "aad_server_app_id": None,
                "aad_server_app_secret": None,
                "aad_tenant_id": None,
                "aad_admin_group_object_ids": None,
                "enable_azure_rbac": True,
                "disable_rbac": True,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_4 = self.models.ManagedCluster(location="test_location")
        dec_4.context.attach_mc(mc_4)
        # fail on mutually exclusive enable_azure_rbac and disable_rbac
        with self.assertRaises(MutuallyExclusiveArgumentError):
            dec_4.set_up_aad_profile(mc_4)
    def test_set_up_api_server_access_profile(self):
        """Verify set_up_api_server_access_profile: defaults, authorized IP ranges,
        private cluster with fqdn subdomain, and the two invalid combinations
        (private cluster + IP ranges, system private DNS zone + fqdn subdomain)."""
        # default value in `aks_create`
        dec_1 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "api_server_authorized_ip_ranges": None,
                "enable_private_cluster": False,
                "disable_public_fqdn": False,
                "private_dns_zone": None,
                "fqdn_subdomain": None,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        dec_1.context.attach_mc(mc_1)
        # fail on passing the wrong mc object
        with self.assertRaises(CLIInternalError):
            dec_1.set_up_api_server_access_profile(None)
        dec_mc_1 = dec_1.set_up_api_server_access_profile(mc_1)
        ground_truth_mc_1 = self.models.ManagedCluster(
            location="test_location",
        )
        self.assertEqual(dec_mc_1, ground_truth_mc_1)
        # custom value: comma-separated IP ranges are split and stripped
        dec_2 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "api_server_authorized_ip_ranges": "test_ip_1, test_ip_2",
                "enable_private_cluster": False,
                "disable_public_fqdn": False,
                "private_dns_zone": None,
                "fqdn_subdomain": None,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_2 = self.models.ManagedCluster(location="test_location")
        dec_2.context.attach_mc(mc_2)
        dec_mc_2 = dec_2.set_up_api_server_access_profile(mc_2)
        api_server_access_profile_2 = self.models.ManagedClusterAPIServerAccessProfile(
            authorized_ip_ranges=["test_ip_1", "test_ip_2"],
            enable_private_cluster=None,
        )
        ground_truth_mc_2 = self.models.ManagedCluster(
            location="test_location",
            api_server_access_profile=api_server_access_profile_2,
        )
        self.assertEqual(dec_mc_2, ground_truth_mc_2)
        # custom value: private cluster with public fqdn disabled and a fqdn subdomain
        dec_3 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "api_server_authorized_ip_ranges": None,
                "enable_private_cluster": True,
                "disable_public_fqdn": True,
                "private_dns_zone": None,
                "fqdn_subdomain": "test_fqdn_subdomain",
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_3 = self.models.ManagedCluster(location="test_location")
        dec_3.context.attach_mc(mc_3)
        dec_mc_3 = dec_3.set_up_api_server_access_profile(mc_3)
        api_server_access_profile_3 = self.models.ManagedClusterAPIServerAccessProfile(
            authorized_ip_ranges=[],
            enable_private_cluster=True,
            enable_private_cluster_public_fqdn=False,
        )
        ground_truth_mc_3 = self.models.ManagedCluster(
            location="test_location",
            api_server_access_profile=api_server_access_profile_3,
            fqdn_subdomain="test_fqdn_subdomain",
        )
        self.assertEqual(dec_mc_3, ground_truth_mc_3)
        # invalid value
        dec_4 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "api_server_authorized_ip_ranges": "test_api_server_authorized_ip_ranges",
                "enable_private_cluster": True,
                "disable_public_fqdn": False,
                "private_dns_zone": None,
                "fqdn_subdomain": None,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_4 = self.models.ManagedCluster(location="test_location")
        dec_4.context.attach_mc(mc_4)
        # fail on mutually exclusive enable_private_cluster and api_server_authorized_ip_ranges
        with self.assertRaises(MutuallyExclusiveArgumentError):
            dec_4.set_up_api_server_access_profile(mc_4)
        # invalid value
        dec_5 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "api_server_authorized_ip_ranges": None,
                "enable_private_cluster": True,
                "disable_public_fqdn": False,
                "private_dns_zone": CONST_PRIVATE_DNS_ZONE_SYSTEM,
                "fqdn_subdomain": "test_fqdn_subdomain",
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_5 = self.models.ManagedCluster(location="test_location")
        dec_5.context.attach_mc(mc_5)
        # fail on invalid private_dns_zone when fqdn_subdomain is specified
        with self.assertRaises(InvalidArgumentValueError):
            dec_5.set_up_api_server_access_profile(mc_5)
    def test_set_up_identity(self):
        """Verify set_up_identity: no identity when disabled, SystemAssigned when
        only --enable-managed-identity is given, UserAssigned when --assign-identity
        is also given, and an error when --assign-identity lacks --enable-managed-identity.
        """
        # default value in `aks_create`: managed identity disabled -> mc untouched
        dec_1 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "enable_managed_identity": False,
                "assign_identity": None,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        dec_1.context.attach_mc(mc_1)
        # fail on passing the wrong mc object
        with self.assertRaises(CLIInternalError):
            dec_1.set_up_identity(None)
        dec_mc_1 = dec_1.set_up_identity(mc_1)
        ground_truth_mc_1 = self.models.ManagedCluster(
            location="test_location",
        )
        self.assertEqual(dec_mc_1, ground_truth_mc_1)
        # custom value: managed identity enabled without a user identity -> SystemAssigned
        dec_2 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "enable_managed_identity": True,
                "assign_identity": None,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_2 = self.models.ManagedCluster(location="test_location")
        dec_2.context.attach_mc(mc_2)
        dec_mc_2 = dec_2.set_up_identity(mc_2)
        identity_2 = self.models.ManagedClusterIdentity(
            type="SystemAssigned",
        )
        ground_truth_mc_2 = self.models.ManagedCluster(
            location="test_location",
            identity=identity_2,
        )
        self.assertEqual(dec_mc_2, ground_truth_mc_2)
        # custom value: user identity resource id provided -> UserAssigned
        dec_3 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "enable_managed_identity": True,
                "assign_identity": "test_assign_identity",
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_3 = self.models.ManagedCluster(location="test_location")
        dec_3.context.attach_mc(mc_3)
        dec_mc_3 = dec_3.set_up_identity(mc_3)
        user_assigned_identity_3 = {
            "test_assign_identity": self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()
        }
        identity_3 = self.models.ManagedClusterIdentity(
            type="UserAssigned",
            user_assigned_identities=user_assigned_identity_3,
        )
        ground_truth_mc_3 = self.models.ManagedCluster(
            location="test_location",
            identity=identity_3,
        )
        self.assertEqual(dec_mc_3, ground_truth_mc_3)
        # invalid value: assign_identity requires enable_managed_identity
        dec_4 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "enable_managed_identity": False,
                "assign_identity": "test_assign_identity",
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_4 = self.models.ManagedCluster(location="test_location")
        dec_4.context.attach_mc(mc_4)
        # fail on enable_managed_identity not specified
        with self.assertRaises(RequiredArgumentMissingError):
            dec_4.set_up_identity(mc_4)
def test_set_up_identity_profile(self):
# default value in `aks_create`
dec_1 = AKSManagedClusterCreateDecorator(
self.cmd,
self.client,
{
"assign_identity": None,
"assign_kubelet_identity": None,
},
ResourceType.MGMT_CONTAINERSERVICE,
)
mc_1 = self.models.ManagedCluster(location="test_location")
dec_1.context.attach_mc(mc_1)
# fail on passing the wrong mc object
with self.assertRaises(CLIInternalError):
dec_1.set_up_identity_profile(None)
dec_mc_1 = dec_1.set_up_identity_profile(mc_1)
ground_truth_mc_1 = self.models.ManagedCluster(
location="test_location",
)
self.assertEqual(dec_mc_1, ground_truth_mc_1)
# custom value
identity_obj_1 = Mock(
client_id="test_assign_kubelet_identity_client_id",
principal_id="test_assign_kubelet_identity_object_id",
)
identity_obj_2 = Mock(
client_id="test_assign_identity_client_id",
principal_id="test_assign_identity_object_id",
)
mock_ensure_method = Mock()
with patch(
"azure.cli.command_modules.acs.managed_cluster_decorator.AKSManagedClusterContext.get_identity_by_msi_client",
side_effect=[identity_obj_1, identity_obj_2],
), patch(
"azure.cli.command_modules.acs.managed_cluster_decorator.ensure_cluster_identity_permission_on_kubelet_identity"
) as mock_ensure_method:
dec_2 = AKSManagedClusterCreateDecorator(
self.cmd,
self.client,
{
"enable_managed_identity": True,
"assign_identity": "test_assign_identity",
"assign_kubelet_identity": "test_assign_kubelet_identity",
},
ResourceType.MGMT_CONTAINERSERVICE,
)
mc_2 = self.models.ManagedCluster(location="test_location")
dec_2.context.attach_mc(mc_2)
dec_mc_2 = dec_2.set_up_identity_profile(mc_2)
identity_profile_2 = {
"kubeletidentity": self.models.UserAssignedIdentity(
resource_id="test_assign_kubelet_identity",
client_id="test_assign_kubelet_identity_client_id",
object_id="test_assign_kubelet_identity_object_id",
)
}
ground_truth_mc_2 = self.models.ManagedCluster(
location="test_location",
identity_profile=identity_profile_2,
)
self.assertEqual(dec_mc_2, ground_truth_mc_2)
mock_ensure_method.assert_called_once_with(
self.cmd,
"test_assign_identity_object_id",
"test_assign_kubelet_identity",
)
    def test_set_up_auto_upgrade_profile(self):
        """Verify set_up_auto_upgrade_profile: mc untouched when no channel is
        given, and an auto-upgrade profile with the channel otherwise.
        """
        # default value in `aks_create`
        dec_1 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "auto_upgrade_channel": None,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        dec_1.context.attach_mc(mc_1)
        # fail on passing the wrong mc object
        with self.assertRaises(CLIInternalError):
            dec_1.set_up_auto_upgrade_profile(None)
        dec_mc_1 = dec_1.set_up_auto_upgrade_profile(mc_1)
        ground_truth_mc_1 = self.models.ManagedCluster(
            location="test_location",
        )
        self.assertEqual(dec_mc_1, ground_truth_mc_1)
        # custom value
        dec_2 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "auto_upgrade_channel": "test_auto_upgrade_channel",
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_2 = self.models.ManagedCluster(location="test_location")
        dec_2.context.attach_mc(mc_2)
        dec_mc_2 = dec_2.set_up_auto_upgrade_profile(mc_2)
        auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile(
            upgrade_channel="test_auto_upgrade_channel",
        )
        ground_truth_mc_2 = self.models.ManagedCluster(
            location="test_location",
            auto_upgrade_profile=auto_upgrade_profile,
        )
        self.assertEqual(dec_mc_2, ground_truth_mc_2)
    def test_set_up_auto_scaler_profile(self):
        """Verify set_up_auto_scaler_profile: mc untouched when the profile is
        None, and the raw dict attached verbatim otherwise.
        """
        # default value in `aks_create`
        dec_1 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "cluster_autoscaler_profile": None,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        dec_1.context.attach_mc(mc_1)
        # fail on passing the wrong mc object
        with self.assertRaises(CLIInternalError):
            dec_1.set_up_auto_scaler_profile(None)
        dec_mc_1 = dec_1.set_up_auto_scaler_profile(mc_1)
        ground_truth_mc_1 = self.models.ManagedCluster(
            location="test_location",
        )
        self.assertEqual(dec_mc_1, ground_truth_mc_1)
        # custom value
        dec_2 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "cluster_autoscaler_profile": {"expander": "random"},
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_2 = self.models.ManagedCluster(location="test_location")
        dec_2.context.attach_mc(mc_2)
        dec_mc_2 = dec_2.set_up_auto_scaler_profile(mc_2)
        ground_truth_mc_2 = self.models.ManagedCluster(
            location="test_location",
            auto_scaler_profile={"expander": "random"},
        )
        self.assertEqual(dec_mc_2, ground_truth_mc_2)
    def test_set_up_sku(self):
        """Verify set_up_sku: no SKU when --uptime-sla is off, and the
        Basic/Paid SKU when it is on.
        """
        # default value in `aks_create`
        dec_1 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "uptime_sla": False,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        dec_1.context.attach_mc(mc_1)
        # fail on passing the wrong mc object
        with self.assertRaises(CLIInternalError):
            dec_1.set_up_sku(None)
        dec_mc_1 = dec_1.set_up_sku(mc_1)
        ground_truth_mc_1 = self.models.ManagedCluster(
            location="test_location",
        )
        self.assertEqual(dec_mc_1, ground_truth_mc_1)
        # custom value
        dec_2 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "uptime_sla": True,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_2 = self.models.ManagedCluster(location="test_location")
        dec_2.context.attach_mc(mc_2)
        dec_mc_2 = dec_2.set_up_sku(mc_2)
        sku = self.models.ManagedClusterSKU(
            name="Basic",
            tier="Paid",
        )
        ground_truth_mc_2 = self.models.ManagedCluster(
            location="test_location",
            sku=sku,
        )
        self.assertEqual(dec_mc_2, ground_truth_mc_2)
    def test_set_up_extended_location(self):
        """Verify set_up_extended_location: mc untouched without --edge-zone,
        and an EdgeZone extended location attached when it is given.
        """
        # default value in `aks_create`
        dec_1 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "edge_zone": None,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        dec_1.context.attach_mc(mc_1)
        # fail on passing the wrong mc object
        with self.assertRaises(CLIInternalError):
            dec_1.set_up_extended_location(None)
        dec_mc_1 = dec_1.set_up_extended_location(mc_1)
        ground_truth_mc_1 = self.models.ManagedCluster(
            location="test_location",
        )
        self.assertEqual(dec_mc_1, ground_truth_mc_1)
        # custom value
        dec_2 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {
                "edge_zone": "test_edge_zone",
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_2 = self.models.ManagedCluster(location="test_location")
        dec_2.context.attach_mc(mc_2)
        dec_mc_2 = dec_2.set_up_extended_location(mc_2)
        extended_location = self.models.ExtendedLocation(
            name="test_edge_zone",
            type=self.models.ExtendedLocationTypes.EDGE_ZONE,
        )
        ground_truth_mc_2 = self.models.ManagedCluster(
            location="test_location",
            extended_location=extended_location,
        )
        self.assertEqual(dec_mc_2, ground_truth_mc_2)
def test_set_up_node_resource_group(self):
dec_1 = AKSManagedClusterCreateDecorator(
self.cmd,
self.client,
{"node_resource_group": "test_node_resource_group"},
ResourceType.MGMT_CONTAINERSERVICE,
)
mc_1 = self.models.ManagedCluster(location="test_location")
dec_1.context.attach_mc(mc_1)
# fail on passing the wrong mc object
with self.assertRaises(CLIInternalError):
dec_1.set_up_node_resource_group(None)
dec_mc_1 = dec_1.set_up_node_resource_group(mc_1)
ground_truth_mc_1 = self.models.ManagedCluster(
location="test_location",
node_resource_group="test_node_resource_group",
)
self.assertEqual(dec_mc_1, ground_truth_mc_1)
    def test_enabled_defender(self):
        """get_defender_config should return an enabled Azure Defender profile
        wired to the default log analytics workspace when --enable-defender is given.
        """
        dec_1 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {"enable_defender": True},
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        dec_1.context.attach_mc(mc_1)
        dec_1.context.set_intermediate("subscription_id", "test_subscription_id")
        # the workspace lookup is patched so no real ARM call is made
        with patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.ensure_default_log_analytics_workspace_for_monitoring",
            return_value="test_workspace_resource_id",
        ):
            dec_mc_1 = dec_1.context.get_defender_config()
        ground_truth_mc_1 = self.models.ManagedClusterSecurityProfileAzureDefender(
            enabled=True,
            log_analytics_workspace_resource_id="test_workspace_resource_id",
        )
        self.assertEqual(dec_mc_1, ground_truth_mc_1)
def test_disabled_defender(self):
dec_1 = AKSManagedClusterCreateDecorator(
self.cmd,
self.client,
{"disable_defender": True},
ResourceType.MGMT_CONTAINERSERVICE,
)
dec_1.context.set_intermediate("subscription_id", "test_subscription_id")
mc_1 = self.models.ManagedCluster(location="test_location")
dec_1.context.attach_mc(mc_1)
dec_mc_1 = dec_1.context.get_defender_config()
ground_truth_mc_1 = self.models.ManagedClusterSecurityProfileAzureDefender(
enabled=False,
)
self.assertEqual(dec_mc_1, ground_truth_mc_1)
    def test_set_up_defender(self):
        """set_up_defender should attach a security profile with Azure Defender
        enabled and pointed at the default log analytics workspace.
        """
        dec_1 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {"enable_defender": True},
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        dec_1.context.attach_mc(mc_1)
        dec_1.context.set_intermediate("subscription_id", "test_subscription_id")
        # the workspace lookup is patched so no real ARM call is made
        with patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.ensure_default_log_analytics_workspace_for_monitoring",
            return_value="test_workspace_resource_id",
        ):
            dec_mc_1 = dec_1.set_up_defender(mc_1)
        ground_truth_mc_1 = self.models.ManagedCluster(
            location="test_location",
            security_profile=self.models.ManagedClusterSecurityProfile(
                azure_defender=self.models.ManagedClusterSecurityProfileAzureDefender(
                    enabled=True,
                    log_analytics_workspace_resource_id="test_workspace_resource_id",
                )
            ),
        )
        self.assertEqual(dec_mc_1, ground_truth_mc_1)
    def test_construct_mc_profile_default(self):
        """End-to-end check of construct_mc_profile_default using the real
        `aks_create` signature defaults: the positional parameters are verified,
        the optional defaults are fed in as raw parameters, and the resulting
        ManagedCluster is compared against a fully spelled-out ground truth.
        """
        import inspect
        import paramiko
        from azure.cli.command_modules.acs.custom import aks_create

        optional_params = {}
        positional_params = []
        # split the aks_create signature into positional params and defaults
        for _, v in inspect.signature(aks_create).parameters.items():
            if v.default != v.empty:
                optional_params[v.name] = v.default
            else:
                positional_params.append(v.name)
        ground_truth_positional_params = [
            "cmd",
            "client",
            "resource_group_name",
            "name",
            "ssh_key_value",
        ]
        self.assertEqual(positional_params, ground_truth_positional_params)

        # prepare ssh key
        key = paramiko.RSAKey.generate(2048)
        public_key = "{} {}".format(key.get_name(), key.get_base64())

        # prepare a dictionary of default parameters
        raw_param_dict = {
            "resource_group_name": "test_rg_name",
            "name": "test_name",
            "ssh_key_value": public_key,
        }
        raw_param_dict.update(optional_params)

        # default value in `aks_create`
        dec_1 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            raw_param_dict,
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        # location lookup and subscription id are patched so no ARM calls happen
        mock_profile = Mock(get_subscription_id=Mock(return_value="1234-5678-9012"))
        with patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.get_rg_location",
            return_value="test_location",
        ), patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.Profile",
            return_value=mock_profile,
        ):
            dec_mc_1 = dec_1.construct_mc_profile_default()

        upgrade_settings_1 = self.models.AgentPoolUpgradeSettings()
        ground_truth_agentpool_1 = self.models.ManagedClusterAgentPoolProfile(
            name="nodepool1",
            orchestrator_version="",
            vm_size=CONST_DEFAULT_NODE_VM_SIZE,
            os_type=CONST_DEFAULT_NODE_OS_TYPE,
            enable_node_public_ip=False,
            enable_auto_scaling=False,
            count=3,
            node_taints=[],
            os_disk_size_gb=0,
            upgrade_settings=upgrade_settings_1,
            type=CONST_VIRTUAL_MACHINE_SCALE_SETS,
            enable_encryption_at_host=False,
            enable_ultra_ssd=False,
            enable_fips=False,
            mode=CONST_NODEPOOL_MODE_SYSTEM,
        )
        ssh_config_1 = self.models.ContainerServiceSshConfiguration(
            public_keys=[self.models.ContainerServiceSshPublicKey(key_data=public_key)]
        )
        linux_profile_1 = self.models.ContainerServiceLinuxProfile(admin_username="azureuser", ssh=ssh_config_1)
        network_profile_1 = self.models.ContainerServiceNetworkProfile(
            load_balancer_sku="standard",
        )
        identity_1 = self.models.ManagedClusterIdentity(type="SystemAssigned")
        ground_truth_mc_1 = self.models.ManagedCluster(
            location="test_location",
            dns_prefix="testname-testrgname-1234-5",
            kubernetes_version="",
            addon_profiles={},
            enable_rbac=True,
            agent_pool_profiles=[ground_truth_agentpool_1],
            linux_profile=linux_profile_1,
            network_profile=network_profile_1,
            identity=identity_1,
            disable_local_accounts=False,
        )
        self.assertEqual(dec_mc_1, ground_truth_mc_1)

        dec_1.context.raw_param.print_usage_statistics()
    def test_check_is_postprocessing_required(self):
        """check_is_postprocessing_required should be True whenever an addon
        intermediate flag is set, vnet permission granting is pending, or a
        managed-identity cluster needs to attach an ACR.
        """
        dec_1 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {},
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        dec_1.context.attach_mc(mc_1)
        self.assertEqual(dec_1.check_is_postprocessing_required(mc_1), False)
        # each intermediate flag alone is enough to require postprocessing
        dec_1.context.set_intermediate("monitoring_addon_enabled", True)
        self.assertEqual(dec_1.check_is_postprocessing_required(mc_1), True)
        dec_1.context.remove_intermediate("monitoring_addon_enabled")
        dec_1.context.set_intermediate("ingress_appgw_addon_enabled", True)
        self.assertEqual(dec_1.check_is_postprocessing_required(mc_1), True)
        dec_1.context.remove_intermediate("ingress_appgw_addon_enabled")
        dec_1.context.set_intermediate("virtual_node_addon_enabled", True)
        self.assertEqual(dec_1.check_is_postprocessing_required(mc_1), True)
        dec_1.context.remove_intermediate("virtual_node_addon_enabled")
        dec_1.context.set_intermediate("need_post_creation_vnet_permission_granting", True)
        self.assertEqual(dec_1.check_is_postprocessing_required(mc_1), True)

        # attach_acr with managed identity also requires postprocessing
        dec_2 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {"enable_managed_identity": True, "attach_acr": "test_attach_acr"},
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_2 = self.models.ManagedCluster(location="test_location")
        dec_2.context.attach_mc(mc_2)
        self.assertEqual(dec_2.check_is_postprocessing_required(mc_2), True)
    def test_immediate_processing_after_request(self):
        """immediate_processing_after_request should grant the cluster identity
        Network Contributor on the subnet when vnet permission granting is pending.
        """
        dec_1 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {"vnet_subnet_id": "test_vnet_subnet_id"},
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        dec_1.context.attach_mc(mc_1)
        dec_1.context.set_intermediate("need_post_creation_vnet_permission_granting", True)
        # the cluster's principal id comes from a GET on the client
        self.client.get = Mock(return_value=Mock(identity=Mock(principal_id="test_principal_id")))
        with patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.add_role_assignment", return_value=False
        ) as mock_add:
            dec_1.immediate_processing_after_request(mc_1)
        mock_add.assert_called_once_with(
            self.cmd,
            "Network Contributor",
            "test_principal_id",
            scope="test_vnet_subnet_id",
            is_service_principal=False,
        )
    def test_postprocessing_after_mc_created(self):
        """Verify postprocessing_after_mc_created for four scenarios: legacy
        monitoring role assignment, MSI-auth monitoring (container insights),
        ingress-appgw/virtual-node role assignments, and ACR attachment via the
        kubelet identity.
        """
        # scenario 1: monitoring addon without MSI auth -> monitoring role assignment
        dec_1 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {"enable_msi_auth_for_monitoring": False},
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        dec_1.context.attach_mc(mc_1)
        dec_1.context.set_intermediate("monitoring_addon_enabled", True)
        mock_profile = Mock(get_subscription_id=Mock(return_value="1234-5678-9012"))
        with patch("azure.cli.command_modules.acs.managed_cluster_decorator.Profile", return_value=mock_profile), patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.add_monitoring_role_assignment"
        ) as mock_add:
            dec_1.postprocessing_after_mc_created(mc_1)
        mock_add.assert_called_once_with(mc_1, ANY, self.cmd)

        # scenario 2: monitoring addon with MSI auth -> container insights DCRA setup
        dec_2 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {"resource_group_name": "test_rg_name", "name": "test_name", "enable_msi_auth_for_monitoring": True},
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        monitoring_addon_profile_2 = self.models.ManagedClusterAddonProfile(
            enabled=True,
            config={},
        )
        mc_2 = self.models.ManagedCluster(
            location="test_location",
            addon_profiles={
                CONST_MONITORING_ADDON_NAME: monitoring_addon_profile_2,
            },
        )
        dec_2.context.attach_mc(mc_2)
        dec_2.context.set_intermediate("monitoring_addon_enabled", True)
        mock_profile_2 = Mock(get_subscription_id=Mock(return_value="1234-5678-9012"))
        with patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.Profile", return_value=mock_profile_2
        ), patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.ensure_container_insights_for_monitoring"
        ) as mock_ensure:
            dec_2.postprocessing_after_mc_created(mc_2)
        mock_ensure.assert_called_once_with(
            self.cmd,
            monitoring_addon_profile_2,
            "1234-5678-9012",
            "test_rg_name",
            "test_name",
            "test_location",
            remove_monitoring=False,
            aad_route=True,
            create_dcr=False,
            create_dcra=True,
        )

        # scenario 3: ingress-appgw and virtual-node addons -> their role assignments
        dec_3 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {"vnet_subnet_id": "test_vnet_subnet_id", "enable_managed_identity": True, "attach_acr": "test_attach_acr"},
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_3 = self.models.ManagedCluster(location="test_location")
        dec_3.context.attach_mc(mc_3)
        dec_3.context.set_intermediate("ingress_appgw_addon_enabled", True)
        dec_3.context.set_intermediate("virtual_node_addon_enabled", True)
        with patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.add_ingress_appgw_addon_role_assignment"
        ) as mock_add_ingress_3, patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.add_virtual_node_role_assignment"
        ) as mock_add_virtual_3:
            dec_3.postprocessing_after_mc_created(mc_3)
        mock_add_ingress_3.assert_called_once_with(mc_3, self.cmd)
        mock_add_virtual_3.assert_called_once_with(self.cmd, mc_3, "test_vnet_subnet_id")

        # scenario 4: attach_acr with kubelet identity -> ACR pull role assignment
        dec_4 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {"enable_managed_identity": True, "attach_acr": "test_attach_acr"},
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_4 = self.models.ManagedCluster(
            location="test_location",
            identity_profile={
                "kubeletidentity": self.models.UserAssignedIdentity(
                    client_id="test_client_id", object_id="test_object_id"
                )
            },
        )
        dec_4.context.attach_mc(mc_4)
        mock_profile_4 = Mock(get_subscription_id=Mock(return_value="1234-5678-9012"))
        with patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.Profile", return_value=mock_profile_4
        ), patch("azure.cli.command_modules.acs.managed_cluster_decorator.ensure_aks_acr") as mock_ensure_4:
            dec_4.postprocessing_after_mc_created(mc_4)
        mock_ensure_4.assert_called_once_with(
            self.cmd,
            assignee="test_object_id",
            acr_name_or_id="test_attach_acr",
            subscription_id="1234-5678-9012",
            is_service_principal=False,
        )
    def test_put_mc(self):
        """put_mc should return via sdk_no_wait when no postprocessing is
        required, and wait on the LRO (then run postprocessing) when it is.
        """
        dec_1 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {"enable_msi_auth_for_monitoring": False},
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        dec_1.context.attach_mc(mc_1)
        # no postprocessing needed -> plain sdk_no_wait path
        with patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.sdk_no_wait",
            return_value=mc_1,
        ):
            self.assertEqual(dec_1.put_mc(mc_1), mc_1)

        # monitoring addon enabled -> wait for the LRO and do postprocessing
        dec_1.context.set_intermediate("monitoring_addon_enabled", True)
        mock_profile = Mock(get_subscription_id=Mock(return_value="1234-5678-9012"))
        with patch("azure.cli.command_modules.acs.managed_cluster_decorator.Profile", return_value=mock_profile), patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.add_monitoring_role_assignment"
        ) as mock_add, patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.LongRunningOperation",
            return_value=Mock(return_value=mc_1),
        ):
            self.assertEqual(dec_1.put_mc(mc_1), mc_1)
        mock_add.assert_called_once_with(mc_1, ANY, self.cmd)
    def test_create_mc(self):
        """create_mc should retry AAD-propagation errors up to the limit, map
        other HttpResponseErrors to AzureInternalError, and return the mc on success.
        """
        # raise exception
        dec_1 = AKSManagedClusterCreateDecorator(
            self.cmd,
            self.client,
            {},
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        # fail on passing the wrong mc object
        with self.assertRaises(CLIInternalError):
            dec_1.create_mc(None)
        mc_1 = self.models.ManagedCluster(location="test_location")
        dec_1.context.attach_mc(mc_1)
        err_1 = HttpResponseError(message="not found in Active Directory tenant")
        # fail on mock HttpResponseError, max retry exceeded
        # (time.sleep is patched so the retry loop does not actually wait)
        with self.assertRaises(AzCLIError), patch("time.sleep"), patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.AKSManagedClusterCreateDecorator.put_mc",
            side_effect=err_1,
        ):
            dec_1.create_mc(mc_1)

        # raise exception
        resp = Mock(
            reason="error reason",
            status_code=500,
            text=Mock(return_value="error text"),
        )
        err_2 = HttpResponseError(response=resp)
        # fail on mock HttpResponseError: first error is retried, second aborts
        with self.assertRaises(AzureInternalError), patch("time.sleep"), patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.AKSManagedClusterCreateDecorator.put_mc",
            side_effect=[err_1, err_2],
        ):
            dec_1.create_mc(mc_1)

        # return mc
        with patch(
            "azure.cli.command_modules.acs.managed_cluster_decorator.AKSManagedClusterCreateDecorator.put_mc",
            return_value=mc_1,
        ):
            self.assertEqual(dec_1.create_mc(mc_1), mc_1)
class AKSManagedClusterUpdateDecoratorTestCase(unittest.TestCase):
def setUp(self):
self.cli_ctx = MockCLI()
self.cmd = MockCmd(self.cli_ctx)
self.models = AKSManagedClusterModels(self.cmd, ResourceType.MGMT_CONTAINERSERVICE)
self.client = MockClient()
def test_init(self):
dec_1 = AKSManagedClusterUpdateDecorator(
self.cmd,
self.client,
{},
ResourceType.MGMT_CONTAINERSERVICE,
)
self.assertIsNotNone(dec_1.models)
self.assertIsNotNone(dec_1.context)
self.assertIsNotNone(dec_1.agentpool_decorator)
self.assertIsNotNone(dec_1.agentpool_context)
    def test_check_raw_parameters(self):
        """check_raw_parameters should raise when no update parameter is given,
        and accept empty-but-present autoscaler/ip-range values.
        """
        # default value in `aks_update` (no parameter provided at all)
        dec_1 = AKSManagedClusterUpdateDecorator(
            self.cmd,
            self.client,
            {},
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        # fail on no updated parameter provided
        with self.assertRaises(RequiredArgumentMissingError):
            dec_1.check_raw_parameters()
        # custom value: empty dict / empty string still count as "provided"
        dec_2 = AKSManagedClusterUpdateDecorator(
            self.cmd,
            self.client,
            {
                "cluster_autoscaler_profile": {},
                "api_server_authorized_ip_ranges": "",
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        dec_2.check_raw_parameters()
def test_ensure_mc(self):
dec_1 = AKSManagedClusterUpdateDecorator(
self.cmd,
self.client,
{},
ResourceType.MGMT_CONTAINERSERVICE,
)
# fail on passing the wrong mc object
with self.assertRaises(CLIInternalError):
dec_1._ensure_mc(None)
mc_1 = self.models.ManagedCluster(location="test_location")
# fail on inconsistent mc with internal context
with self.assertRaises(CLIInternalError):
dec_1._ensure_mc(mc_1)
def test_fetch_mc(self):
mock_mc = self.models.ManagedCluster(
location="test_location",
)
self.client.get = Mock(return_value=mock_mc)
dec_1 = AKSManagedClusterUpdateDecorator(
self.cmd,
self.client,
{
"name": "test_cluster",
"resource_group_name": "test_rg_name",
},
ResourceType.MGMT_CONTAINERSERVICE,
)
dec_mc = dec_1.fetch_mc()
ground_truth_mc = self.models.ManagedCluster(
location="test_location",
)
self.assertEqual(dec_mc, ground_truth_mc)
self.assertEqual(dec_mc, dec_1.context.mc)
self.client.get.assert_called_once_with("test_rg_name", "test_cluster")
    def test_update_agentpool_profile(self):
        """Verify update_agentpool_profile: autoscaler min/max update on the
        single pool, label replacement across all pools, and an error when the
        fetched mc has no agent pool profiles.
        """
        # scenario 1: update autoscaler bounds and labels on a single pool
        dec_1 = AKSManagedClusterUpdateDecorator(
            self.cmd,
            self.client,
            {
                "update_cluster_autoscaler": True,
                "enable_cluster_autoscaler": False,
                "disable_cluster_autoscaler": False,
                "min_count": 3,
                "max_count": 10,
                "nodepool_labels": {"key1": "value1", "key2": "value2"},
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        agentpool_profile_1 = self.models.ManagedClusterAgentPoolProfile(
            name="nodepool1",
            count=3,
            enable_auto_scaling=True,
            min_count=1,
            max_count=5,
        )
        mc_1 = self.models.ManagedCluster(
            location="test_location",
            agent_pool_profiles=[agentpool_profile_1],
        )
        dec_1.context.attach_mc(mc_1)
        # fail on passing the wrong mc object
        with self.assertRaises(CLIInternalError):
            dec_1.update_agentpool_profile(None)
        dec_mc_1 = dec_1.update_agentpool_profile(mc_1)
        ground_truth_agentpool_profile_1 = self.models.ManagedClusterAgentPoolProfile(
            name="nodepool1",
            count=3,
            enable_auto_scaling=True,
            min_count=3,
            max_count=10,
            node_labels={"key1": "value1", "key2": "value2"},
        )
        ground_truth_mc_1 = self.models.ManagedCluster(
            location="test_location",
            agent_pool_profiles=[ground_truth_agentpool_profile_1],
        )
        self.assertEqual(dec_mc_1, ground_truth_mc_1)

        # scenario 2: empty nodepool_labels clears labels on every pool
        dec_2 = AKSManagedClusterUpdateDecorator(
            self.cmd,
            self.client,
            {
                "update_cluster_autoscaler": False,
                "enable_cluster_autoscaler": False,
                "disable_cluster_autoscaler": False,
                "min_count": None,
                "max_count": None,
                "nodepool_labels": {},
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        agentpool_profile_21 = self.models.ManagedClusterAgentPoolProfile(
            name="nodepool1",
            node_labels={"key1": "value1", "key2": "value2"},
        )
        agentpool_profile_22 = self.models.ManagedClusterAgentPoolProfile(
            name="nodepool2",
            node_labels={"key1": "value1", "key2": "value2"},
        )
        mc_2 = self.models.ManagedCluster(
            location="test_location",
            agent_pool_profiles=[agentpool_profile_21, agentpool_profile_22],
        )
        dec_2.context.attach_mc(mc_2)
        dec_mc_2 = dec_2.update_agentpool_profile(mc_2)
        ground_truth_agentpool_profile_21 = self.models.ManagedClusterAgentPoolProfile(
            name="nodepool1",
            node_labels={},
        )
        ground_truth_agentpool_profile_22 = self.models.ManagedClusterAgentPoolProfile(
            name="nodepool2",
            node_labels={},
        )
        ground_truth_mc_2 = self.models.ManagedCluster(
            location="test_location",
            agent_pool_profiles=[ground_truth_agentpool_profile_21, ground_truth_agentpool_profile_22],
        )
        self.assertEqual(dec_mc_2, ground_truth_mc_2)

        # scenario 3: mc without agent pool profiles is invalid
        dec_3 = AKSManagedClusterUpdateDecorator(
            self.cmd,
            self.client,
            {},
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_3 = self.models.ManagedCluster(location="test_location")
        dec_3.context.attach_mc(mc_3)
        # fail on incomplete mc object (no agent pool profiles)
        with self.assertRaises(UnknownError):
            dec_3.update_agentpool_profile(mc_3)
    def test_update_auto_scaler_profile(self):
        """Verify update_auto_scaler_profile: None keeps the existing profile,
        while an explicit empty dict replaces it.
        """
        # None -> the profile fetched from the cluster is preserved
        dec_1 = AKSManagedClusterUpdateDecorator(
            self.cmd,
            self.client,
            {
                "cluster_autoscaler_profile": None,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_1 = self.models.ManagedCluster(
            location="test_location",
            auto_scaler_profile=self.models.ManagedClusterPropertiesAutoScalerProfile(
                scan_interval="10s",
            ),
        )
        dec_1.context.attach_mc(mc_1)
        dec_mc_1 = dec_1.update_auto_scaler_profile(mc_1)
        ground_truth_mc_1 = self.models.ManagedCluster(
            location="test_location",
            auto_scaler_profile=self.models.ManagedClusterPropertiesAutoScalerProfile(
                scan_interval="10s",
            ),
        )
        self.assertEqual(dec_mc_1, ground_truth_mc_1)

        # {} -> the existing profile is overwritten with the empty value
        dec_2 = AKSManagedClusterUpdateDecorator(
            self.cmd,
            self.client,
            {
                "cluster_autoscaler_profile": {},
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_2 = self.models.ManagedCluster(
            location="test_location",
            auto_scaler_profile=self.models.ManagedClusterPropertiesAutoScalerProfile(
                scan_interval="10s",
            ),
        )
        dec_2.context.attach_mc(mc_2)
        dec_mc_2 = dec_2.update_auto_scaler_profile(mc_2)
        ground_truth_mc_2 = self.models.ManagedCluster(
            location="test_location",
            auto_scaler_profile={},
        )
        self.assertEqual(dec_mc_2, ground_truth_mc_2)
    def test_update_tags(self):
        """Verify update_tags: None keeps the existing tags, an explicit empty
        dict replaces them.
        """
        # default value in `aks_update`
        dec_1 = AKSManagedClusterUpdateDecorator(
            self.cmd,
            self.client,
            {
                "tags": None,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_1 = self.models.ManagedCluster(
            location="test_location",
            tags={"abc": "xyz"},
        )
        dec_1.context.attach_mc(mc_1)
        # fail on passing the wrong mc object
        with self.assertRaises(CLIInternalError):
            dec_1.update_tags(None)
        dec_mc_1 = dec_1.update_tags(mc_1)
        ground_truth_mc_1 = self.models.ManagedCluster(
            location="test_location",
            tags={"abc": "xyz"},
        )
        self.assertEqual(dec_mc_1, ground_truth_mc_1)
        # custom_value: an explicit empty dict clears the tags
        dec_2 = AKSManagedClusterUpdateDecorator(
            self.cmd,
            self.client,
            {
                "tags": {},
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_2 = self.models.ManagedCluster(
            location="test_location",
            tags={"abc": "xyz"},
        )
        dec_2.context.attach_mc(mc_2)
        # fail on passing the wrong mc object
        with self.assertRaises(CLIInternalError):
            dec_2.update_tags(None)
        dec_mc_2 = dec_2.update_tags(mc_2)
        ground_truth_mc_2 = self.models.ManagedCluster(
            location="test_location",
            tags={},
        )
        self.assertEqual(dec_mc_2, ground_truth_mc_2)
    def test_process_attach_detach_acr(self):
        """Verify process_attach_detach_acr: a no-op when neither flag is given,
        and one ensure_aks_acr call per flag (attach then detach) using the
        kubelet identity's object id when both are given.
        """
        # default value in `aks_update`
        dec_1 = AKSManagedClusterUpdateDecorator(
            self.cmd,
            self.client,
            {
                "attach_acr": None,
                "detach_acr": None,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_1 = self.models.ManagedCluster(
            location="test_location",
            identity=self.models.ManagedClusterIdentity(type="SystemAssigned"),
            identity_profile={
                "kubeletidentity": self.models.UserAssignedIdentity(
                    client_id="test_client_id", object_id="test_object_id"
                )
            },
        )
        dec_1.context.attach_mc(mc_1)
        dec_1.context.set_intermediate("subscription_id", "test_subscription_id")
        # fail on passing the wrong mc object
        with self.assertRaises(CLIInternalError):
            dec_1.process_attach_detach_acr(None)
        dec_1.process_attach_detach_acr(mc_1)
        # custom value: both attach and detach requested in one update
        dec_2 = AKSManagedClusterUpdateDecorator(
            self.cmd,
            self.client,
            {
                "attach_acr": "test_attach_acr",
                "detach_acr": "test_detach_acr",
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_2 = self.models.ManagedCluster(
            location="test_location",
            identity=self.models.ManagedClusterIdentity(type="SystemAssigned"),
            identity_profile={
                "kubeletidentity": self.models.UserAssignedIdentity(
                    client_id="test_client_id", object_id="test_object_id"
                )
            },
        )
        dec_2.context.attach_mc(mc_2)
        dec_2.context.set_intermediate("subscription_id", "test_subscription_id")
        with patch("azure.cli.command_modules.acs.managed_cluster_decorator.ensure_aks_acr") as ensure_acr:
            dec_2.process_attach_detach_acr(mc_2)
            ensure_acr.assert_has_calls(
                [
                    call(
                        self.cmd,
                        assignee="test_object_id",
                        acr_name_or_id="test_attach_acr",
                        subscription_id="test_subscription_id",
                        is_service_principal=False,
                    ),
                    call(
                        self.cmd,
                        assignee="test_object_id",
                        acr_name_or_id="test_detach_acr",
                        subscription_id="test_subscription_id",
                        detach=True,
                        is_service_principal=False,
                    ),
                ]
            )
def test_update_sku(self):
    """Verify update_sku toggles the managed cluster SKU tier via uptime SLA flags.

    Case 1 (defaults): no flags set, SKU is left unchanged.
    Case 2: uptime_sla and no_uptime_sla both set -> MutuallyExclusiveArgumentError.
    Case 3: no_uptime_sla set -> tier downgraded from "Paid" to "Free".
    Case 4: uptime_sla set -> tier upgraded from "Free" to "Paid".
    """
    # default value in `aks_update`
    dec_1 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "uptime_sla": False,
            "no_uptime_sla": False,
        },
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    mc_1 = self.models.ManagedCluster(
        location="test_location",
        sku=self.models.ManagedClusterSKU(
            name="Basic",
            tier="Free",
        ),
    )
    dec_1.context.attach_mc(mc_1)
    # fail on passing the wrong mc object
    with self.assertRaises(CLIInternalError):
        dec_1.update_sku(None)
    dec_mc_1 = dec_1.update_sku(mc_1)
    ground_truth_mc_1 = self.models.ManagedCluster(
        location="test_location",
        sku=self.models.ManagedClusterSKU(
            name="Basic",
            tier="Free",
        ),
    )
    self.assertEqual(dec_mc_1, ground_truth_mc_1)
    # custom value
    dec_2 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "uptime_sla": True,
            "no_uptime_sla": True,
        },
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    mc_2 = self.models.ManagedCluster(
        location="test_location",
        sku=self.models.ManagedClusterSKU(
            name="Basic",
            tier="Free",
        ),
    )
    dec_2.context.attach_mc(mc_2)
    # fail on mutually exclusive uptime_sla and no_uptime_sla
    with self.assertRaises(MutuallyExclusiveArgumentError):
        dec_2.update_sku(mc_2)
    # custom value
    dec_3 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "uptime_sla": False,
            "no_uptime_sla": True,
        },
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    mc_3 = self.models.ManagedCluster(
        location="test_location",
        sku=self.models.ManagedClusterSKU(
            name="Basic",
            tier="Paid",
        ),
    )
    dec_3.context.attach_mc(mc_3)
    dec_mc_3 = dec_3.update_sku(mc_3)
    ground_truth_mc_3 = self.models.ManagedCluster(
        location="test_location",
        sku=self.models.ManagedClusterSKU(
            name="Basic",
            tier="Free",
        ),
    )
    self.assertEqual(dec_mc_3, ground_truth_mc_3)
    # custom value
    dec_4 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "uptime_sla": True,
            "no_uptime_sla": False,
        },
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    mc_4 = self.models.ManagedCluster(
        location="test_location",
        sku=self.models.ManagedClusterSKU(
            name="Basic",
            tier="Free",
        ),
    )
    dec_4.context.attach_mc(mc_4)
    dec_mc_4 = dec_4.update_sku(mc_4)
    ground_truth_mc_4 = self.models.ManagedCluster(
        location="test_location",
        sku=self.models.ManagedClusterSKU(
            name="Basic",
            tier="Paid",
        ),
    )
    self.assertEqual(dec_mc_4, ground_truth_mc_4)
def test_update_load_balancer_profile(self):
    """Verify update_load_balancer_profile rewrites the LB profile from raw parameters.

    Case 1 (defaults): all LB parameters None, the existing (empty) profile is kept.
    Case 2: mc has no network profile at all -> UnknownError.
    Case 3: managed outbound IP count and outbound IP prefixes are set; the managed
    count is updated, the explicit outbound IPs are dropped (set to None) in favor
    of the prefixes, and ports/idle timeout are overwritten.
    """
    # default value in `aks_update`
    dec_1 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "load_balancer_sku": None,
            "load_balancer_managed_outbound_ip_count": None,
            "load_balancer_outbound_ips": None,
            "load_balancer_outbound_ip_prefixes": None,
            "load_balancer_outbound_ports": None,
            "load_balancer_idle_timeout": None,
        },
        resource_type=ResourceType.MGMT_CONTAINERSERVICE,
    )
    # fail on passing the wrong mc object
    with self.assertRaises(CLIInternalError):
        dec_1.update_load_balancer_profile(None)
    load_balancer_profile_1 = self.models.load_balancer_models.ManagedClusterLoadBalancerProfile()
    network_profile_1 = self.models.ContainerServiceNetworkProfile(
        load_balancer_profile=load_balancer_profile_1,
    )
    mc_1 = self.models.ManagedCluster(
        location="test_location",
        network_profile=network_profile_1,
    )
    dec_1.context.attach_mc(mc_1)
    dec_mc_1 = dec_1.update_load_balancer_profile(mc_1)
    ground_truth_load_balancer_profile_1 = self.models.load_balancer_models.ManagedClusterLoadBalancerProfile()
    ground_truth_network_profile_1 = self.models.ContainerServiceNetworkProfile(
        load_balancer_profile=ground_truth_load_balancer_profile_1,
    )
    ground_truth_mc_1 = self.models.ManagedCluster(
        location="test_location",
        network_profile=ground_truth_network_profile_1,
    )
    self.assertEqual(dec_mc_1, ground_truth_mc_1)
    # custom value
    dec_2 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {},
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    mc_2 = self.models.ManagedCluster(location="test_location")
    dec_2.context.attach_mc(mc_2)
    # fail on incomplete mc object (no network profile)
    with self.assertRaises(UnknownError):
        dec_2.update_load_balancer_profile(mc_2)
    # custom value
    dec_3 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "load_balancer_sku": None,
            "load_balancer_managed_outbound_ip_count": 10,
            "load_balancer_outbound_ips": None,
            "load_balancer_outbound_ip_prefixes": "test_ip_prefix_1,test_ip_prefix_2",
            "load_balancer_outbound_ports": 20,
            "load_balancer_idle_timeout": 30,
        },
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    # pre-existing profile with explicit outbound IPs that should be superseded
    load_balancer_profile_3 = self.models.load_balancer_models.ManagedClusterLoadBalancerProfile(
        managed_outbound_i_ps=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileManagedOutboundIPs(
            count=3
        ),
        outbound_i_ps=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileOutboundIPs(
            public_i_ps=[
                self.models.load_balancer_models.ResourceReference(id="test_ip_1"),
                self.models.load_balancer_models.ResourceReference(id="test_ip_2"),
            ]
        ),
        allocated_outbound_ports=5,
    )
    network_profile_3 = self.models.ContainerServiceNetworkProfile(
        load_balancer_profile=load_balancer_profile_3,
    )
    mc_3 = self.models.ManagedCluster(
        location="test_location",
        network_profile=network_profile_3,
    )
    dec_3.context.attach_mc(mc_3)
    dec_mc_3 = dec_3.update_load_balancer_profile(mc_3)
    ground_truth_load_balancer_profile_3 = self.models.load_balancer_models.ManagedClusterLoadBalancerProfile(
        managed_outbound_i_ps=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileManagedOutboundIPs(
            count=10
        ),
        outbound_i_ps=None,
        outbound_ip_prefixes=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileOutboundIPPrefixes(
            public_ip_prefixes=[
                self.models.load_balancer_models.ResourceReference(id="test_ip_prefix_1"),
                self.models.load_balancer_models.ResourceReference(id="test_ip_prefix_2"),
            ]
        ),
        allocated_outbound_ports=20,
        idle_timeout_in_minutes=30,
    )
    ground_truth_network_profile_3 = self.models.ContainerServiceNetworkProfile(
        load_balancer_profile=ground_truth_load_balancer_profile_3,
    )
    ground_truth_mc_3 = self.models.ManagedCluster(
        location="test_location",
        network_profile=ground_truth_network_profile_3,
    )
    self.assertEqual(dec_mc_3, ground_truth_mc_3)
def test_update_nat_gateway_profile(self):
    """Verify update_nat_gateway_profile updates NAT gateway settings on the mc.

    Case 1 (defaults): both parameters None, the empty profile is left unchanged.
    Case 2: mc has no network profile -> UnknownError.
    Case 3: managed outbound IP count and idle timeout are both overwritten
    (10 -> 5 and 20 -> 30 respectively).
    """
    # default value in `aks_update`
    dec_1 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "nat_gateway_managed_outbound_ip_count": None,
            "nat_gateway_idle_timeout": None,
        },
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    # fail on passing the wrong mc object
    with self.assertRaises(CLIInternalError):
        dec_1.update_nat_gateway_profile(None)
    mc_1 = self.models.ManagedCluster(
        location="test_location",
        network_profile=self.models.ContainerServiceNetworkProfile(
            nat_gateway_profile=self.models.nat_gateway_models.ManagedClusterNATGatewayProfile(),
        ),
    )
    dec_1.context.attach_mc(mc_1)
    dec_mc_1 = dec_1.update_nat_gateway_profile(mc_1)
    ground_truth_mc_1 = self.models.ManagedCluster(
        location="test_location",
        network_profile=self.models.ContainerServiceNetworkProfile(
            nat_gateway_profile=self.models.nat_gateway_models.ManagedClusterNATGatewayProfile(),
        ),
    )
    self.assertEqual(dec_mc_1, ground_truth_mc_1)
    # custom value
    dec_2 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "nat_gateway_managed_outbound_ip_count": 5,
            "nat_gateway_idle_timeout": None,
        },
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    mc_2 = self.models.ManagedCluster(location="test_location")
    dec_2.context.attach_mc(mc_2)
    # fail on incomplete mc object (no network profile)
    with self.assertRaises(UnknownError):
        dec_2.update_nat_gateway_profile(mc_2)
    # custom value
    dec_3 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "nat_gateway_managed_outbound_ip_count": 5,
            "nat_gateway_idle_timeout": 30,
        },
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    mc_3 = self.models.ManagedCluster(
        location="test_location",
        network_profile=self.models.ContainerServiceNetworkProfile(
            nat_gateway_profile=self.models.nat_gateway_models.ManagedClusterNATGatewayProfile(
                managed_outbound_ip_profile=self.models.nat_gateway_models.ManagedClusterManagedOutboundIPProfile(
                    count=10
                ),
                idle_timeout_in_minutes=20,
            )
        ),
    )
    dec_3.context.attach_mc(mc_3)
    dec_mc_3 = dec_3.update_nat_gateway_profile(mc_3)
    ground_truth_mc_3 = self.models.ManagedCluster(
        location="test_location",
        network_profile=self.models.ContainerServiceNetworkProfile(
            nat_gateway_profile=self.models.nat_gateway_models.ManagedClusterNATGatewayProfile(
                managed_outbound_ip_profile=self.models.nat_gateway_models.ManagedClusterManagedOutboundIPProfile(
                    count=5
                ),
                idle_timeout_in_minutes=30,
            )
        ),
    )
    self.assertEqual(dec_mc_3, ground_truth_mc_3)
def test_update_disable_local_accounts(self):
    """Verify update_disable_local_accounts flips the disable_local_accounts flag.

    Case 1 (defaults): neither flag set, existing value (True) is kept.
    Case 2: disable_local_accounts set -> flag forced to True.
    Case 3: enable_local_accounts set -> flag forced to False.
    """
    # default value in `aks_update`
    dec_1 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "disable_local_accounts": False,
            "enable_local_accounts": False,
        },
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    # fail on passing the wrong mc object
    with self.assertRaises(CLIInternalError):
        dec_1.update_disable_local_accounts(None)
    mc_1 = self.models.ManagedCluster(
        location="test_location",
        disable_local_accounts=True,
    )
    dec_1.context.attach_mc(mc_1)
    dec_mc_1 = dec_1.update_disable_local_accounts(mc_1)
    ground_truth_mc_1 = self.models.ManagedCluster(
        location="test_location",
        disable_local_accounts=True,
    )
    self.assertEqual(dec_mc_1, ground_truth_mc_1)
    # custom value
    dec_2 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "disable_local_accounts": True,
            "enable_local_accounts": False,
        },
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    mc_2 = self.models.ManagedCluster(
        location="test_location",
        disable_local_accounts=False,
    )
    dec_2.context.attach_mc(mc_2)
    dec_mc_2 = dec_2.update_disable_local_accounts(mc_2)
    ground_truth_mc_2 = self.models.ManagedCluster(
        location="test_location",
        disable_local_accounts=True,
    )
    self.assertEqual(dec_mc_2, ground_truth_mc_2)
    # custom value
    dec_3 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "disable_local_accounts": False,
            "enable_local_accounts": True,
        },
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    mc_3 = self.models.ManagedCluster(
        location="test_location",
        disable_local_accounts=True,
    )
    dec_3.context.attach_mc(mc_3)
    dec_mc_3 = dec_3.update_disable_local_accounts(mc_3)
    ground_truth_mc_3 = self.models.ManagedCluster(
        location="test_location",
        disable_local_accounts=False,
    )
    self.assertEqual(dec_mc_3, ground_truth_mc_3)
def test_update_api_server_access_profile(self):
    """Verify update_api_server_access_profile updates authorized IPs and FQDN flags.

    Case 1 (defaults): nothing requested, mc without an access profile is unchanged.
    Case 2: empty authorized-IP string clears the ranges and disable_public_fqdn
    turns off the private cluster public FQDN.
    Case 3: enable_public_fqdn turns the public FQDN back on; the private DNS zone
    value is preserved.
    """
    # default value in `aks_update`
    dec_1 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "api_server_authorized_ip_ranges": None,
            "disable_public_fqdn": False,
            "enable_public_fqdn": False,
        },
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    # fail on passing the wrong mc object
    with self.assertRaises(CLIInternalError):
        dec_1.update_api_server_access_profile(None)
    mc_1 = self.models.ManagedCluster(
        location="test_location",
    )
    dec_1.context.attach_mc(mc_1)
    dec_mc_1 = dec_1.update_api_server_access_profile(mc_1)
    ground_truth_mc_1 = self.models.ManagedCluster(
        location="test_location",
    )
    self.assertEqual(dec_mc_1, ground_truth_mc_1)
    # custom value
    dec_2 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "api_server_authorized_ip_ranges": "",
            "disable_public_fqdn": True,
            "enable_public_fqdn": False,
        },
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    api_server_access_profile_2 = self.models.ManagedClusterAPIServerAccessProfile(
        authorized_ip_ranges=["test_ip_1", "test_ip_2"],
        enable_private_cluster=True,
        enable_private_cluster_public_fqdn=True,
        private_dns_zone=CONST_PRIVATE_DNS_ZONE_SYSTEM,
    )
    mc_2 = self.models.ManagedCluster(
        location="test_location",
        api_server_access_profile=api_server_access_profile_2,
    )
    dec_2.context.attach_mc(mc_2)
    dec_mc_2 = dec_2.update_api_server_access_profile(mc_2)
    ground_truth_api_server_access_profile_2 = self.models.ManagedClusterAPIServerAccessProfile(
        authorized_ip_ranges=[],
        enable_private_cluster=True,
        enable_private_cluster_public_fqdn=False,
        private_dns_zone=CONST_PRIVATE_DNS_ZONE_SYSTEM,
    )
    ground_truth_mc_2 = self.models.ManagedCluster(
        location="test_location",
        api_server_access_profile=ground_truth_api_server_access_profile_2,
    )
    self.assertEqual(dec_mc_2, ground_truth_mc_2)
    # custom value
    dec_3 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "api_server_authorized_ip_ranges": None,
            "disable_public_fqdn": False,
            "enable_public_fqdn": True,
        },
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    api_server_access_profile_3 = self.models.ManagedClusterAPIServerAccessProfile(
        enable_private_cluster=True,
        enable_private_cluster_public_fqdn=False,
        private_dns_zone=CONST_PRIVATE_DNS_ZONE_NONE,
    )
    mc_3 = self.models.ManagedCluster(
        location="test_location",
        api_server_access_profile=api_server_access_profile_3,
    )
    dec_3.context.attach_mc(mc_3)
    dec_mc_3 = dec_3.update_api_server_access_profile(mc_3)
    ground_truth_api_server_access_profile_3 = self.models.ManagedClusterAPIServerAccessProfile(
        enable_private_cluster=True,
        enable_private_cluster_public_fqdn=True,
        private_dns_zone=CONST_PRIVATE_DNS_ZONE_NONE,
    )
    ground_truth_mc_3 = self.models.ManagedCluster(
        location="test_location",
        api_server_access_profile=ground_truth_api_server_access_profile_3,
    )
    self.assertEqual(dec_mc_3, ground_truth_mc_3)
def test_update_windows_profile(self):
    """Verify update_windows_profile updates AHUB licensing, admin password and gMSA.

    Case 1 (defaults): nothing requested, mc without a windows profile is unchanged.
    Case 2: enable_ahub + new admin password + gMSA settings; license_type becomes
    "Windows_Server", the password and gMSA dns/root-domain values are overwritten.
    Case 3: disable_ahub; license_type becomes "None", everything else preserved.
    Case 4: enable_ahub but mc has no windows profile -> UnknownError.
    Case 5: enable_windows_gmsa (prompt confirmed via patch) but mc has no windows
    profile -> UnknownError.
    """
    # default value in `aks_update`
    dec_1 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "enable_ahub": False,
            "disable_ahub": False,
            "windows_admin_password": None,
            "enable_windows_gmsa": False,
            "gmsa_dns_server": None,
            "gmsa_root_domain_name": None,
        },
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    # fail on passing the wrong mc object
    with self.assertRaises(CLIInternalError):
        dec_1.update_windows_profile(None)
    mc_1 = self.models.ManagedCluster(
        location="test_location",
    )
    dec_1.context.attach_mc(mc_1)
    dec_mc_1 = dec_1.update_windows_profile(mc_1)
    ground_truth_mc_1 = self.models.ManagedCluster(
        location="test_location",
    )
    self.assertEqual(dec_mc_1, ground_truth_mc_1)
    # custom value
    dec_2 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "enable_ahub": True,
            "disable_ahub": False,
            "windows_admin_password": "test_admin_password",
            "enable_windows_gmsa": True,
            "gmsa_dns_server": "test_gmsa_dns_server",
            "gmsa_root_domain_name": "test_gmsa_root_domain_name",
        },
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    gmsa_profile_2 = self.models.WindowsGmsaProfile(
        enabled=True,
        dns_server="test_mc_gmsa_dns_server",
        root_domain_name="test_mc_gmsa_root_domain_name",
    )
    windows_profile_2 = self.models.ManagedClusterWindowsProfile(
        # [SuppressMessage("Microsoft.Security", "CS002:SecretInNextLine", Justification="fake secrets in unit test")]
        admin_username="test_mc_win_admin_name",
        admin_password="test_mc_win_admin_pd",
        gmsa_profile=gmsa_profile_2,
    )
    mc_2 = self.models.ManagedCluster(
        location="test_location",
        windows_profile=windows_profile_2,
    )
    dec_2.context.attach_mc(mc_2)
    dec_mc_2 = dec_2.update_windows_profile(mc_2)
    ground_truth_gmsa_profile_2 = self.models.WindowsGmsaProfile(
        enabled=True,
        dns_server="test_gmsa_dns_server",
        root_domain_name="test_gmsa_root_domain_name",
    )
    ground_truth_windows_profile_2 = self.models.ManagedClusterWindowsProfile(
        # [SuppressMessage("Microsoft.Security", "CS002:SecretInNextLine", Justification="fake secrets in unit test")]
        admin_username="test_mc_win_admin_name",
        admin_password="test_admin_password",
        license_type="Windows_Server",
        gmsa_profile=ground_truth_gmsa_profile_2,
    )
    ground_truth_mc_2 = self.models.ManagedCluster(
        location="test_location",
        windows_profile=ground_truth_windows_profile_2,
    )
    self.assertEqual(dec_mc_2, ground_truth_mc_2)
    # custom value
    dec_3 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "enable_ahub": False,
            "disable_ahub": True,
            "windows_admin_password": None,
            "enable_windows_gmsa": False,
            "gmsa_dns_server": None,
            "gmsa_root_domain_name": None,
        },
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    windows_profile_3 = self.models.ManagedClusterWindowsProfile(
        # [SuppressMessage("Microsoft.Security", "CS002:SecretInNextLine", Justification="fake secrets in unit test")]
        admin_username="test_mc_win_admin_name",
        admin_password="test_mc_win_admin_pd",
    )
    mc_3 = self.models.ManagedCluster(
        location="test_location",
        windows_profile=windows_profile_3,
    )
    dec_3.context.attach_mc(mc_3)
    dec_mc_3 = dec_3.update_windows_profile(mc_3)
    ground_truth_windows_profile_3 = self.models.ManagedClusterWindowsProfile(
        # [SuppressMessage("Microsoft.Security", "CS002:SecretInNextLine", Justification="fake secrets in unit test")]
        admin_username="test_mc_win_admin_name",
        admin_password="test_mc_win_admin_pd",
        license_type="None",
    )
    ground_truth_mc_3 = self.models.ManagedCluster(
        location="test_location",
        windows_profile=ground_truth_windows_profile_3,
    )
    self.assertEqual(dec_mc_3, ground_truth_mc_3)
    # custom value
    dec_4 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "enable_ahub": True,
            "disable_ahub": False,
            "windows_admin_password": None,
            "enable_windows_gmsa": False,
            "gmsa_dns_server": None,
            "gmsa_root_domain_name": None,
        },
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    mc_4 = self.models.ManagedCluster(
        location="test_location",
    )
    dec_4.context.attach_mc(mc_4)
    # fail on incomplete mc object (no windows profile)
    with self.assertRaises(UnknownError):
        dec_4.update_windows_profile(mc_4)
    # custom value
    dec_5 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "enable_ahub": False,
            "disable_ahub": False,
            "windows_admin_password": None,
            "enable_windows_gmsa": True,
            "gmsa_dns_server": None,
            "gmsa_root_domain_name": None,
        },
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    mc_5 = self.models.ManagedCluster(
        location="test_location",
    )
    dec_5.context.attach_mc(mc_5)
    # fail on incomplete mc object (no windows profile)
    with patch(
        "azure.cli.command_modules.acs.managed_cluster_decorator.prompt_y_n",
        return_value=True,
    ), self.assertRaises(UnknownError):
        dec_5.update_windows_profile(mc_5)
def test_update_aad_profile(self):
    """Verify update_aad_profile upgrades/updates the AAD integration settings.

    Case 1 (defaults): nothing requested, mc without an AAD profile is unchanged.
    Case 2: enable_aad migrates a legacy (managed=False) profile to managed=True.
    Case 3: tenant id, admin group object ids and enable_azure_rbac are applied
    to an existing managed profile.
    Case 4: disable_azure_rbac flips enable_azure_rbac back to False, keeping the
    tenant and admin group settings.
    """
    # default value in `aks_update`
    dec_1 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "enable_aad": False,
            "aad_tenant_id": None,
            "aad_admin_group_object_ids": None,
            "enable_azure_rbac": False,
            "disable_azure_rbac": False,
        },
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    # fail on passing the wrong mc object
    with self.assertRaises(CLIInternalError):
        dec_1.update_aad_profile(None)
    mc_1 = self.models.ManagedCluster(
        location="test_location",
    )
    dec_1.context.attach_mc(mc_1)
    dec_mc_1 = dec_1.update_aad_profile(mc_1)
    ground_truth_mc_1 = self.models.ManagedCluster(
        location="test_location",
    )
    self.assertEqual(dec_mc_1, ground_truth_mc_1)
    # custom value
    dec_2 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "enable_aad": True,
            "aad_tenant_id": None,
            "aad_admin_group_object_ids": None,
            "enable_azure_rbac": False,
            "disable_azure_rbac": False,
        },
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    aad_profile_2 = self.models.ManagedClusterAADProfile(
        managed=False,
    )
    mc_2 = self.models.ManagedCluster(location="test_location", aad_profile=aad_profile_2)
    dec_2.context.attach_mc(mc_2)
    dec_mc_2 = dec_2.update_aad_profile(mc_2)
    ground_truth_aad_profile_2 = self.models.ManagedClusterAADProfile(
        managed=True,
    )
    ground_truth_mc_2 = self.models.ManagedCluster(
        location="test_location",
        aad_profile=ground_truth_aad_profile_2,
    )
    self.assertEqual(dec_mc_2, ground_truth_mc_2)
    # custom value
    dec_3 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "enable_aad": False,
            "aad_tenant_id": "test_aad_tenant_id",
            "aad_admin_group_object_ids": "test_admin_1,test_admin_2",
            "enable_azure_rbac": True,
            "disable_azure_rbac": False,
        },
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    aad_profile_3 = self.models.ManagedClusterAADProfile(
        managed=True,
        enable_azure_rbac=False,
    )
    mc_3 = self.models.ManagedCluster(location="test_location", aad_profile=aad_profile_3)
    dec_3.context.attach_mc(mc_3)
    dec_mc_3 = dec_3.update_aad_profile(mc_3)
    ground_truth_aad_profile_3 = self.models.ManagedClusterAADProfile(
        managed=True,
        enable_azure_rbac=True,
        tenant_id="test_aad_tenant_id",
        admin_group_object_i_ds=["test_admin_1", "test_admin_2"],
    )
    ground_truth_mc_3 = self.models.ManagedCluster(
        location="test_location",
        aad_profile=ground_truth_aad_profile_3,
    )
    self.assertEqual(dec_mc_3, ground_truth_mc_3)
    # custom value
    dec_4 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "enable_aad": False,
            "aad_tenant_id": None,
            "aad_admin_group_object_ids": None,
            "enable_azure_rbac": False,
            "disable_azure_rbac": True,
        },
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    aad_profile_4 = self.models.ManagedClusterAADProfile(
        managed=True,
        tenant_id="test_aad_tenant_id",
        admin_group_object_i_ds=["test_admin_1", "test_admin_2"],
        enable_azure_rbac=True,
    )
    mc_4 = self.models.ManagedCluster(location="test_location", aad_profile=aad_profile_4)
    dec_4.context.attach_mc(mc_4)
    dec_mc_4 = dec_4.update_aad_profile(mc_4)
    ground_truth_aad_profile_4 = self.models.ManagedClusterAADProfile(
        managed=True,
        tenant_id="test_aad_tenant_id",
        admin_group_object_i_ds=["test_admin_1", "test_admin_2"],
        enable_azure_rbac=False,
    )
    ground_truth_mc_4 = self.models.ManagedCluster(
        location="test_location",
        aad_profile=ground_truth_aad_profile_4,
    )
    self.assertEqual(dec_mc_4, ground_truth_mc_4)
def test_update_auto_upgrade_profile(self):
    """Verify update_auto_upgrade_profile for the default and a custom channel.

    Default (channel None) leaves the mc unchanged; requesting the "stable"
    channel populates auto_upgrade_profile with that upgrade_channel.
    """
    # default value in `aks_update`: no channel requested
    default_dec = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {"auto_upgrade_channel": None},
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    # fail on passing the wrong mc object
    with self.assertRaises(CLIInternalError):
        default_dec.update_auto_upgrade_profile(None)
    plain_mc = self.models.ManagedCluster(location="test_location")
    default_dec.context.attach_mc(plain_mc)
    updated_plain_mc = default_dec.update_auto_upgrade_profile(plain_mc)
    self.assertEqual(updated_plain_mc, self.models.ManagedCluster(location="test_location"))
    # custom value: the "stable" channel is written into the profile
    custom_dec = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {"auto_upgrade_channel": "stable"},
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    channel_mc = self.models.ManagedCluster(location="test_location")
    custom_dec.context.attach_mc(channel_mc)
    updated_channel_mc = custom_dec.update_auto_upgrade_profile(channel_mc)
    expected_profile = self.models.ManagedClusterAutoUpgradeProfile(upgrade_channel="stable")
    expected_mc = self.models.ManagedCluster(
        location="test_location",
        auto_upgrade_profile=expected_profile,
    )
    self.assertEqual(updated_channel_mc, expected_mc)
def test_update_identity(self):
    """Verify update_identity handles system-assigned <-> user-assigned migration.

    Case 1 (defaults): nothing requested, mc is unchanged.
    Case 2: assign_identity without enable_managed_identity ->
    RequiredArgumentMissingError.
    Case 3: migration requested but the confirmation prompt returns False ->
    DecoratorEarlyExitException.
    Case 4: yes=True; SystemAssigned identity is replaced in place by a
    UserAssigned identity keyed by the given resource id.
    Case 5: yes=True with assign_identity None; a UserAssigned identity is
    migrated back to SystemAssigned in place.
    """
    # default value in `aks_update`
    dec_1 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "enable_managed_identity": False,
            "assign_identity": None,
        },
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    # fail on passing the wrong mc object
    with self.assertRaises(CLIInternalError):
        dec_1.update_identity(None)
    mc_1 = self.models.ManagedCluster(
        location="test_location",
    )
    dec_1.context.attach_mc(mc_1)
    dec_mc_1 = dec_1.update_identity(mc_1)
    ground_truth_mc_1 = self.models.ManagedCluster(
        location="test_location",
    )
    self.assertEqual(dec_mc_1, ground_truth_mc_1)
    # custom value
    dec_2 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "enable_managed_identity": False,
            "assign_identity": "test_assign_identity",
        },
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    mc_2 = self.models.ManagedCluster(
        location="test_location",
    )
    dec_2.context.attach_mc(mc_2)
    # fail on enable_managed_identity not specified
    with self.assertRaises(RequiredArgumentMissingError):
        dec_2.update_identity(mc_2)
    # custom value
    dec_3 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "enable_managed_identity": True,
            "assign_identity": "test_assign_identity",
        },
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    mc_3 = self.models.ManagedCluster(
        location="test_location",
    )
    dec_3.context.attach_mc(mc_3)
    with patch(
        "azure.cli.command_modules.acs.managed_cluster_decorator.prompt_y_n",
        return_value=False,
    ):
        # fail on user does not confirm
        with self.assertRaises(DecoratorEarlyExitException):
            dec_3.update_identity(mc_3)
    # custom value
    dec_4 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "enable_managed_identity": True,
            "assign_identity": "test_assign_identity",
            "yes": True,
        },
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    identity_4 = self.models.ManagedClusterIdentity(type="SystemAssigned")
    mc_4 = self.models.ManagedCluster(
        location="test_location",
        identity=identity_4,
    )
    dec_4.context.attach_mc(mc_4)
    # update_identity mutates mc_4 in place; the return value is not needed
    dec_4.update_identity(mc_4)
    ground_truth_user_assigned_identity_4 = {
        "test_assign_identity": self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()
    }
    ground_truth_identity_4 = self.models.ManagedClusterIdentity(
        type="UserAssigned",
        user_assigned_identities=ground_truth_user_assigned_identity_4,
    )
    ground_truth_mc_4 = self.models.ManagedCluster(
        location="test_location",
        identity=ground_truth_identity_4,
    )
    self.assertEqual(mc_4, ground_truth_mc_4)
    # custom value
    dec_5 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "enable_managed_identity": True,
            "assign_identity": None,
            "yes": True,
        },
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    user_assigned_identity_5 = {
        "test_assign_identity": self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()
    }
    identity_5 = self.models.ManagedClusterIdentity(
        type="UserAssigned",
        user_assigned_identities=user_assigned_identity_5,
    )
    mc_5 = self.models.ManagedCluster(
        location="test_location",
        identity=identity_5,
    )
    dec_5.context.attach_mc(mc_5)
    # in-place mutation again; mc_5 itself is compared below
    dec_5.update_identity(mc_5)
    ground_truth_identity_5 = self.models.ManagedClusterIdentity(
        type="SystemAssigned",
    )
    ground_truth_mc_5 = self.models.ManagedCluster(
        location="test_location",
        identity=ground_truth_identity_5,
    )
    self.assertEqual(mc_5, ground_truth_mc_5)
def test_update_azure_keyvault_secrets_provider_addon_profile(self):
    """Verify the keyvault secrets provider addon profile is updated in place.

    Case 1 (defaults): passing None is tolerated (no-op).
    Case 2: enable_secret_rotation + rotation_poll_interval overwrite the addon
    config ("false"/"2m" -> "true"/"5m") on the profile object itself.
    Case 3: disable_secret_rotation flips the rotation flag to "false", leaving
    the poll interval untouched.
    """
    # default
    dec_1 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "enable_secret_rotation": False,
            "disable_secret_rotation": False,
            "rotation_poll_interval": None,
        },
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    dec_1.update_azure_keyvault_secrets_provider_addon_profile(None)
    # custom value
    dec_2 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "enable_secret_rotation": True,
            "disable_secret_rotation": False,
            "rotation_poll_interval": "5m",
        },
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    azure_keyvault_secrets_provider_addon_profile_2 = self.models.ManagedClusterAddonProfile(
        enabled=True,
        config={
            CONST_SECRET_ROTATION_ENABLED: "false",
            CONST_ROTATION_POLL_INTERVAL: "2m",
        },
    )
    mc_2 = self.models.ManagedCluster(
        location="test_location",
        addon_profiles={
            CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME: azure_keyvault_secrets_provider_addon_profile_2
        },
    )
    dec_2.context.attach_mc(mc_2)
    # the profile object passed in is mutated in place and compared below
    dec_2.update_azure_keyvault_secrets_provider_addon_profile(azure_keyvault_secrets_provider_addon_profile_2)
    ground_truth_azure_keyvault_secrets_provider_addon_profile_2 = self.models.ManagedClusterAddonProfile(
        enabled=True,
        config={
            CONST_SECRET_ROTATION_ENABLED: "true",
            CONST_ROTATION_POLL_INTERVAL: "5m",
        },
    )
    self.assertEqual(
        azure_keyvault_secrets_provider_addon_profile_2,
        ground_truth_azure_keyvault_secrets_provider_addon_profile_2,
    )
    # custom value
    dec_3 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "enable_secret_rotation": False,
            "disable_secret_rotation": True,
            "rotation_poll_interval": None,
        },
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    azure_keyvault_secrets_provider_addon_profile_3 = self.models.ManagedClusterAddonProfile(
        enabled=True,
        config={
            CONST_SECRET_ROTATION_ENABLED: "true",
            CONST_ROTATION_POLL_INTERVAL: "2m",
        },
    )
    mc_3 = self.models.ManagedCluster(
        location="test_location",
        addon_profiles={
            CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME: azure_keyvault_secrets_provider_addon_profile_3
        },
    )
    dec_3.context.attach_mc(mc_3)
    dec_3.update_azure_keyvault_secrets_provider_addon_profile(azure_keyvault_secrets_provider_addon_profile_3)
    ground_truth_azure_keyvault_secrets_provider_addon_profile_3 = self.models.ManagedClusterAddonProfile(
        enabled=True,
        config={
            CONST_SECRET_ROTATION_ENABLED: "false",
            CONST_ROTATION_POLL_INTERVAL: "2m",
        },
    )
    self.assertEqual(
        azure_keyvault_secrets_provider_addon_profile_3,
        ground_truth_azure_keyvault_secrets_provider_addon_profile_3,
    )
def test_update_addon_profiles(self):
    """Verify update_addon_profiles preserves enabled addons and records the
    per-addon "*_addon_enabled" intermediates on the context.

    With no secret-rotation parameters set, the monitoring, ingress appgw and
    virtual node addon profiles pass through unchanged, while the three
    corresponding intermediates are set to True.
    """
    # default value in `aks_update`
    dec_1 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {
            "enable_secret_rotation": False,
            "disable_secret_rotation": False,
            "rotation_poll_interval": None,
        },
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    # fail on passing the wrong mc object
    with self.assertRaises(CLIInternalError):
        dec_1.update_addon_profiles(None)
    monitoring_addon_profile_1 = self.models.ManagedClusterAddonProfile(
        enabled=True,
        config={},
    )
    ingress_appgw_addon_profile_1 = self.models.ManagedClusterAddonProfile(
        enabled=True,
        config={},
    )
    virtual_node_addon_profile_1 = self.models.ManagedClusterAddonProfile(
        enabled=True,
        config={},
    )
    mc_1 = self.models.ManagedCluster(
        location="test_location",
        addon_profiles={
            CONST_MONITORING_ADDON_NAME: monitoring_addon_profile_1,
            CONST_INGRESS_APPGW_ADDON_NAME: ingress_appgw_addon_profile_1,
            # the virtual node addon key is suffixed with the OS type
            CONST_VIRTUAL_NODE_ADDON_NAME
            + dec_1.context.get_virtual_node_addon_os_type(): virtual_node_addon_profile_1,
        },
    )
    dec_1.context.attach_mc(mc_1)
    dec_mc_1 = dec_1.update_addon_profiles(mc_1)
    ground_truth_monitoring_addon_profile_1 = self.models.ManagedClusterAddonProfile(
        enabled=True,
        config={},
    )
    ground_truth_ingress_appgw_addon_profile_1 = self.models.ManagedClusterAddonProfile(
        enabled=True,
        config={},
    )
    ground_truth_virtual_node_addon_profile_1 = self.models.ManagedClusterAddonProfile(
        enabled=True,
        config={},
    )
    ground_truth_mc_1 = self.models.ManagedCluster(
        location="test_location",
        addon_profiles={
            CONST_MONITORING_ADDON_NAME: ground_truth_monitoring_addon_profile_1,
            CONST_INGRESS_APPGW_ADDON_NAME: ground_truth_ingress_appgw_addon_profile_1,
            CONST_VIRTUAL_NODE_ADDON_NAME
            + dec_1.context.get_virtual_node_addon_os_type(): ground_truth_virtual_node_addon_profile_1,
        },
    )
    self.assertEqual(dec_mc_1, ground_truth_mc_1)
    self.assertEqual(dec_1.context.get_intermediate("monitoring_addon_enabled"), True)
    self.assertEqual(dec_1.context.get_intermediate("ingress_appgw_addon_enabled"), True)
    self.assertEqual(dec_1.context.get_intermediate("virtual_node_addon_enabled"), True)
def test_update_defender(self):
    """Verify update_defender enables Azure Defender and wires in the default
    log analytics workspace returned by the (stubbed) workspace helper."""
    defender_dec = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {"enable_defender": True},
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    target_mc = self.models.ManagedCluster(location="test_location")
    defender_dec.context.attach_mc(target_mc)
    defender_dec.context.set_intermediate("subscription_id", "test_subscription_id")
    # stub the workspace lookup so no ARM request is issued
    with patch(
        "azure.cli.command_modules.acs.managed_cluster_decorator.ensure_default_log_analytics_workspace_for_monitoring",
        return_value="test_workspace_resource_id",
    ):
        result_mc = defender_dec.update_defender(target_mc)
    expected_defender = self.models.ManagedClusterSecurityProfileAzureDefender(
        enabled=True,
        log_analytics_workspace_resource_id="test_workspace_resource_id",
    )
    expected_mc = self.models.ManagedCluster(
        location="test_location",
        security_profile=self.models.ManagedClusterSecurityProfile(
            azure_defender=expected_defender,
        ),
    )
    self.assertEqual(result_mc, expected_mc)
def test_update_identity_profile(self):
    """Cover update_identity_profile: no-op without parameters, kubelet
    identity assignment, user confirmation and precondition errors."""
    # Case 1: no kubelet identity requested -> mc is returned unchanged.
    dec_1 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {},
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    mc_1 = self.models.ManagedCluster(
        location="test_location",
    )
    dec_1.context.attach_mc(mc_1)
    dec_mc_1 = dec_1.update_identity_profile(mc_1)
    ground_truth_mc_1 = self.models.ManagedCluster(
        location="test_location",
    )
    self.assertEqual(dec_mc_1, ground_truth_mc_1)
    # Identity object returned by the mocked MSI client lookup below.
    cluster_identity_obj = Mock(
        client_id="test_cluster_identity_client_id",
        principal_id="test_cluster_identity_object_id",
    )
    # Case 2: assign a kubelet identity with --yes on a cluster that already
    # has a user-assigned identity -> identity_profile is filled in.
    with patch(
        "azure.cli.command_modules.acs.managed_cluster_decorator.AKSManagedClusterContext.get_identity_by_msi_client",
        side_effect=[cluster_identity_obj],
    ), patch(
        "azure.cli.command_modules.acs.managed_cluster_decorator.ensure_cluster_identity_permission_on_kubelet_identity",
        return_value=None,
    ):
        dec_2 = AKSManagedClusterUpdateDecorator(
            self.cmd,
            self.client,
            {
                "assign_kubelet_identity": "test_assign_kubelet_identity",
                "yes": True,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        cluster_identity = self.models.ManagedClusterIdentity(
            type="UserAssigned",
            user_assigned_identities={"test_assign_identity": {}},
        )
        mc_2 = self.models.ManagedCluster(location="test_location", identity=cluster_identity)
        dec_2.context.attach_mc(mc_2)
        dec_mc_2 = dec_2.update_identity_profile(mc_2)
        identity_profile_2 = {
            "kubeletidentity": self.models.UserAssignedIdentity(
                resource_id="test_assign_kubelet_identity",
            )
        }
        ground_truth_mc_2 = self.models.ManagedCluster(
            location="test_location",
            identity=cluster_identity,
            identity_profile=identity_profile_2,
        )
        self.assertEqual(dec_mc_2, ground_truth_mc_2)
    # Case 3: without --yes the user is prompted; declining aborts early.
    with patch(
        "azure.cli.command_modules.acs.managed_cluster_decorator.prompt_y_n",
        return_value=False,
    ), self.assertRaises(DecoratorEarlyExitException):
        dec_3 = AKSManagedClusterUpdateDecorator(
            self.cmd,
            self.client,
            {
                "assign_kubelet_identity": "test_assign_kubelet_identity",
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        cluster_identity = self.models.ManagedClusterIdentity(
            type="UserAssigned",
            user_assigned_identities={"test_assign_identity": {}},
        )
        mc_3 = self.models.ManagedCluster(location="test_location", identity=cluster_identity)
        dec_3.context.attach_mc(mc_3)
        dec_3.update_identity_profile(mc_3)
    # Case 4: kubelet identity without any cluster identity -> error.
    with self.assertRaises(RequiredArgumentMissingError):
        dec_4 = AKSManagedClusterUpdateDecorator(
            self.cmd,
            self.client,
            {
                "assign_kubelet_identity": "test_assign_kubelet_identity",
                "yes": True,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        mc_4 = self.models.ManagedCluster(location="test_location")
        dec_4.context.attach_mc(mc_4)
        dec_4.update_identity_profile(mc_4)
    # Case 5: enabling managed identity and assigning both the cluster and
    # the kubelet identity together also populates identity_profile.
    with patch(
        "azure.cli.command_modules.acs.managed_cluster_decorator.AKSManagedClusterContext.get_identity_by_msi_client",
        side_effect=[cluster_identity_obj],
    ), patch(
        "azure.cli.command_modules.acs.managed_cluster_decorator.ensure_cluster_identity_permission_on_kubelet_identity",
        return_value=None,
    ):
        dec_5 = AKSManagedClusterUpdateDecorator(
            self.cmd,
            self.client,
            {
                "enable_managed_identity": True,
                "assign_identity": "test_assign_identity",
                "assign_kubelet_identity": "test_assign_kubelet_identity",
                "yes": True,
            },
            ResourceType.MGMT_CONTAINERSERVICE,
        )
        cluster_identity = self.models.ManagedClusterIdentity(
            type="UserAssigned",
            user_assigned_identities={"test_assign_identity": {}},
        )
        mc_5 = self.models.ManagedCluster(location="test_location", identity=cluster_identity)
        dec_5.context.attach_mc(mc_5)
        dec_mc_5 = dec_5.update_identity_profile(mc_5)
        identity_profile_5 = {
            "kubeletidentity": self.models.UserAssignedIdentity(
                resource_id="test_assign_kubelet_identity",
            )
        }
        ground_truth_mc_5 = self.models.ManagedCluster(
            location="test_location",
            identity=cluster_identity,
            identity_profile=identity_profile_5,
        )
        self.assertEqual(dec_mc_5, ground_truth_mc_5)
def test_update_mc_profile_default(self):
    """Run update_mc_profile_default with every aks_update default and
    verify the existing managed cluster comes back unchanged."""
    import inspect

    from azure.cli.command_modules.acs.custom import aks_update

    # Split the aks_update signature into positional and optional params.
    optional_params = {}
    positional_params = []
    for _, v in inspect.signature(aks_update).parameters.items():
        if v.default != v.empty:
            optional_params[v.name] = v.default
        else:
            positional_params.append(v.name)
    ground_truth_positional_params = [
        "cmd",
        "client",
        "resource_group_name",
        "name",
    ]
    self.assertEqual(positional_params, ground_truth_positional_params)
    # prepare a dictionary of default parameters
    raw_param_dict = {
        "resource_group_name": "test_rg_name",
        "name": "test_name",
    }
    raw_param_dict.update(optional_params)
    # default value in `update`
    dec_1 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        raw_param_dict,
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    mock_profile = Mock(get_subscription_id=Mock(return_value="1234-5678-9012"))
    # The cluster the (mocked) GET returns; defaults must not modify it.
    mock_existing_mc = self.models.ManagedCluster(
        location="test_location",
        agent_pool_profiles=[
            self.models.ManagedClusterAgentPoolProfile(
                name="nodepool1",
            )
        ],
        network_profile=self.models.ContainerServiceNetworkProfile(
            load_balancer_sku="standard",
        ),
        identity=self.models.ManagedClusterIdentity(type="SystemAssigned"),
        identity_profile={
            "kubeletidentity": self.models.UserAssignedIdentity(
                resource_id="test_resource_id",
                client_id="test_client_id",
                object_id="test_object_id",
            )
        },
    )
    with patch(
        "azure.cli.command_modules.acs.managed_cluster_decorator.get_rg_location",
        return_value="test_location",
    ), patch("azure.cli.command_modules.acs.managed_cluster_decorator.Profile", return_value=mock_profile,), patch(
        "azure.cli.command_modules.acs.managed_cluster_decorator.AKSManagedClusterUpdateDecorator.check_raw_parameters",
        return_value=True,
    ), patch.object(
        self.client, "get", return_value=mock_existing_mc
    ):
        dec_mc_1 = dec_1.update_mc_profile_default()
    ground_truth_agent_pool_profile_1 = self.models.ManagedClusterAgentPoolProfile(
        name="nodepool1",
    )
    ground_truth_network_profile_1 = self.models.ContainerServiceNetworkProfile(
        load_balancer_sku="standard",
    )
    ground_truth_identity_1 = self.models.ManagedClusterIdentity(type="SystemAssigned")
    ground_truth_identity_profile_1 = {
        "kubeletidentity": self.models.UserAssignedIdentity(
            resource_id="test_resource_id",
            client_id="test_client_id",
            object_id="test_object_id",
        )
    }
    ground_truth_mc_1 = self.models.ManagedCluster(
        location="test_location",
        agent_pool_profiles=[ground_truth_agent_pool_profile_1],
        network_profile=ground_truth_network_profile_1,
        identity=ground_truth_identity_1,
        identity_profile=ground_truth_identity_profile_1,
    )
    self.assertEqual(dec_mc_1, ground_truth_mc_1)
    # Exercise the usage-statistics printer for coverage.
    dec_1.context.raw_param.print_usage_statistics()
def test_check_is_postprocessing_required(self):
    """Postprocessing is required when any addon intermediate is set, or
    when an ACR has to be attached to a managed-identity cluster."""
    decorator = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {},
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    cluster = self.models.ManagedCluster(location="test_location")
    decorator.context.attach_mc(cluster)
    # Nothing enabled -> no postprocessing.
    self.assertEqual(decorator.check_is_postprocessing_required(cluster), False)
    # Each addon intermediate on its own triggers postprocessing.
    addon_intermediates = [
        "monitoring_addon_enabled",
        "ingress_appgw_addon_enabled",
        "virtual_node_addon_enabled",
    ]
    for index, intermediate in enumerate(addon_intermediates):
        decorator.context.set_intermediate(intermediate, True)
        self.assertEqual(decorator.check_is_postprocessing_required(cluster), True)
        if index < len(addon_intermediates) - 1:
            decorator.context.remove_intermediate(intermediate)
    # Attaching an ACR on a managed-identity cluster also requires it.
    acr_decorator = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {"attach_acr": "test_attach_acr"},
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    acr_cluster = self.models.ManagedCluster(
        location="test_location", identity=self.models.ManagedClusterIdentity(type="SystemAssigned")
    )
    acr_decorator.context.attach_mc(acr_cluster)
    self.assertEqual(acr_decorator.check_is_postprocessing_required(acr_cluster), True)
def test_immediate_processing_after_request(self):
    # The update flow performs no immediate processing after the request,
    # so there is nothing to verify for this hook.
    pass
def test_postprocessing_after_mc_created(self):
    """Verify the per-addon postprocessing hooks invoked after the managed
    cluster update completes (monitoring, ingress appgw, virtual node, ACR)."""
    # Case 1: legacy monitoring (no MSI auth) -> role assignment is added.
    dec_1 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {"enable_msi_auth_for_monitoring": False},
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    mc_1 = self.models.ManagedCluster(location="test_location")
    dec_1.context.attach_mc(mc_1)
    dec_1.context.set_intermediate("monitoring_addon_enabled", True)
    mock_profile = Mock(get_subscription_id=Mock(return_value="1234-5678-9012"))
    with patch("azure.cli.command_modules.acs.managed_cluster_decorator.Profile", return_value=mock_profile), patch(
        "azure.cli.command_modules.acs.managed_cluster_decorator.add_monitoring_role_assignment"
    ) as mock_add:
        dec_1.postprocessing_after_mc_created(mc_1)
    mock_add.assert_called_once_with(mc_1, ANY, self.cmd)
    # Case 2: MSI-auth monitoring -> container insights are ensured with
    # the AAD route and a DCR association.
    dec_2 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {"resource_group_name": "test_rg_name", "name": "test_name", "enable_msi_auth_for_monitoring": True},
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    monitoring_addon_profile_2 = self.models.ManagedClusterAddonProfile(
        enabled=True,
        config={},
    )
    mc_2 = self.models.ManagedCluster(
        location="test_location",
        addon_profiles={
            CONST_MONITORING_ADDON_NAME: monitoring_addon_profile_2,
        },
    )
    dec_2.context.attach_mc(mc_2)
    dec_2.context.set_intermediate("monitoring_addon_enabled", True)
    mock_profile_2 = Mock(get_subscription_id=Mock(return_value="1234-5678-9012"))
    with patch(
        "azure.cli.command_modules.acs.managed_cluster_decorator.Profile", return_value=mock_profile_2
    ), patch(
        "azure.cli.command_modules.acs.managed_cluster_decorator.ensure_container_insights_for_monitoring"
    ) as mock_ensure:
        dec_2.postprocessing_after_mc_created(mc_2)
    mock_ensure.assert_called_once_with(
        self.cmd,
        monitoring_addon_profile_2,
        "1234-5678-9012",
        "test_rg_name",
        "test_name",
        "test_location",
        remove_monitoring=False,
        aad_route=True,
        create_dcr=False,
        create_dcra=True,
    )
    # Case 3: ingress appgw and virtual node addons -> role assignments.
    dec_3 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {"vnet_subnet_id": "test_vnet_subnet_id", "attach_acr": "test_attach_acr"},
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    mc_3 = self.models.ManagedCluster(
        location="test_location", identity=self.models.ManagedClusterIdentity(type="SystemAssigned")
    )
    dec_3.context.attach_mc(mc_3)
    dec_3.context.set_intermediate("ingress_appgw_addon_enabled", True)
    dec_3.context.set_intermediate("virtual_node_addon_enabled", True)
    with patch(
        "azure.cli.command_modules.acs.managed_cluster_decorator.add_ingress_appgw_addon_role_assignment"
    ) as mock_add_ingress_3, patch(
        "azure.cli.command_modules.acs.managed_cluster_decorator.add_virtual_node_role_assignment"
    ) as mock_add_virtual_3:
        dec_3.postprocessing_after_mc_created(mc_3)
    mock_add_ingress_3.assert_called_once_with(mc_3, self.cmd)
    mock_add_virtual_3.assert_called_once_with(self.cmd, mc_3, "test_vnet_subnet_id")
    # Case 4: attach ACR -> pull permission granted to kubelet identity.
    dec_4 = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {"attach_acr": "test_attach_acr"},
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    mc_4 = self.models.ManagedCluster(
        location="test_location",
        identity=self.models.ManagedClusterIdentity(type="SystemAssigned"),
        identity_profile={
            "kubeletidentity": self.models.UserAssignedIdentity(
                client_id="test_client_id", object_id="test_object_id"
            )
        },
    )
    dec_4.context.attach_mc(mc_4)
    mock_profile_4 = Mock(get_subscription_id=Mock(return_value="1234-5678-9012"))
    with patch(
        "azure.cli.command_modules.acs.managed_cluster_decorator.Profile", return_value=mock_profile_4
    ), patch("azure.cli.command_modules.acs.managed_cluster_decorator.ensure_aks_acr") as mock_ensure_4:
        dec_4.postprocessing_after_mc_created(mc_4)
    mock_ensure_4.assert_called_once_with(
        self.cmd,
        assignee="test_object_id",
        acr_name_or_id="test_attach_acr",
        subscription_id="1234-5678-9012",
        is_service_principal=False,
    )
def test_put_mc(self):
    """put_mc returns the request result directly when no postprocessing is
    needed, and awaits the LRO before postprocessing otherwise."""
    decorator = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {"enable_msi_auth_for_monitoring": False},
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    cluster = self.models.ManagedCluster(location="test_location")
    decorator.context.attach_mc(cluster)
    # Without postprocessing the sdk_no_wait result is returned as-is.
    with patch(
        "azure.cli.command_modules.acs.managed_cluster_decorator.sdk_no_wait",
        return_value=cluster,
    ):
        self.assertEqual(decorator.put_mc(cluster), cluster)
    # Once the monitoring addon needs postprocessing, the long-running
    # operation is awaited and the monitoring role assignment added once.
    decorator.context.set_intermediate("monitoring_addon_enabled", True)
    profile_mock = Mock(get_subscription_id=Mock(return_value="1234-5678-9012"))
    with patch(
        "azure.cli.command_modules.acs.managed_cluster_decorator.Profile",
        return_value=profile_mock,
    ), patch(
        "azure.cli.command_modules.acs.managed_cluster_decorator.add_monitoring_role_assignment"
    ) as add_role_mock, patch(
        "azure.cli.command_modules.acs.managed_cluster_decorator.LongRunningOperation",
        return_value=Mock(return_value=cluster),
    ):
        self.assertEqual(decorator.put_mc(cluster), cluster)
        add_role_mock.assert_called_once_with(cluster, ANY, self.cmd)
def test_update_mc(self):
    """update_mc should simply delegate to put_mc."""
    decorator = AKSManagedClusterUpdateDecorator(
        self.cmd,
        self.client,
        {},
        ResourceType.MGMT_CONTAINERSERVICE,
    )
    cluster = self.models.ManagedCluster(
        location="test_location",
    )
    decorator.context.attach_mc(cluster)
    # NOTE(review): the patch target is AKSManagedClusterCreateDecorator.put_mc
    # although an update decorator is under test -- confirm that is the class
    # actually providing put_mc in the decorator hierarchy.
    with patch("azure.cli.command_modules.acs.managed_cluster_decorator.AKSManagedClusterCreateDecorator.put_mc"):
        decorator.update_mc(cluster)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| 38.414692 | 184 | 0.608056 |
acfa3602643b97782b777c24f598d02e06bd30e7 | 826 | py | Python | scripts/sequence_clusters/eggnog/split_proteins_per_species.py | mahajrod/MAVR | 4db74dff7376a2ffe4426db720b241de9198f329 | [
"MIT"
] | 10 | 2015-04-28T14:15:04.000Z | 2021-03-15T00:07:38.000Z | scripts/sequence_clusters/eggnog/split_proteins_per_species.py | mahajrod/MAVR | 4db74dff7376a2ffe4426db720b241de9198f329 | [
"MIT"
] | null | null | null | scripts/sequence_clusters/eggnog/split_proteins_per_species.py | mahajrod/MAVR | 4db74dff7376a2ffe4426db720b241de9198f329 | [
"MIT"
] | 6 | 2017-03-16T22:38:41.000Z | 2021-08-11T00:22:52.000Z | #!/usr/bin/env python
__author__ = 'Sergei F. Kliver'
import argparse
from RouToolPa.Routines import EggNOGRoutines
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_pep_dir", action="store", dest="input_pep_dir", required=True,
type=EggNOGRoutines.check_path,
help="Directory with input proteins(splited by families or something like)")
parser.add_argument("-o", "--output_pep_dir", action="store", dest="output_pep_dir", required=True,
type=EggNOGRoutines.check_path,
help="Directory to write proteins splited by species")
args = parser.parse_args()
EggNOGRoutines.split_proteins_per_species(args.input_pep_dir, args.output_pep_dir,
input_format="fasta", output_format="fasta")
| 43.473684 | 99 | 0.676755 |
acfa3742e15466e5e1c75aa352113e80c106c4ce | 7,040 | py | Python | softwares/ET_predictor/add_and_run_job.py | urubens/CellCounting | fee50ec6188eb57c18d7e65937870d30a031f932 | [
"MIT"
] | null | null | null | softwares/ET_predictor/add_and_run_job.py | urubens/CellCounting | fee50ec6188eb57c18d7e65937870d30a031f932 | [
"MIT"
] | null | null | null | softwares/ET_predictor/add_and_run_job.py | urubens/CellCounting | fee50ec6188eb57c18d7e65937870d30a031f932 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# * Copyright (c) 2009-2017. Authors: see NOTICE file.
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import os
import pickle
import tempfile
from argparse import ArgumentParser
import numpy as np
from cytomine import Cytomine
from cytomine.models import AnnotationCollection, Annotation
from cytomine_utilities import CytomineJob
from sldc import StandardOutputLogger, Logger
from cell_counting.utils import make_dirs, params_remove_none
from cell_counting.cytomine_utils import upload_annotations
__author__ = "Rubens Ulysse <urubens@uliege.be>"
__copyright__ = "Copyright 2010-2017 University of Liège, Belgium, http://www.cytomine.be/"
def predict(argv):
    """Run the Extra-Trees object counter on Cytomine annotations or images.

    Parses the command line in ``argv``, loads the trained model identified
    by ``--model_id_job`` from the working path, dumps the requested
    annotation crops or whole images, predicts object positions for each of
    them and uploads the results back to Cytomine as annotations.
    """
    parser = ArgumentParser(prog="Extra-Trees Object Counter Predictor")

    # Cytomine connection parameters
    parser.add_argument('--cytomine_host', dest='cytomine_host',
                        default='demo.cytomine.be', help="The Cytomine host")
    parser.add_argument('--cytomine_public_key', dest='cytomine_public_key',
                        help="The Cytomine public key")
    parser.add_argument('--cytomine_private_key', dest='cytomine_private_key',
                        help="The Cytomine private key")
    parser.add_argument('--cytomine_base_path', dest='cytomine_base_path',
                        default='/api/', help="The Cytomine base path")
    parser.add_argument('--cytomine_working_path', dest='cytomine_working_path',
                        default=None, help="The working directory (eg: /tmp)")
    parser.add_argument('--cytomine_id_software', dest='cytomine_software', type=int,
                        help="The Cytomine software identifier")
    parser.add_argument('--cytomine_id_project', dest='cytomine_project', type=int,
                        help="The Cytomine project identifier")

    # Objects
    parser.add_argument('--cytomine_object_term', dest='cytomine_object_term', type=int,
                        help="The Cytomine identifier of object term")

    # Post-processing
    parser.add_argument('--post_threshold', dest='post_threshold', type=float,
                        help="Post-processing discarding threshold")
    parser.add_argument('--post_sigma', dest='post_sigma', type=float,
                        help="Std-dev of Gauss filter applied to smooth prediction")
    parser.add_argument('--post_min_dist', dest='post_min_dist', type=int,
                        help="Minimum distance between two peaks")

    # ROI selection (either a set of annotations or a set of images)
    parser.add_argument('--annotation', dest='annotation', type=int, default=None)
    parser.add_argument('--image', dest='image', type=int, action='append', default=None)

    # Execution
    parser.add_argument('--n_jobs', dest='n_jobs', type=int, default=1, help="Number of jobs")
    parser.add_argument('--verbose', '-v', dest='verbose', default=0, help="Level of verbosity")
    parser.add_argument('--model_id_job', dest='model_id_job', type=int, default=None, help="Model job ID")
    params, other = parser.parse_known_args(argv)

    if params.cytomine_working_path is None:
        params.cytomine_working_path = os.path.join(tempfile.gettempdir(), "cytomine")
    make_dirs(params.cytomine_working_path)

    # Initialize logger
    logger = StandardOutputLogger(params.verbose)
    for key, val in sorted(vars(params).iteritems()):
        logger.info("[PARAMETER] {}: {}".format(key, val))

    # Initialize Cytomine client
    cytomine = Cytomine(
        params.cytomine_host,
        params.cytomine_public_key,
        params.cytomine_private_key,
        working_path=params.cytomine_working_path,
        base_path=params.cytomine_base_path,
        verbose=(params.verbose >= Logger.DEBUG)
    )

    # Start job
    with CytomineJob(cytomine,
                     params.cytomine_software,
                     params.cytomine_project,
                     parameters=vars(params_remove_none(params))) as job:
        cytomine.update_job_status(job.job, status_comment="Starting...", progress=0)

        # Load the previously trained estimator from the model job's dump.
        cytomine.update_job_status(job.job, status_comment="Loading model...", progress=1)
        model_job = cytomine.get_job(params.model_id_job)
        model_file = os.path.join(params.cytomine_working_path, "models", str(model_job.software),
                                  "{}.pkl".format(model_job.id))
        with open(model_file, 'rb') as f:
            estimator = pickle.load(f)

        cytomine.update_job_status(job.job, status_comment="Dumping annotations/images to predict...", progress=3)
        if params.annotation is not None:
            if not isinstance(params.annotation, list):
                # BUGFIX: `list(params.annotation)` raised TypeError for a
                # single (int) annotation id; wrap it in a list instead.
                params.annotation = [params.annotation]
            annots = [cytomine.get_annotation(id) for id in params.annotation]
            annots_collection = AnnotationCollection()
            annots_collection._data = annots
            crops = cytomine.dump_annotations(annotations=annots_collection,
                                              dest_path=os.path.join(params.cytomine_working_path, "crops",
                                                                     str(params.cytomine_project)),
                                              desired_zoom=0,
                                              get_image_url_func=Annotation.get_annotation_alpha_crop_url)
            X = crops.data()
        elif params.image is not None:
            if not isinstance(params.image, list):
                # BUGFIX: same as above for a single image id.
                params.image = [params.image]
            image_instances = [cytomine.get_image_instance(id) for id in params.image]
            image_instances = cytomine.dump_project_images(id_project=params.cytomine_project,
                                                           dest_path="/imageinstances/",
                                                           image_instances=image_instances)
            X = image_instances
        else:
            X = []

        logger.d("X size: {} samples".format(len(X)))

        for i, x in enumerate(X):
            # BUGFIX: progress previously used `np.ceil(i / len(X)) * 95`,
            # which jumps straight from 5 to 100; scale over the samples.
            cytomine.update_job_status(job.job, status_comment="Predicting ID {}...".format(x.id),
                                       progress=5 + int(np.ceil(95.0 * i / len(X))))
            y = estimator.predict([x.filename])

            cytomine.update_job_status(job.job, status_comment="Uploading annotations...")
            upload_annotations(cytomine, x, y, term=params.cytomine_object_term)

        cytomine.update_job_status(job.job, status_comment="Finished.", progress=100)
if __name__ == '__main__':
    import sys
    # Forward the CLI arguments (minus the program name) to the predictor.
    predict(sys.argv[1:])
| 46.933333 | 114 | 0.642898 |
acfa3774fb60c7df32bbe69ef08feb7cfe0e75ee | 7,476 | py | Python | src/pretalx/common/models/settings.py | hrchu/pretalx | cd7e5525f80c7290d9650065b4cf4f085032adfc | [
"Apache-2.0"
] | 3 | 2020-03-28T06:21:27.000Z | 2020-03-28T12:59:21.000Z | src/pretalx/common/models/settings.py | hrchu/pretalx | cd7e5525f80c7290d9650065b4cf4f085032adfc | [
"Apache-2.0"
] | 14 | 2020-03-27T22:46:38.000Z | 2020-03-29T18:40:02.000Z | src/pretalx/common/models/settings.py | hrchu/pretalx | cd7e5525f80c7290d9650065b4cf4f085032adfc | [
"Apache-2.0"
] | 4 | 2020-03-21T10:33:20.000Z | 2020-03-28T10:14:19.000Z | import datetime as dt
import json
import uuid
from django.utils.translation import gettext_noop
from hierarkey.models import GlobalSettingsBase, Hierarkey
from i18nfield.strings import LazyI18nString
hierarkey = Hierarkey(attribute_name="settings")
@hierarkey.set_global()
class GlobalSettings(GlobalSettingsBase):
    """Global (instance-wide) settings storage."""

    def get_instance_identifier(self):
        """Return this installation's stable UUID, creating and persisting
        one on first access."""
        stored = self.settings.get("instance_identifier")
        if stored:
            return uuid.UUID(stored)
        identifier = uuid.uuid4()
        self.settings.set("instance_identifier", str(identifier))
        return identifier
def i18n_unserialise(value):
    """Turn a serialised settings value back into a LazyI18nString.

    Values are stored as JSON where possible; anything that does not parse
    as JSON is wrapped as a plain string.
    """
    try:
        data = json.loads(value)
    except ValueError:
        data = str(value)
    return LazyI18nString(data)
# Register (de)serialisation of translatable values with hierarkey.
hierarkey.add_type(
    LazyI18nString, serialize=lambda s: json.dumps(s.data), unserialize=i18n_unserialise
)

# -- Schedule / display defaults ------------------------------------------
hierarkey.add_default("show_on_dashboard", "True", bool)
hierarkey.add_default("show_schedule", "True", bool)
hierarkey.add_default("schedule_display", "proportional", str)
hierarkey.add_default("show_sneak_peek", "True", bool)
hierarkey.add_default("show_widget_if_not_public", "False", bool)
hierarkey.add_default("export_html_on_schedule_release", "False", bool)
hierarkey.add_default("imprint_url", None, str)
hierarkey.add_default("html_export_url", "", str)
hierarkey.add_default("custom_domain", "", str)
hierarkey.add_default("use_tracks", "True", bool)
hierarkey.add_default("present_multiple_times", "False", bool)
hierarkey.add_default("display_header_pattern", "", str)
hierarkey.add_default("cfp_flow", "", str)

# -- Update-check state ----------------------------------------------------
hierarkey.add_default("update_check_ack", "False", bool)
hierarkey.add_default("update_check_email", "", str)
hierarkey.add_default("update_check_enabled", "True", bool)
hierarkey.add_default("update_check_result", None, dict)
hierarkey.add_default("update_check_result_warning", "False", bool)
hierarkey.add_default("update_check_last", None, dt.datetime)
hierarkey.add_default("update_check_id", None, str)

# -- CfP: which fields are shown to submitters -----------------------------
hierarkey.add_default("cfp_request_title", "True", bool)
hierarkey.add_default("cfp_request_abstract", "True", bool)
hierarkey.add_default("cfp_request_description", "True", bool)
hierarkey.add_default("cfp_request_biography", "True", bool)
hierarkey.add_default("cfp_request_avatar", "True", bool)
hierarkey.add_default("cfp_request_availabilities", "True", bool)
hierarkey.add_default("cfp_request_notes", "True", bool)
hierarkey.add_default("cfp_request_do_not_record", "True", bool)
hierarkey.add_default("cfp_request_image", "True", bool)
hierarkey.add_default("cfp_request_track", "False", bool)
hierarkey.add_default("cfp_request_duration", "False", bool)

# -- CfP: which of the shown fields are mandatory --------------------------
hierarkey.add_default("cfp_require_title", "True", bool)
hierarkey.add_default("cfp_require_abstract", "True", bool)
hierarkey.add_default("cfp_require_description", "False", bool)
hierarkey.add_default("cfp_require_availabilities", "False", bool)
hierarkey.add_default("cfp_require_biography", "True", bool)
hierarkey.add_default("cfp_require_avatar", "False", bool)
hierarkey.add_default("cfp_require_notes", "False", bool)
hierarkey.add_default("cfp_require_do_not_record", "False", bool)
hierarkey.add_default("cfp_require_image", "False", bool)
hierarkey.add_default("cfp_require_track", "False", bool)
hierarkey.add_default("cfp_require_duration", "False", bool)

# -- CfP: text-length constraints (counted in chars or words) --------------
hierarkey.add_default("cfp_count_length_in", "chars", str)
hierarkey.add_default("cfp_title_min_length", None, int)
hierarkey.add_default("cfp_abstract_min_length", None, int)
hierarkey.add_default("cfp_description_min_length", None, int)
hierarkey.add_default("cfp_biography_min_length", None, int)
hierarkey.add_default("cfp_title_max_length", None, int)
hierarkey.add_default("cfp_abstract_max_length", None, int)
hierarkey.add_default("cfp_description_max_length", None, int)
hierarkey.add_default("cfp_biography_max_length", None, int)

# -- Review settings --------------------------------------------------------
hierarkey.add_default("allow_override_votes", "False", bool)
hierarkey.add_default("review_min_score", 0, int)
hierarkey.add_default("review_max_score", 1, int)
hierarkey.add_default("review_score_mandatory", "False", bool)
hierarkey.add_default("review_text_mandatory", "False", bool)
hierarkey.add_default(
    "review_help_text",
    LazyI18nString.from_gettext(
        gettext_noop(
            "Please give a fair review on why you'd like to see this submission at the conference, or why you think it would not be a good fit."
        )
    ),
    LazyI18nString,
)
# -- E-mail: sender / SMTP configuration -----------------------------------
hierarkey.add_default("mail_from", "", str)
hierarkey.add_default("mail_reply_to", "", str)
hierarkey.add_default("mail_subject_prefix", "", str)
hierarkey.add_default("mail_signature", "", str)
hierarkey.add_default("smtp_use_custom", "False", bool)
hierarkey.add_default("smtp_host", "", str)
hierarkey.add_default("smtp_port", "587", int)
hierarkey.add_default("smtp_username", "", str)
hierarkey.add_default("smtp_password", "", str)
hierarkey.add_default("smtp_use_tls", "True", bool)
hierarkey.add_default("smtp_use_ssl", "False", bool)

# -- E-mail: notification templates ----------------------------------------
hierarkey.add_default("mail_on_new_submission", "False", bool)
hierarkey.add_default(
    "mail_text_new_submission",
    LazyI18nString.from_gettext(
        gettext_noop(
            """Hi,
you have received a new submission for your event {event_name}:
“{submission_title}” by {speakers}.
You can see details at
{orga_url}
All the best,
your {event_name} CfP system.
"""
        )
    ),
    LazyI18nString,
)
# One-shot flags recording whether each lifecycle mail was already sent.
hierarkey.add_default("sent_mail_event_created", "False", bool)
hierarkey.add_default("sent_mail_cfp_closed", "False", bool)
hierarkey.add_default("sent_mail_event_over", "False", bool)
hierarkey.add_default(
    "mail_text_event_created",
    LazyI18nString.from_gettext(
        gettext_noop(
            """Hi,
we hope you're happy with pretalx as your event's CfP system.
These links may be helpful in the coming days and weeks:
- Your event's dashboard: {event_dashboard}
- A list of submissions: {event_submissions}
- Your schedule editor: {event_schedule}
If there is anything you're missing, come tell us about it
at https://github.com/pretalx/pretalx/issues/new or via an
email to support@pretalx.com!
"""
        )
    ),
    LazyI18nString,
)
hierarkey.add_default(
    "mail_text_cfp_closed",
    LazyI18nString.from_gettext(
        gettext_noop(
            """Hi,
just writing you to let you know that your Call for Participation is now
closed. You'll find a list of all your {submission_count} submissions here:
{event_submissions}
You can add reviewers here: {event_team}
You can review submissions here: {event_review}
And create your schedule here, once you have accepted submissions: {event_schedule}
"""
        )
    ),
    LazyI18nString,
)
hierarkey.add_default(
    "mail_text_event_over",
    LazyI18nString.from_gettext(
        gettext_noop(
            """Hi,
congratulations, your event is over! Hopefully it went well. Here are some
statistics you might find interesting:
- You had {submission_count} talk submissions,
- Of which you selected {talk_count} talks.
- The reviewers wrote {review_count} reviews.
- You released {schedule_count} schedules in total.
- Over the course of the event, you sent {mail_count} mails.
If there is anything you're missing, come tell us about it
at https://github.com/pretalx/pretalx/issues/new or via an
email to support@pretalx.com!
"""
        )
    ),
    LazyI18nString,
)
| 36.115942 | 144 | 0.7603 |
acfa377b04534bc17143262cfc44f7659450dbe8 | 11,960 | py | Python | src/jose/jwe.py | hdknr/jose | d872407e9f3b3a0262e6bb1cdb599b5c4c1d9ee4 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | src/jose/jwe.py | hdknr/jose | d872407e9f3b3a0262e6bb1cdb599b5c4c1d9ee4 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2015-01-23T09:37:17.000Z | 2015-01-23T09:37:17.000Z | src/jose/jwe.py | hdknr/jose | d872407e9f3b3a0262e6bb1cdb599b5c4c1d9ee4 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | from jose.crypto import Crypto, CryptoMessage
from jose.jwa.encs import EncEnum, KeyEncEnum
from jose.base import BaseEnum, BaseObject, JoseException
from jose.jwk import Jwk
from jose.utils import merged, _BD, _BE
import re
import traceback
import zlib
try:
from exceptions import AttributeError
except:
pass
# Compact JWE serialization: five base64url parts joined by '.'
# (header, encrypted key, IV, ciphertext, authentication tag).
# http://tools.ietf.org/html/draft-ietf-jose-json-web-encryption-23#section-7.1
_component = [
    r'^(?P<header>[^\.]+)',
    r'(?P<encrypted_key>[^\.]*)',  #: blank for shared key.
    r'(?P<iv>[^\.]+)',
    r'(?P<ciphertext>[^\.]+)',
    r'(?P<tag>[^\.]+)$',
]
_compact = re.compile('\.'.join(_component))
class NotJweException(JoseException):
    """Raised when the given data is not a valid JWE."""
    pass
class ZipEnum(BaseEnum):
    """JWE "zip" (compression) header parameter values."""
    DEF = 'DEF'

    def compress(self, data):
        """Apply the selected compression to *data* (DEFLATE for "DEF")."""
        if self.value != 'DEF':
            return data
        return zlib.compress(data)

    def uncompress(self, data):
        """Reverse :meth:`compress`; falsy input is returned untouched."""
        if not data or self.value != 'DEF':
            return data
        return zlib.decompress(data)
# Header fields common to every JWE.
BASE_FIELD = dict(
    enc=None,  #: EncEnum Algorithm
    zip=None,  #: ZipEnum Compression Algorithm
)
# Header fields for AES GCM key wrapping.
GCM_FIELD = dict(
    iv=None,   #: IV for Key Wrap
    tag=None,  #: Auth Tag for Key Wrap
)
# Header fields for ECDH-ES key agreement.
ECDH_FIELD = dict(
    epk=None,  #: Ephemeral Public Key
    apu=None,  #: Agreement PartyUInfo
    apv=None,  #: Agreement PartyVInfo
)
# Header fields for PBES2 password-based key wrapping.
PBES_FIELD = dict(
    p2s=None,  #: Salt for PBKDF2
    p2c=None,  #: Loop counts for PBKDF2
)
class Jwe(Crypto):
    """JWE header object: key-management and content-encryption parameters.

    Raw header values are normalised to their enum/object counterparts on
    construction (``alg``, ``zip``, ``enc``, ``epk``).
    """

    _fields = merged([
        Crypto._fields, BASE_FIELD,
        GCM_FIELD, ECDH_FIELD, PBES_FIELD
    ])

    def __init__(self, **kwargs):
        super(Jwe, self).__init__(**kwargs)
        # BUGFIX: removed leftover debug print of self.enc.
        self.alg = self.alg and KeyEncEnum(self.alg)
        self.zip = self.zip and ZipEnum(self.zip)
        self.enc = self.enc and EncEnum(self.enc)
        if isinstance(self.epk, dict):
            self.epk = Jwk(**self.epk)

    @classmethod
    def from_json(cls, json_str, base=None):
        """Build a Jwe from its JSON representation."""
        obj = BaseObject.from_json(json_str, cls)
        obj.enc = EncEnum.create(obj.enc)
        obj.zip = ZipEnum.create(obj.zip)
        return obj

    @classmethod
    def from_token(cls, token):
        """Return a Jwe when *token* looks like a compact JWE, else None.

        BUGFIX: ``re.search(_compact)`` was called without a subject string
        (a TypeError at runtime); match the token against the compiled
        compact-serialization pattern instead.
        """
        m = _compact.match(token)
        return m and cls()

    def to_token(self):
        # Serialization of a bare header to a token is not implemented.
        return ''

    def provide_key(self, jwk):
        """Create/wrap a CEK for *jwk* using this header's algorithms."""
        return self.alg.encryptor.provide(self.enc, jwk, self)

    def agree_key(self, jwk, cek_ci):
        """Recover the CEK from *cek_ci* using *jwk* and this header."""
        return self.alg.encryptor.agree(self.enc, jwk, self, cek_ci)
class Recipient(BaseObject):
    ''' Per Receiver CEK Management

    Holds the per-recipient unprotected header and the (base64url)
    encrypted key; `cek` and `iv` are runtime state, not serialized.
    '''
    _fields = dict(
        header=None,         # JWE Per-Recipient Unprotected Header
        encrypted_key=None,  # BASE64URL(JWE Encrypted Key)
    )
    _excludes = [
        'recipient', 'cek', 'iv', ]

    def __init__(self, recipient=None, iv=None, cek=None, **kwargs):
        super(Recipient, self).__init__(**kwargs)
        # Jwe: accept the header as base64url string, dict or Jwe object.
        if isinstance(self.header, basestring):
            self.header = Jwe.from_b64u(self.header)
        elif isinstance(self.header, dict):
            self.header = Jwe(**self.header)
        self.recipient = recipient
        self.cek = cek
        self.iv = iv

    def provide_key(self, jwk, cek=None, iv=None, jwe=None):
        # Create (or wrap) the CEK for this recipient; stores the
        # base64url-encoded encrypted key and returns (cek, iv).
        jwe = Jwe.merge(self.header, jwe)
        assert jwk and isinstance(jwk, Jwk), \
            "Recipient's Jwk must be specified."
        assert jwe
        assert jwe.enc
        assert jwe.alg
        # Propagate the key id from the Jwk into the per-recipient header.
        if jwk.kid and not self.header.kid:
            self.header.kid = jwk.kid
        (self.cek, self.iv, self.encrypted_key, kek
         ) = jwe.alg.encryptor.provide(jwe.enc, jwk, self.header, cek, iv)
        self.encrypted_key = _BE(self.encrypted_key)
        return self.cek, self.iv

    def agree_key(self, jwk, jwe=None):
        # Recover the CEK from the stored encrypted key with the
        # recipient's private Jwk.
        jwe = Jwe.merge(self.header, jwe)
        assert jwk.is_private, "Agreement jwk must be private."
        self.cek = jwe.alg.encryptor.agree(
            jwe.enc, jwk, self.header,
            _BD(self.encrypted_key))
        return self.cek
class Message(CryptoMessage):
    ''' Encrypted Message Container

    Holds the JWE protected/unprotected headers, ciphertext, IV,
    authentication tag and the list of `Recipient`s. Supports both JSON
    and compact serializations. The CEK is runtime-only state and is
    never serialized (see `_excludes`).
    '''
    _fields = dict(
        protected=None, # BASE64URL(UTF8(JWE Protected Header))
        unprotected=None, # JWE Shared Unprotected Header (Json)
        iv='', # BASE64URL(JWE Initialization Vector)
        aad='', # BASE64URL(JWE AAD))
        # (only used for Json Serialization)
        ciphertext='', # BASE64(JWE Ciphertext)
        tag='', # BASE64URL(JWE Authentication Tag)
        recipients=[], # array of Recipient
    )
    _excludes = ['cek', ]

    def __init__(self, plaintext=None, *args, **kwargs):
        # `_protected` caches the decoded `protected` header as a Jwe object.
        self._protected = Jwe() # `protected` cache as Jwe object
        self._plaintext = plaintext
        self.cek = None
        self.verified = False
        super(Message, self).__init__(*args, **kwargs)
        self._convert_recipients(self.recipients)
        # Normalize `protected`: keep the b64u string in `self.protected`
        # and the decoded Jwe in `self._protected`.
        if isinstance(self.protected, basestring):
            self._protected = Jwe.from_b64u(self.protected)
            if isinstance(self.protected, unicode):
                self.protected = self.protected.encode('utf8')
        elif isinstance(self.protected, Jwe):
            self._protected = self.protected
            self.protected = self._protected.to_b64u()
        if isinstance(self.unprotected, dict):
            self.unprotected = Jwe(**self.unprotected)

    def _convert_recipients(self, src):
        """Coerces every entry of `src` into a `Recipient` object."""
        if not isinstance(src, list):
            return
        new = []
        for r in src:
            if isinstance(r, Recipient):
                new.append(r)
            elif isinstance(r, dict):
                new.append(Recipient(**r))
        self.recipients = new

    def header(self, index=-1, jwe=None):
        """Merges protected, shared-unprotected and per-recipient headers."""
        return Jwe.merge(
            self._protected,
            self.unprotected,
            self.recipients[index].header if index >= 0 else None,
            jwe,
        )

    @property
    def auth_data(self):
        """Additional authenticated data per JWE section 5.1."""
        if self.aad:
            # self.aad is exclusively for JSON Serialization
            # Jwe 5.1
            return self.protected + "." + self.aad
        return self.protected

    def zip(self, src, unzip=False):
        ''' if "protected" has "zip", compress src
        <Spec Jwe 4.1.3>
        '''
        if self._protected and self._protected.zip:
            if unzip:
                return self._protected.zip.uncompress(src)
            else:
                return self._protected.zip.compress(src)
        return src

    def encrypt(self, header=None, auth_data=None):
        """Encrypts the (optionally zipped) plaintext; returns (ciphertext, tag)."""
        auth_data = auth_data or self.auth_data
        header = header or self.header()
        assert self.cek
        assert self.iv
        assert self.auth_data
        plaint = self.zip(self.plaintext) # 'zip' compression
        ciphert, tag = header.enc.encryptor.encrypt(
            self.cek, plaint, _BD(self.iv), auth_data)
        return (ciphert, tag)

    def add_recipient(self, recipient):
        ''' before call, recipient has to be provided with
            message's CEK and IV
        '''
        header = self.header(jwe=recipient.header)
        key = header.load_key(recipient.recipient)
        assert key is not None, "Recipient's key MUST be loaded."
        if len(self.recipients) < 1:
            #: Provide CEK & IV
            (self.cek, self.iv) = recipient.provide_key(
                key, jwe=header)
            self.iv = _BE(self.iv)
        else:
            # use existent cek and iv
            assert self.cek
            assert self.iv
            recipient.provide_key(key, self.cek, self.iv, jwe=header)
        self.recipients.append(recipient)

    def find_cek(self, jwk=None):
        ''' force to use jwk '''
        header = self.header()
        for recipient in self.recipients:
            jwk = jwk or recipient.header.load_key(recipient.recipient)
            if jwk:
                #: key agreement fails if receiver is not me.
                self.cek = recipient.agree_key(jwk, jwe=header)
                return self.cek
            else:
                # TODO log
                pass
        return None

    def decrypt(self, jwk=None):
        """Decrypts the ciphertext, setting `self.verified` from the tag check."""
        if not self.cek:
            self.find_cek(jwk)
        header = self.header() # the Jwe headers are merged.
        assert self.cek
        assert self.ciphertext
        assert self.iv
        assert self.tag
        plaint, is_valid = header.enc.encryptor.decrypt(
            self.cek,
            _BD(self.ciphertext),
            _BD(self.iv),
            self.auth_data,
            _BD(self.tag))
        # TODO: is_valid == False, raise exception
        self.verified = is_valid #: TODO
        return self.zip(plaint, unzip=True)

    def get_plaintext(self, jwk=None):
        """Decrypts (finding the CEK with `jwk` if needed) and caches plaintext."""
        #: If CEK has not been found
        if not self.cek:
            self.find_cek(jwk)
        self._plaintext = self.decrypt()
        return self._plaintext

    @property
    def plaintext(self):
        if self._plaintext:
            # already decrypted and cached
            return self._plaintext
        return self.decrypt()

    @plaintext.setter
    def plaintext(self, value):
        # CEK is not serialized.
        self._plaintext = value

    def text(self):
        """Alias for `plaintext` (decrypts on demand)."""
        return self.plaintext

    @classmethod
    def from_token(cls, token, sender, receiver):
        '''
        Tries JSON serialization first, then falls back to compact.

        :param token: Serialized Jws (JSON or Compact)
        :param str sender: Message sender identifier
        '''
        try:
            message = cls.from_json(token)
            for rec in message.recipients:
                rec.recipient = receiver
            return message
        except ValueError:
            #: fall to compact serialization
            pass
        except:
            # NOTE(review): bare except swallows all other errors (including
            # KeyboardInterrupt) and only prints the traceback — consider
            # narrowing to Exception and logging instead.
            print(traceback.format_exc())
        return cls.parse_token(token, sender, receiver)

    @classmethod
    def parse_token(cls, token, sender, recipient):
        '''
        :param token: Compact Serialization
        :param str sender: Message sender identifier
        '''
        m = {}
        try:
            # AttributeError from `.groupdict()` on None means no match.
            m = _compact.search(token).groupdict()
        except AttributeError:
            raise NotJweException(
                'Token is not JWE', None, token, sender, recipient)
        header = Jwe.from_b64u(m.get('header', None))
        recipient = dict(
            recipient=recipient,
            header=header,
            encrypted_key=m.get('encrypted_key', None),
        )
        message = Message(
            protected=m.get('header', None),
            iv=m.get('iv', None),
            tag=m.get('tag', None),
            ciphertext=m.get('ciphertext', None),
            recipients=[recipient]
        )
        assert len(message.recipients) == 1
        return message

    def serialize_json(self, **kwargs):
        """Encrypts the plaintext and returns the JSON serialization."""
        assert self.iv
        assert self.cek
        #: Content encryption
        (self.ciphertext,
         self.tag) = self.encrypt()
        self.ciphertext = _BE(self.ciphertext)
        self.tag = _BE(self.tag)
        return self.to_json(**kwargs)

    def serialize_compact(self, index=0):
        """Returns the 5-part compact serialization for recipient `index`."""
        if len(self.recipients) < 1:
            return None
        header = self.header(index) # all headers together
        header_b64u = header.to_b64u() # auth_data = _BE(header)
        #: encrypt with same CEK+IV, but with new auth_data
        ciphertext, tag = self.encrypt(header, header_b64u)
        #: Tokenize
        return ".".join([
            header_b64u,
            self.recipients[0].encrypted_key or '',
            self.iv,
            _BE(ciphertext),
            _BE(tag)])

    def verify(self):
        """Returns whether the last decryption authenticated successfully."""
        return self.verified # TODO
| 28.544153 | 79 | 0.570067 |
acfa38e03ba39040d001f9c6171a12f63220143e | 3,236 | py | Python | profiles_project/settings.py | rpothamLearner/profiles-rest-api | f0b6dca464ea64fe2db633bca575627d3280194d | [
"MIT"
] | null | null | null | profiles_project/settings.py | rpothamLearner/profiles-rest-api | f0b6dca464ea64fe2db633bca575627d3280194d | [
"MIT"
] | null | null | null | profiles_project/settings.py | rpothamLearner/profiles-rest-api | f0b6dca464ea64fe2db633bca575627d3280194d | [
"MIT"
] | null | null | null | """
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Project root: two levels above this settings file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# from the environment (e.g. os.environ) before any production deployment.
SECRET_KEY = '-k1_taidv7b(r5pk2+p^^$%fnc6+^h6ay7sh)@+jann3a#pj2@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'rest_framework.authtoken',
    'profiles_api'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# Use the custom user model defined in the profiles_api app.
AUTH_USER_MODEL = 'profiles_api.UserProfile'
acfa391af70894425628a7a0d31305077c7a0df8 | 140 | py | Python | manager/apps.py | lameinthebox/cid-backend | 1258000ab7801ebe2f6aef2a4006d3b35c9c88d8 | [
"MIT"
] | null | null | null | manager/apps.py | lameinthebox/cid-backend | 1258000ab7801ebe2f6aef2a4006d3b35c9c88d8 | [
"MIT"
] | 14 | 2018-10-08T19:49:25.000Z | 2022-03-11T23:36:30.000Z | manager/apps.py | lameinthebox/cid-backend | 1258000ab7801ebe2f6aef2a4006d3b35c9c88d8 | [
"MIT"
] | 2 | 2018-09-19T20:49:17.000Z | 2018-10-08T08:12:50.000Z | from django.apps import AppConfig
class ManagerConfig(AppConfig):
    """Django application configuration for the ``manager`` app."""
    name = 'manager'
    def ready(self):
        # Side-effect import: registers the app's tasks once the app
        # registry is fully populated.
        import manager.tasks
| 15.555556 | 33 | 0.692857 |
acfa394508a2e9fc17bec7ececb2ca00e1d2a11e | 56,203 | py | Python | tensorflow/python/distribute/coordinator/cluster_coordinator.py | wainshine/tensorflow | dc7a8dc8546c679b9c7b3df7494ce4506bfc1a6d | [
"Apache-2.0"
] | 54 | 2017-06-17T14:07:48.000Z | 2022-03-29T02:11:20.000Z | tensorflow/python/distribute/coordinator/cluster_coordinator.py | wainshine/tensorflow | dc7a8dc8546c679b9c7b3df7494ce4506bfc1a6d | [
"Apache-2.0"
] | 14 | 2018-08-11T00:55:13.000Z | 2022-03-14T23:24:30.000Z | tensorflow/python/distribute/coordinator/cluster_coordinator.py | wainshine/tensorflow | dc7a8dc8546c679b9c7b3df7494ce4506bfc1a6d | [
"Apache-2.0"
] | 11 | 2018-04-19T22:36:01.000Z | 2021-08-02T08:44:43.000Z | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for `ClusterCoordinator` and relevant cluster-worker related library.
This is currently under development and the API is subject to change.
"""
import contextlib
import os
import re
import threading
import time
import weakref
from six.moves import queue
from tensorflow.python.distribute import parameter_server_strategy_v2
from tensorflow.python.distribute.coordinator import coordinator_context
from tensorflow.python.distribute.coordinator import metric_utils
from tensorflow.python.distribute.coordinator import values as values_lib
from tensorflow.python.distribute.coordinator import watchdog
from tensorflow.python.eager import cancellation
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import executor
from tensorflow.python.eager import function as tf_function
from tensorflow.python.framework import errors
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# Maximum time for failed worker to come back is 1 hour
_WORKER_MAXIMUM_RECOVERY_SEC = 3600
# Maximum size for queued closures, "infinite" if set to 0.
# When the maximum queue size is reached, further schedule calls will become
# blocking until some previously queued closures are executed on workers.
# Note that using an "infinite" queue size can take a non-trivial portion of
# memory, and even lead to coordinator OOM. Modify the size to a smaller value
# for coordinator with constrained memory resource (only recommended for
# advanced users). Also used in unit tests to ensure the correctness when the
# queue is full.
_CLOSURE_QUEUE_MAX_SIZE = 256 * 1024
# RPC error message from PS
_RPC_ERROR_FROM_PS = "GRPC error information from remote target /job:ps"
# InvalidArgumentError (unknown device) will not have "GRPC error..." string.
_JOB_WORKER_STRING_IDENTIFIER = "/job:worker"
RemoteValueStatus = values_lib.RemoteValueStatus
RemoteValue = values_lib.RemoteValue
RemoteValueImpl = values_lib.RemoteValueImpl
PerWorkerValues = values_lib.PerWorkerValues
class InputError(Exception):
  """Error raised when an input to a scheduled closure is faulty.

  Wraps the triggering exception and re-attaches its traceback so the
  failure surfaces to the user with full context.
  """

  def __init__(self, original_exception):
    # Keep the wrapped error accessible for callers that inspect it.
    self.original_exception = original_exception
    msg = ("Input has an error, the original exception is "
           f"{original_exception!r}, error message is "
           f"{original_exception!s}.")
    super().__init__(msg)
    # Carry the original traceback forward for accurate error reporting.
    self.with_traceback(original_exception.__traceback__)
def _maybe_rebuild_remote_values(worker, structure):
  """Attempts to return errors from `RemoteValue`s. Rebuilds them if needed.

  Walks `structure` and, for each aborted `RemoteValue`, tries to rebuild
  its underlying resource on `worker`. Returns the first error found in the
  structure (rebuild failure or previously recorded), or None.
  """
  errors_in_structure = []

  def _get_error(val):
    if isinstance(val, RemoteValue):
      if val._status is RemoteValueStatus.ABORTED:  # pylint: disable=protected-access
        try:
          # This attempts to rebuild the resource on the worker, which may fail
          # if the worker or PS is unavailable. If it fails, the original
          # RemoteValue that requests a result will receive an `InputError`,
          # which gets handled by `wait_on_failure` in `process_closure`.
          val._rebuild_on(worker)  # pylint: disable=protected-access
        except Exception as e:  # pylint: disable=broad-except
          val._set_error(e)  # pylint: disable=protected-access

      error = val._get_error()  # pylint: disable=protected-access
      if error:
        errors_in_structure.append(error)

  nest.map_structure(_get_error, structure)
  # Only the first error is reported; later ones are dropped.
  if errors_in_structure:
    return errors_in_structure[0]
  else:
    return None
def _maybe_get_remote_value(val):
  """Returns the fetched value of `val` if it is a `RemoteValue`, else `val`."""
  if not isinstance(val, RemoteValue):
    return val
  # A RemoteValue that recorded an error carries no usable value.
  if val._get_error():  # pylint: disable=protected-access
    raise AssertionError(
        "RemoteValue doesn't have a value because it has errors.")
  return val._get_values()  # pylint: disable=protected-access
def _maybe_as_type_spec(val):
  """Converts coordinator value wrappers to their type specs for tracing."""
  if not isinstance(val, (RemoteValue, PerWorkerValues)):
    return val
  spec = val._type_spec  # pylint: disable=protected-access
  # A missing type spec means the value came from a non-tf.function closure.
  if spec is None:
    raise ValueError("Output of a scheduled function that is not "
                     "tf.function cannot be the input of another function.")
  return spec
def _select_worker_slice(worker_id, structured):
  """Selects the worker slice of each of the items in `structured`."""

  def _slice_for_worker(element):
    # Per-worker containers are indexed; everything else is shared as-is.
    if isinstance(element, PerWorkerValues):
      return element._values[worker_id]  # pylint: disable=protected-access
    return element

  return nest.map_structure(_slice_for_worker, structured)
def _disallow_remote_value_as_input(structured):
  """Raises if any element of `structured` is a RemoteValue."""

  def _reject_remote_value(element):
    if not isinstance(element, RemoteValue):
      return
    raise ValueError(
        "`tf.distribute.experimental.coordinator.RemoteValue` used "
        "as an input to scheduled function is not yet "
        "supported.")

  nest.map_structure(_reject_remote_value, structured)
class Closure(object):
  """Hold a function to be scheduled and its arguments."""

  def __init__(self, function, cancellation_mgr, args=None, kwargs=None):
    """Initializes the closure.

    Args:
      function: the callable to schedule; may be a `tf.function`, a
        `ConcreteFunction`, or a regular Python callable.
      cancellation_mgr: the `CancellationManager` used to make traced
        functions cancellable.
      args: positional arguments (may contain `PerWorkerValues`).
      kwargs: keyword arguments (may contain `PerWorkerValues`).
    """
    if not callable(function):
      raise ValueError("Function passed to `ClusterCoordinator.schedule` must "
                       "be a callable object.")
    self._args = args or ()
    self._kwargs = kwargs or {}

    # RemoteValue inputs are not supported yet; fail early.
    _disallow_remote_value_as_input(self._args)
    _disallow_remote_value_as_input(self._kwargs)

    if isinstance(function, def_function.Function):
      # Trace with worker 0's slice of the per-worker inputs; all workers
      # share the same signature.
      replica_args = _select_worker_slice(0, self._args)
      replica_kwargs = _select_worker_slice(0, self._kwargs)

      # Note: no need to handle function registration failure since this kind of
      # failure will not raise exceptions as designed in the runtime. The
      # coordinator has to rely on subsequent operations that raise to catch
      # function registration failure.

      # Record the function tracing overhead. Note that we pass in the tracing
      # count of the def_function.Function as a state tracker, so that metrics
      # will only record the time for actual function tracing (i.e., excluding
      # function cache lookups).
      with metric_utils.monitored_timer(
          "function_tracing", state_tracker=function._get_tracing_count):  # pylint: disable=protected-access
        self._concrete_function = function.get_concrete_function(
            *nest.map_structure(_maybe_as_type_spec, replica_args),
            **nest.map_structure(_maybe_as_type_spec, replica_kwargs))
    elif isinstance(function, tf_function.ConcreteFunction):
      self._concrete_function = function

    if hasattr(self, "_concrete_function"):
      # If we have a concrete function, we get to retrieve the output type spec
      # via the structured_output.
      self._output_type_spec = func_graph.convert_structure_to_signature(
          self._concrete_function.structured_outputs)
      self._function = cancellation_mgr.get_cancelable_function(
          self._concrete_function)
    else:
      # Otherwise (i.e. what is passed in is a regular python function), we have
      # no such information.
      self._output_type_spec = None
      self._function = function

    # Weak reference to the RemoteValue holding this closure's output.
    self._output_remote_value_ref = None

  def build_output_remote_value(self):
    """Creates the output `RemoteValue`; may only be called once."""
    if self._output_remote_value_ref is None:
      ret = RemoteValueImpl(None, self._output_type_spec)
      self._output_remote_value_ref = weakref.ref(ret)
      return ret
    else:
      raise ValueError(
          "The output of the Closure cannot be built more than once.")

  def maybe_call_with_output_remote_value(self, method):
    """Calls `method(output_remote_value)` if the RemoteValue is still alive."""
    if self._output_remote_value_ref is None:
      return None
    output_remote_value = self._output_remote_value_ref()
    if output_remote_value is not None:
      return method(output_remote_value)
    return None

  def mark_cancelled(self):
    """Records a CancelledError on the output RemoteValue, if any."""
    e = errors.CancelledError(
        None, None, "The corresponding function is "
        "cancelled. Please reschedule the function.")
    self.maybe_call_with_output_remote_value(lambda r: r._set_error(e))  # pylint: disable=protected-access

  def execute_on(self, worker):
    """Executes the closure on the given worker.

    Args:
      worker: a `Worker` object.
    """
    replica_args = _select_worker_slice(worker.worker_index, self._args)
    replica_kwargs = _select_worker_slice(worker.worker_index, self._kwargs)

    # Rebuild any aborted RemoteValue resources before running; surface the
    # first error (wrapped as InputError) so it is handled as an input failure.
    e = (
        _maybe_rebuild_remote_values(worker, replica_args) or
        _maybe_rebuild_remote_values(worker, replica_kwargs))
    if e:
      if not isinstance(e, InputError):
        e = InputError(e)
      raise e

    with ops.device(worker.device_name):
      with context.executor_scope(worker.executor):
        with coordinator_context.with_dispatch_context(worker):
          with metric_utils.monitored_timer("closure_execution"):
            output_values = self._function(
                *nest.map_structure(_maybe_get_remote_value, replica_args),
                **nest.map_structure(_maybe_get_remote_value, replica_kwargs))
    self.maybe_call_with_output_remote_value(
        lambda r: r._set_values(output_values))  # pylint: disable=protected-access
class ResourceClosure(Closure):
  """A closure whose output RemoteValue remembers the closure itself.

  Keeping the closure inside the RemoteValue lets the resource be rebuilt
  on a worker after it recovers from failure.
  """

  def build_output_remote_value(self):
    existing_ref = self._output_remote_value_ref
    if existing_ref is not None:
      # Unlike the base class, repeated calls return the cached value.
      return existing_ref()
    # We need to remember the Closure object in the `RemoteValue` here.
    remote_value = RemoteValueImpl(self, self._output_type_spec)
    self._output_remote_value_ref = weakref.ref(remote_value)
    return remote_value
class _CoordinatedClosureQueue(object):
  """Manage a queue of closures, inflight count and errors from execution.

  This class is thread-safe.
  """

  def __init__(self):
    # `self._inflight_closure_count` only tracks the number of inflight closures
    # that are "in generation". Once an error occurs, error generation is
    # incremented and all subsequent arriving closures (from inflight) are
    # considered "out of generation".
    self._inflight_closure_count = 0

    self._queue_lock = threading.Lock()

    # Condition indicating that all pending closures (either queued or inflight)
    # have been processed, failed, or cancelled.
    self._stop_waiting_condition = threading.Condition(self._queue_lock)

    # Condition indicating that an item becomes available in queue (not empty).
    self._closures_queued_condition = threading.Condition(self._queue_lock)
    self._should_process_closures = True

    # Condition indicating that a queue slot becomes available (not full).
    # Note that even with "infinite" queue size, there is still a "practical"
    # size limit for the queue depending on host memory capacity, and thus the
    # queue will eventually become full with a lot of enqueued closures.
    self._queue_free_slot_condition = threading.Condition(self._queue_lock)

    # Condition indicating there is no inflight closures.
    self._no_inflight_closure_condition = threading.Condition(self._queue_lock)

    # Use to cancel in-flight closures.
    self._cancellation_mgr = cancellation.CancellationManager()

    if _CLOSURE_QUEUE_MAX_SIZE <= 0:
      logging.warning(
          "In a `ClusterCoordinator`, creating an infinite closure queue can "
          "consume a significant amount of memory and even lead to OOM.")
    self._queue = queue.Queue(maxsize=_CLOSURE_QUEUE_MAX_SIZE)
    self._error = None

    # The following is a lock to make sure when `wait` is called and before it
    # returns no `put` can be executed during this period. It is because `wait`
    # won't know what to do with newly put closures. This lock adds an cutoff
    # for `wait` so that closures put into the queue while waiting would not be
    # taken responsible by this `wait`.
    #
    # We cannot reuse the `self._queue_lock` since when `wait` waits for a
    # condition, the `self._queue_lock` will be released.
    #
    # We don't use a reader/writer's lock on purpose to reduce the complexity
    # of the code.
    self._put_wait_lock = threading.Lock()

    self._watchdog = watchdog.WatchDog(on_triggered=self._on_watchdog_timeout)

  def _on_watchdog_timeout(self):
    """Logs queue state for debugging when the watchdog fires."""
    logging.info("inflight_closure_count is %d", self._inflight_closure_count)
    logging.info("current error is %s:%r", self._error, self._error)

  def stop(self):
    """Stops closure processing and shuts down the watchdog."""
    with self._queue_lock:
      self._should_process_closures = False
      # Wake all consumers so they observe the stop flag and exit.
      self._closures_queued_condition.notify_all()
    self._watchdog.stop()

  def _cancel_all_closures(self):
    """Clears the queue and sets remaining closures cancelled error.

    This method expects self._queue_lock to be held prior to entry.
    """
    self._cancellation_mgr.start_cancel()
    # Wait until every inflight closure has reported back.
    while self._inflight_closure_count > 0:
      self._no_inflight_closure_condition.wait()
    while True:
      try:
        closure = self._queue.get(block=False)
        self._queue_free_slot_condition.notify()
        closure.mark_cancelled()
      except queue.Empty:
        break
    # The cancellation manager cannot be reused once cancelled. After all
    # closures (queued or inflight) are cleaned up, recreate the cancellation
    # manager with clean state.
    # Note on thread-safety: this is triggered when one of theses
    # ClusterCoordinator APIs are called: `schedule`, `wait`, and `done`. At the
    # same time, no new closures can be constructed (which reads the
    # _cancellation_mgr to get cancellable functions).
    self._cancellation_mgr = cancellation.CancellationManager()

  def _raise_if_error(self):
    """Raises the error if one exists.

    If an error exists, cancel the closures in queue, raises it, and clear
    the error.

    This method expects self._queue_lock to be held prior to entry.
    """
    if self._error:
      logging.error("Start cancelling closures due to error %r: %s",
                    self._error, self._error)
      self._cancel_all_closures()
      try:
        raise self._error  # pylint: disable=raising-bad-type
      finally:
        self._error = None

  def put(self, closure):
    """Put a closure into the queue for later execution.

    If `mark_failed` was called before `put`, the error from the first
    invocation of `mark_failed` will be raised.

    Args:
      closure: The `Closure` to put into the queue.
    """
    with self._put_wait_lock, self._queue_lock:
      # Block until a queue slot is free (bounded queue backpressure).
      self._queue_free_slot_condition.wait_for(lambda: not self._queue.full())
      self._queue.put(closure, block=False)
      self._raise_if_error()
      self._closures_queued_condition.notify()

  def get(self, timeout=None):
    """Return a closure from the queue to be executed."""
    with self._queue_lock:
      while self._queue.empty() and self._should_process_closures:
        if not self._closures_queued_condition.wait(timeout=timeout):
          return None
      if not self._should_process_closures:
        return None
      closure = self._queue.get(block=False)
      self._queue_free_slot_condition.notify()
      self._inflight_closure_count += 1
      return closure

  def mark_finished(self):
    """Let the queue know that a closure has been successfully executed."""
    with self._queue_lock:
      if self._inflight_closure_count < 1:
        raise AssertionError("There is no inflight closures to mark_finished.")
      self._inflight_closure_count -= 1
      if self._inflight_closure_count == 0:
        self._no_inflight_closure_condition.notify_all()
      if self._queue.empty() and self._inflight_closure_count == 0:
        # Everything pending has drained; wake up any `wait` callers.
        self._stop_waiting_condition.notify_all()
    self._watchdog.report_closure_done()

  def put_back(self, closure):
    """Put the closure back into the queue as it was not properly executed."""
    with self._queue_lock:
      if self._inflight_closure_count < 1:
        raise AssertionError("There is no inflight closures to put_back.")
      if self._error:
        # An error already cancelled this generation; don't requeue.
        closure.mark_cancelled()
      else:
        self._queue_free_slot_condition.wait_for(lambda: not self._queue.full())
        self._queue.put(closure, block=False)
        self._closures_queued_condition.notify()
      self._inflight_closure_count -= 1
      if self._inflight_closure_count == 0:
        self._no_inflight_closure_condition.notify_all()

  def wait(self, timeout=None):
    """Wait for all closures to be finished before returning.

    If `mark_failed` was called before or during `wait`, the error from the
    first invocation of `mark_failed` will be raised.

    Args:
      timeout: A float specifying a timeout for the wait in seconds.

    Returns:
      True unless the given timeout expired, in which case it returns False.
    """
    with self._put_wait_lock, self._queue_lock:
      while (not self._error and
             (not self._queue.empty() or self._inflight_closure_count > 0)):
        if not self._stop_waiting_condition.wait(timeout=timeout):
          return False
      self._raise_if_error()
      return True

  def mark_failed(self, e):
    """Sets error and unblocks any wait() call."""
    with self._queue_lock:
      # TODO(yuefengz): maybe record all failure and give users more
      # information?
      if self._inflight_closure_count < 1:
        raise AssertionError("There is no inflight closures to mark_failed.")
      if self._error is None:
        # Only the first failure is kept; later ones are dropped.
        self._error = e
      self._inflight_closure_count -= 1
      if self._inflight_closure_count == 0:
        self._no_inflight_closure_condition.notify_all()
      self._stop_waiting_condition.notify_all()

  def done(self):
    """Returns true if the queue is empty and there is no inflight closure.

    If `mark_failed` was called before `done`, the error from the first
    invocation of `mark_failed` will be raised.
    """
    with self._queue_lock:
      self._raise_if_error()
      return self._queue.empty() and self._inflight_closure_count == 0
class WorkerPreemptionHandler(object):
"""Handles worker preemptions."""
def __init__(self, server_def, cluster):
self._server_def = server_def
self._cluster = cluster
self._cluster_update_lock = threading.Lock()
self._cluster_due_for_update_or_finish = threading.Event()
self._worker_up_cond = threading.Condition(self._cluster_update_lock)
self._error_from_recovery = None
self._should_preemption_thread_run = True
self._preemption_handler_thread = threading.Thread(
target=self._preemption_handler,
name="WorkerPreemptionHandler",
daemon=True)
self._preemption_handler_thread.start()
def stop(self):
"""Ensure the worker preemption thread is closed."""
self._should_preemption_thread_run = False
with self._cluster_update_lock:
self._cluster_due_for_update_or_finish.set()
# TODO(yuefengz): The preemption handler thread shouldn't be terminated
# asynchronously since it touches eager context which is a process-wide
# singleton. The problem is in OSS unit tests will time out.
def _validate_preemption_failure(self, e):
"""Validates that the given exception represents worker preemption."""
# Only categorize the failure as a worker preemption if the cancellation
# manager did not attempt to cancel the blocking operations.
if _is_worker_failure(e) and (
not self._cluster.closure_queue._cancellation_mgr.is_cancelled): # pylint: disable=protected-access
return
raise e
@contextlib.contextmanager
def wait_on_failure(self,
on_failure_fn=None,
on_transient_failure_fn=None,
on_recovery_fn=None,
worker_device_name="(unknown)"):
"""Catches worker preemption error and wait until failed workers are back.
Args:
on_failure_fn: an optional function to run if preemption happens.
on_transient_failure_fn: an optional function to run if transient failure
happens.
on_recovery_fn: an optional function to run when a worker is recovered
from preemption.
worker_device_name: the device name of the worker instance that is passing
through the failure.
Yields:
None.
"""
assert self._should_preemption_thread_run
try:
yield
except (errors.OpError, InputError) as e:
# If the error is due to temporary connectivity issues between worker and
# ps, put back closure, ignore error and do not mark worker as failure.
if self._cluster._record_and_ignore_transient_ps_failure(e): # pylint: disable=protected-access
logging.error(
"Remote function on worker %s failed with %r:%s\n"
"It is treated as a transient connectivity failure for now.",
worker_device_name, e, e)
if on_transient_failure_fn:
on_transient_failure_fn()
return
# If the error is due to temporary connectivity issues that cause the
# server-side RPCs to be cancelled, TF might not abort the step and the
# closure might timeout. The coordinator ignores certain amount of such
# failures without marking worker as failure.
if self._cluster._record_and_ignore_transient_timeouts(e): # pylint: disable=protected-access
logging.error(
"Remote function on worker %s failed with %r:%s\n"
"This derived error is ignored and not reported to users.",
worker_device_name, e, e)
if on_transient_failure_fn:
on_transient_failure_fn()
return
# Ignoring derived CancelledErrors to tolerate transient failures in
# PS-worker communication, which initially exposed as an UnavailableError
# and then lead to sub-function cancellation, subsequently getting
# reported from worker to chief as CancelledError.
# We do not mark either worker or PS as failed due to only CancelledError.
# If there are real (non-transient) failures, they must also be reported
# as other errors (UnavailableError most likely) in closure executions.
if isinstance(e, errors.CancelledError) and "/job:" in str(e):
logging.error(
"Remote function on worker %s failed with %r:%s\n"
"This derived error is ignored and not reported to users.",
worker_device_name, e, e)
if on_transient_failure_fn:
on_transient_failure_fn()
return
# This reraises the error, if it's not considered recoverable; otherwise,
# the following failure recovery logic run. At this time, only worker
# unavailability is recoverable. PS unavailability as well as other
# errors in the user function is not recoverable.
self._validate_preemption_failure(e)
logging.error("Worker %s failed with %r:%s", worker_device_name, e, e)
if on_failure_fn:
on_failure_fn()
with self._cluster_update_lock:
self._cluster_due_for_update_or_finish.set()
self._worker_up_cond.wait(_WORKER_MAXIMUM_RECOVERY_SEC)
if self._error_from_recovery:
# TODO(yuefengz): there is only one worker that will get this error.
# Ideally we shuold let all workers notified by `_worker_up_cond` get
# this error.
try:
raise self._error_from_recovery
finally:
self._error_from_recovery = None
logging.info("Worker %s has been recovered.", worker_device_name)
if on_recovery_fn:
with self.wait_on_failure(
on_recovery_fn=on_recovery_fn,
on_transient_failure_fn=on_transient_failure_fn,
worker_device_name=worker_device_name):
on_recovery_fn()
def _preemption_handler(self):
    """A loop that handles preemption.

    This loop waits for signal of worker preemption and upon worker preemption,
    it waits until all workers are back and updates the cluster about the
    restarted workers.

    Runs on a dedicated background thread; exits when `stop` clears
    `_should_preemption_thread_run` and sets the update event.
    """
    assert self._should_preemption_thread_run
    while True:
      self._cluster_due_for_update_or_finish.wait()
      if not self._should_preemption_thread_run:
        logging.info("Stopping the failure handing thread.")
        break

      with self._cluster_update_lock:
        try:
          # TODO(haoyuzhang): support partial cluster recovery
          logging.info("Cluster now being recovered.")
          context.context().update_server_def(self._server_def)

          # Cluster updated successfully, clear the update signal, and notify
          # all workers that they are recovered from failure.
          logging.info("Cluster successfully recovered.")
          self._worker_up_cond.notify_all()
          # The check for _should_preemption_thread_run is necessary since the
          # `stop` may have already set _cluster_due_for_update_or_finish.
          if self._should_preemption_thread_run:
            self._cluster_due_for_update_or_finish.clear()
        except Exception as e:  # pylint: disable=broad-except
          try:
            self._validate_preemption_failure(e)
          except Exception as ps_e:  # pylint: disable=broad-except
            # In this case, a parameter server fails. So we raise this error to
            # the caller of `wait_on_failure`.
            self._error_from_recovery = ps_e
            self._worker_up_cond.notify_all()
            if self._should_preemption_thread_run:
              self._cluster_due_for_update_or_finish.clear()
          # NOTE: Since the first RPC (GetStatus) of update_server_def is
          # currently blocking by default, error should only happen if:
          # (1) More workers failed while waiting for the previous workers to
          # come back;
          # (2) Worker failed when exchanging subsequent RPCs after the first
          # RPC returns.
          # Consider adding backoff retry logic if we see the error logged
          # too frequently.
          logging.error("Cluster update failed with error: %s. Retrying...", e)
class Worker(object):
  """A worker in a cluster.

  Attributes:
    worker_index: The index of the worker in the cluster.
    device_name: The device string of the worker, e.g. "/job:worker/task:1".
    executor: The worker's executor for remote function execution.
    failure_handler: The failure handler used to handle worker preemption
      failures.
  """

  def __init__(self, worker_index, device_name, cluster):
    """Creates the worker and starts its closure-processing thread."""
    self.worker_index = worker_index
    self.device_name = device_name
    self.executor = executor.new_executor(enable_async=False)
    self.failure_handler = cluster.failure_handler
    self._cluster = cluster
    # Weak references to per-worker resource `RemoteValue`s, so resources can
    # be marked aborted on preemption without keeping them alive.
    self._resource_remote_value_refs = []
    self._should_worker_thread_run = True

    # Worker threads need to start after `Worker`'s initialization.
    threading.Thread(target=self._process_queue,
                     name="WorkerClosureProcessingLoop-%d" % self.worker_index,
                     daemon=True).start()

  def stop(self):
    """Ensure the worker thread is closed."""
    self._should_worker_thread_run = False

  def _set_resources_aborted(self):
    """Marks all still-alive per-worker resources as aborted."""
    # TODO(yuefengz): maybe we can query whether a tensor is valid or not
    # instead of marking a tensor aborted?
    for weakref_resource in self._resource_remote_value_refs:
      resource = weakref_resource()
      if resource:
        resource._set_aborted()  # pylint: disable=protected-access

  def _set_dead(self):
    raise NotImplementedError("_set_dead is not implemented.")

  def _process_closure(self, closure):
    """Runs a closure with preemption handling."""
    assert closure is not None
    try:
      with self._cluster.failure_handler.wait_on_failure(
          on_failure_fn=lambda: self._cluster.closure_queue.put_back(closure),
          on_transient_failure_fn=lambda: self._cluster.closure_queue.put_back(
              closure),
          on_recovery_fn=self._set_resources_aborted,
          worker_device_name=self.device_name):
        closure.execute_on(self)
        with metric_utils.monitored_timer("remote_value_fetch"):
          # Copy the remote tensor to local (the coordinator) in case worker
          # becomes unavailable at a later time.
          closure.maybe_call_with_output_remote_value(lambda r: r.get())
        self._cluster.closure_queue.mark_finished()
    except Exception as e:  # pylint: disable=broad-except
      # Avoid logging the derived cancellation error
      if not isinstance(e, errors.CancelledError):
        logging.error(
            "/job:worker/task:%d encountered the following error when "
            "processing closure: %r:%s", self.worker_index, e, e)
      # Propagate the failure to the waiting caller via the RemoteValue.
      closure.maybe_call_with_output_remote_value(lambda r: r._set_error(e))  # pylint: disable=protected-access
      self._cluster.closure_queue.mark_failed(e)

  def _maybe_delay(self):
    """Delay if corresponding env vars are set."""
    # If the following two env vars variables are set. Scheduling for workers
    # will start in a staggered manner. Worker i will wait for
    # `TF_COORDINATOR_SCHEDULE_START_DELAY` * i seconds, not exceeding
    # `TF_COORDINATOR_SCHEDULE_START_DELAY_MAX`.
    delay_secs = int(os.environ.get("TF_COORDINATOR_SCHEDULE_START_DELAY", "0"))
    delay_cap = int(
        os.environ.get("TF_COORDINATOR_SCHEDULE_START_DELAY_MAX", "0"))
    if delay_cap:
      delay_secs = min(delay_secs * self.worker_index, delay_cap)
    if delay_secs > 0:
      logging.info("Worker %d sleeping for %d seconds before running function",
                   self.worker_index, delay_secs)
    time.sleep(delay_secs)

  def _process_queue(self):
    """Function running in a worker thread to process closure queues."""
    self._maybe_delay()
    while self._should_worker_thread_run:
      closure = self._cluster.closure_queue.get()
      if not self._should_worker_thread_run or closure is None:
        return
      self._process_closure(closure)
      # To properly stop the worker and preemption threads, it is important
      # that `ClusterCoordinator` object is not held onto so its `__del__`
      # can be called. By removing the reference to the `closure` that has
      # already been processed, we ensure that the `closure` object is
      # released, while getting the next `closure` at above
      # `self._cluster.closure_queue.get()` call.
      del closure

  def create_resource(self, function, args=None, kwargs=None):
    """Synchronously creates a per-worker resource represented by a `RemoteValue`.

    Args:
      function: the resource function to be run remotely. It should be a
        `tf.function`, a concrete function or a Python function.
      args: positional arguments to be passed to the function.
      kwargs: keyword arguments to be passed to the function.

    Returns:
      one or several RemoteValue objects depending on the function return
      values.
    """
    # Some notes about the concurrency: currently all the activities related to
    # the same worker such as creating resources, setting resources' aborted
    # status, and executing closures happen on the same thread. This allows us
    # to have simpler logic of concurrency.

    closure = ResourceClosure(
        function,
        self._cluster.closure_queue._cancellation_mgr,  # pylint: disable=protected-access
        args=args,
        kwargs=kwargs)
    resource_remote_value = closure.build_output_remote_value()
    self._register_resource(resource_remote_value)

    # The following is a short-term solution to lazily create resources in
    # parallel.
    # TODO(b/160343165): we should create resources eagerly, i.e. schedule the
    # resource creation function as soon as users call this method.
    resource_remote_value._set_aborted()  # pylint: disable=protected-access
    return resource_remote_value

  def _register_resource(self, resource_remote_value):
    # Tracked weakly so abandoned resources do not leak; see
    # `_set_resources_aborted`.
    if not isinstance(resource_remote_value, RemoteValue):
      raise ValueError("Resource being registered is not of type "
                       "`tf.distribute.experimental.coordinator.RemoteValue`.")
    self._resource_remote_value_refs.append(weakref.ref(resource_remote_value))
class Cluster(object):
  """A cluster with workers.

  We assume all function errors are fatal and based on this assumption our
  error reporting logic is:
  1) Both `schedule` and `join` can raise a non-retryable error which is the
  first error seen by the coordinator from any previously scheduled functions.
  2) When an error is raised, there is no guarantee on how many previously
  scheduled functions have been executed; functions that have not been executed
  will be thrown away and marked as cancelled.
  3) After an error is raised, the internal state of error will be cleared.
  I.e. functions can continue to be scheduled and subsequent calls of `schedule`
  or `join` will not raise the same error again.

  Attributes:
    failure_handler: The failure handler used to handle worker preemption
      failures.
    workers: a list of `Worker` objects in the cluster.
    closure_queue: the global Closure queue.
  """

  def __init__(self, strategy):
    """Initializes the cluster instance."""
    self._num_workers = strategy._num_workers
    self._num_ps = strategy._num_ps

    # Ignore PS failures reported by workers due to transient connection
    # errors.
    # Transient connectivity issues between workers and PS are relayed by the
    # workers to the coordinator, leading the coordinator to believe that there
    # are PS failures. The difference between transient vs. permanent PS
    # failure is the number of reports from the workers. When this env var is
    # set to a positive integer K, the coordinator ignores up to K reports of
    # a failed PS task, i.e., only when there are more than K trials of
    # executing closures fail due to errors from the same PS instance do we
    # consider the PS instance encounters a failure.
    # TODO(b/164279603): Remove this workaround when the underlying
    # connectivity issue in gRPC server is resolved.
    self._transient_ps_failures_threshold = int(
        os.environ.get("TF_COORDINATOR_IGNORE_TRANSIENT_PS_FAILURES", 3))
    self._potential_ps_failures_lock = threading.Lock()
    self._potential_ps_failures_count = [0] * self._num_ps

    # Ignore worker timeouts due to transient connection errors.
    # Transient connectivity issues might cause the server side to unexpectedly
    # cancel RPC handling logic, leading to closure execution timeouts. When
    # the _transient_timeout_threshold is set to a positive number, the cluster
    # coordinator ignores DeadlineExceeded errors from workers for the
    # specified times before raising the error to users.
    self._transient_timeouts_threshold = int(
        os.environ.get("TF_COORDINATOR_IGNORE_TRANSIENT_TIMEOUTS",
                       self._num_workers // 10))
    self._transient_timeouts_lock = threading.Lock()
    self._transient_timeouts_count = 0

    self.closure_queue = _CoordinatedClosureQueue()
    self.failure_handler = WorkerPreemptionHandler(context.get_server_def(),
                                                  self)
    worker_device_strings = [
        "/job:worker/replica:0/task:%d" % i for i in range(self._num_workers)
    ]
    self.workers = [
        Worker(i, w, self) for i, w in enumerate(worker_device_strings)
    ]

  def stop(self):
    """Stop worker, worker preemption threads, and the closure queue."""
    self.failure_handler.stop()

    for worker in self.workers:
      worker.stop()
    self.closure_queue.stop()

  def _record_and_ignore_transient_ps_failure(self, e):
    """Records potential PS failures and return if failure should be ignored."""
    if self._transient_ps_failures_threshold <= 0 or not _is_ps_failure(e):
      return False

    ps_tasks = _extract_failed_ps_instances(str(e))
    with self._potential_ps_failures_lock:
      for t in ps_tasks:
        self._potential_ps_failures_count[t] += 1
        # The number of UnavailableError encountered on this PS task exceeds
        # the maximum number of ignored error
        if (self._potential_ps_failures_count[t] >=
            self._transient_ps_failures_threshold):
          return False
    return True

  def _record_and_ignore_transient_timeouts(self, e):
    """Records observed timeout error and return if it should be ignored."""
    if self._transient_timeouts_threshold <= 0:
      return False
    if not isinstance(e, errors.DeadlineExceededError):
      return False
    with self._transient_timeouts_lock:
      self._transient_timeouts_count += 1
      if self._transient_timeouts_count >= self._transient_timeouts_threshold:
        return False
    return True

  def schedule(self, function, args, kwargs):
    """Schedules `function` to be dispatched to a worker for execution.

    Args:
      function: The function to be dispatched to a worker for execution
        asynchronously.
      args: Positional arguments for `fn`.
      kwargs: Keyword arguments for `fn`.

    Returns:
      A `RemoteValue` object.
    """
    closure = Closure(
        function,
        self.closure_queue._cancellation_mgr,  # pylint: disable=protected-access
        args=args,
        kwargs=kwargs)
    ret = closure.build_output_remote_value()
    self.closure_queue.put(closure)
    return ret

  def join(self):
    """Blocks until all scheduled functions are executed."""
    self.closure_queue.wait()

  def done(self):
    """Returns true if all scheduled functions are executed."""
    return self.closure_queue.done()
@tf_export("distribute.experimental.coordinator.ClusterCoordinator",
           "distribute.coordinator.ClusterCoordinator", v1=[])
class ClusterCoordinator(object):
  """An object to schedule and coordinate remote function execution.

  This class is used to create fault-tolerant resources and dispatch functions
  to remote TensorFlow servers.

  Currently, this class is not supported to be used in a standalone manner. It
  should be used in conjunction with a `tf.distribute` strategy that is
  designed to work with it. The `ClusterCoordinator` class currently only
  works `tf.distribute.experimental.ParameterServerStrategy`.

  __The `schedule`/`join` APIs__

  The most important APIs provided by this class is the `schedule`/`join` pair.
  The `schedule` API is non-blocking in that it queues a `tf.function` and
  returns a `RemoteValue` immediately. The queued functions will be dispatched
  to remote workers in background threads and their `RemoteValue`s will be
  filled asynchronously. Since `schedule` doesn't require worker assignment,
  the `tf.function` passed in can be executed on any available worker. If the
  worker it is executed on becomes unavailable before its completion, it will
  be migrated to another worker. Because of this fact and function execution is
  not atomic, a function may be executed more than once.

  __Handling Task Failure__

  This class when used with
  `tf.distribute.experimental.ParameterServerStrategy`, comes with built-in
  fault tolerance for worker failures. That is, when some workers are not
  available for any reason to be reached from the coordinator, the training
  progress continues to be made with the remaining workers. Upon recovery of a
  failed worker, it will be added for function execution after datasets created
  by `create_per_worker_dataset` are re-built on it.

  When a parameter server fails, a `tf.errors.UnavailableError` is raised by
  `schedule`, `join` or `done`. In this case, in addition to bringing back the
  failed parameter server, users should restart the coordinator so that it
  reconnects to workers and parameter servers, re-creates the variables, and
  loads checkpoints. If the coordinator fails, after the user brings it back,
  the program will automatically connect to workers and parameter servers, and
  continue the progress from a checkpoint.

  It is thus essential that in user's program, a checkpoint file is
  periodically saved, and restored at the start of the program. If an
  `tf.keras.optimizers.Optimizer` is checkpointed, after restoring from a
  checkpoint, its `iterations` property roughly indicates the number of steps
  that have been made. This can be used to decide how many epochs and steps
  are needed before the training completion.

  See `tf.distribute.experimental.ParameterServerStrategy` docstring for an
  example usage of this API.

  This is currently under development, and the API as well as implementation
  are subject to changes.
  """

  def __new__(cls, strategy):
    # `ClusterCoordinator` is kept as a single instance to a given `Strategy`.
    # TODO(rchao): Needs a lock for thread-safety
    if strategy._cluster_coordinator is None:
      strategy._cluster_coordinator = super(
          ClusterCoordinator, cls).__new__(cls)
    return strategy._cluster_coordinator

  def __init__(self, strategy):
    """Initialization of a `ClusterCoordinator` instance.

    Args:
      strategy: a supported `tf.distribute.Strategy` object. Currently, only
        `tf.distribute.experimental.ParameterServerStrategy` is supported.

    Raises:
      ValueError: if the strategy being used is not supported.
    """
    # `__new__` may return an already-initialized singleton; only initialize
    # once per strategy.
    if not getattr(self, "_has_initialized", False):
      if not isinstance(strategy,
                        parameter_server_strategy_v2.ParameterServerStrategyV2):
        raise ValueError(
            "Only `tf.distribute.experimental.ParameterServerStrategy` "
            "is supported to work with "
            "`tf.distribute.experimental.coordinator.ClusterCoordinator` "
            "currently.")
      self._strategy = strategy
      self.strategy.extended._used_with_coordinator = True
      self._cluster = Cluster(strategy)
      self._has_initialized = True

  def __del__(self):
    # Stops worker/preemption threads and the closure queue on garbage
    # collection.
    self._cluster.stop()

  @property
  def strategy(self):
    """Returns the `Strategy` associated with the `ClusterCoordinator`."""
    return self._strategy

  def schedule(self, fn, args=None, kwargs=None):
    """Schedules `fn` to be dispatched to a worker for asynchronous execution.

    This method is non-blocking in that it queues the `fn` which will be
    executed later and returns a
    `tf.distribute.experimental.coordinator.RemoteValue` object immediately.
    `fetch` can be called on it to wait for the function execution to finish
    and retrieve its output from a remote worker. On the other hand, call
    `tf.distribute.experimental.coordinator.ClusterCoordinator.join` to wait
    for all scheduled functions to finish.

    `schedule` guarantees that `fn` will be executed on a worker at least once;
    it could be more than once if its corresponding worker fails in the middle
    of its execution. Note that since worker can fail at any point when
    executing the function, it is possible that the function is partially
    executed, but `tf.distribute.experimental.coordinator.ClusterCoordinator`
    guarantees that in those events, the function will eventually be executed
    on any worker that is available.

    If any previously scheduled function raises an error, `schedule` will raise
    any one of those errors, and clear the errors collected so far. What
    happens here, some of the previously scheduled functions may have not been
    executed. User can call `fetch` on the returned
    `tf.distribute.experimental.coordinator.RemoteValue` to inspect if they
    have executed, failed, or cancelled, and reschedule the corresponding
    function if needed.

    When `schedule` raises, it guarantees that there is no function that is
    still being executed.

    At this time, there is no support of worker assignment for function
    execution, or priority of the workers.

    `args` and `kwargs` are the arguments passed into `fn`, when `fn` is
    executed on a worker. They can be
    `tf.distribute.experimental.coordinator.PerWorkerValues` and in this case,
    the argument will be substituted with the corresponding component on the
    target worker. Arguments that are not
    `tf.distribute.experimental.coordinator.PerWorkerValues` will be passed
    into `fn` as-is. Currently,
    `tf.distribute.experimental.coordinator.RemoteValue` is not supported to
    be input `args` or `kwargs`.

    Args:
      fn: A `tf.function`; the function to be dispatched to a worker for
        execution asynchronously. Regular python function is not supported to
        be scheduled.
      args: Positional arguments for `fn`.
      kwargs: Keyword arguments for `fn`.

    Returns:
      A `tf.distribute.experimental.coordinator.RemoteValue` object that
      represents the output of the function scheduled.

    Raises:
      Exception: one of the exceptions caught by the coordinator from any
        previously scheduled function, since the last time an error was thrown
        or since the beginning of the program.
    """
    if not isinstance(fn,
                      (def_function.Function, tf_function.ConcreteFunction)):
      raise TypeError(
          "`tf.distribute.experimental.coordinator.ClusterCoordinator.schedule`"
          " only accepts a `tf.function` or a concrete function.")
    # Slot variables are usually created during function tracing time; thus
    # `schedule` needs to be called within the `strategy.scope()`.
    with self.strategy.scope():
      self.strategy.extended._being_scheduled = True  # pylint: disable=protected-access
      remote_value = self._cluster.schedule(fn, args=args, kwargs=kwargs)
      self.strategy.extended._being_scheduled = False  # pylint: disable=protected-access
      return remote_value

  def join(self):
    """Blocks until all the scheduled functions have finished execution.

    If any previously scheduled function raises an error, `join` will fail by
    raising any one of those errors, and clear the errors collected so far. If
    this happens, some of the previously scheduled functions may have not been
    executed. Users can call `fetch` on the returned
    `tf.distribute.experimental.coordinator.RemoteValue` to inspect if they
    have executed, failed, or cancelled. If some that have been cancelled need
    to be rescheduled, users should call `schedule` with the function again.

    When `join` returns or raises, it guarantees that there is no function
    that is still being executed.

    Raises:
      Exception: one of the exceptions caught by the coordinator by any
        previously scheduled function since the last time an error was thrown
        or since the beginning of the program.
    """
    self._cluster.join()

  def done(self):
    """Returns whether all the scheduled functions have finished execution.

    If any previously scheduled function raises an error, `done` will fail by
    raising any one of those errors.

    When `done` returns True or raises, it guarantees that there is no
    function that is still being executed.

    Returns:
      Whether all the scheduled functions have finished execution.
    Raises:
      Exception: one of the exceptions caught by the coordinator by any
        previously scheduled function since the last time an error was thrown
        or since the beginning of the program.
    """
    return self._cluster.done()

  def create_per_worker_dataset(self, dataset_fn):
    """Create dataset on workers by calling `dataset_fn` on worker devices.

    This creates the given dataset generated by dataset_fn on workers
    and returns an object that represents the collection of those individual
    datasets. Calling `iter` on such collection of datasets returns a
    `tf.distribute.experimental.coordinator.PerWorkerValues`, which is a
    collection of iterators, where the iterators have been placed on
    respective workers.

    Calling `next` on a `PerWorkerValues` of iterator is unsupported. The
    iterator is meant to be passed as an argument into
    `tf.distribute.experimental.coordinator.ClusterCoordinator.schedule`. When
    the scheduled function is about to be executed by a worker, the
    function will receive the individual iterator that corresponds to the
    worker. The `next` method can be called on an iterator inside a
    scheduled function when the iterator is an input of the function.

    Currently the `schedule` method assumes workers are all the same and thus
    assumes the datasets on different workers are the same, except they may be
    shuffled differently if they contain a `dataset.shuffle` operation and a
    random seed is not set. Because of this, we also recommend the datasets to
    be repeated indefinitely and schedule a finite number of steps instead of
    relying on the `OutOfRangeError` from a dataset.

    Example:

    ```python
    strategy = tf.distribute.experimental.ParameterServerStrategy(
        cluster_resolver=...)
    coordinator = tf.distribute.experimental.coordinator.ClusterCoordinator(
        strategy=strategy)

    @tf.function
    def worker_fn(iterator):
      return next(iterator)

    def per_worker_dataset_fn():
      return strategy.distribute_datasets_from_function(
          lambda x: tf.data.Dataset.from_tensor_slices([3] * 3))

    per_worker_dataset = coordinator.create_per_worker_dataset(
        per_worker_dataset_fn)
    per_worker_iter = iter(per_worker_dataset)
    remote_value = coordinator.schedule(worker_fn, args=(per_worker_iter,))
    assert remote_value.fetch() == 3
    ```

    Args:
      dataset_fn: The dataset function that returns a dataset. This is to be
        executed on the workers.

    Returns:
      An object that represents the collection of those individual
      datasets. `iter` is expected to be called on this object that returns
      a `tf.distribute.experimental.coordinator.PerWorkerValues` of the
      iterators (that are on the workers).
    """
    return values_lib.get_per_worker_dataset(dataset_fn, self)

  def _create_per_worker_resources(self, fn, args=None, kwargs=None):
    """Synchronously create resources on the workers.

    The resources are represented by
    `tf.distribute.experimental.coordinator.RemoteValue`s.

    Args:
      fn: The function to be dispatched to all workers for execution
        asynchronously.
      args: Positional arguments for `fn`.
      kwargs: Keyword arguments for `fn`.

    Returns:
      A `tf.distribute.experimental.coordinator.PerWorkerValues` object, which
      wraps a tuple of `tf.distribute.experimental.coordinator.RemoteValue`
      objects.
    """
    results = []
    for w in self._cluster.workers:
      results.append(w.create_resource(fn, args=args, kwargs=kwargs))  # pylint: disable=protected-access
    return PerWorkerValues(tuple(results))

  def fetch(self, val):
    """Blocking call to fetch results from the remote values.

    This is a wrapper around
    `tf.distribute.experimental.coordinator.RemoteValue.fetch` for a
    `RemoteValue` structure; it returns the execution results of
    `RemoteValue`s. If not ready, wait for them while blocking the caller.

    Example:
    ```python
    strategy = ...
    coordinator = tf.distribute.experimental.coordinator.ClusterCoordinator(
        strategy)

    def dataset_fn():
      return tf.data.Dataset.from_tensor_slices([1, 1, 1])

    with strategy.scope():
      v = tf.Variable(initial_value=0)

    @tf.function
    def worker_fn(iterator):
      def replica_fn(x):
        v.assign_add(x)
        return v.read_value()
      return strategy.run(replica_fn, args=(next(iterator),))

    distributed_dataset = coordinator.create_per_worker_dataset(dataset_fn)
    distributed_iterator = iter(distributed_dataset)
    result = coordinator.schedule(worker_fn, args=(distributed_iterator,))
    assert coordinator.fetch(result) == 1
    ```

    Args:
      val: The value to fetch the results from. If this is structure of
        `tf.distribute.experimental.coordinator.RemoteValue`, `fetch()` will
        be called on the individual
        `tf.distribute.experimental.coordinator.RemoteValue` to get the
        result.

    Returns:
      If `val` is a `tf.distribute.experimental.coordinator.RemoteValue` or a
      structure of `tf.distribute.experimental.coordinator.RemoteValue`s,
      return the fetched `tf.distribute.experimental.coordinator.RemoteValue`
      values immediately if they are available, or block the call until they
      are available, and return the fetched
      `tf.distribute.experimental.coordinator.RemoteValue` values with the
      same structure. If `val` is other types, return it as-is.
    """

    def _maybe_fetch(val):
      if isinstance(val, RemoteValue):
        return val.fetch()
      else:
        return val

    # TODO(yuefengz): we should fetch values in a batch.
    return nest.map_structure(_maybe_fetch, val)
def _extract_failed_ps_instances(err_msg):
"""Return a set of potentially failing ps instances from error message."""
tasks = re.findall("/job:ps/replica:0/task:[0-9]+", err_msg)
return set(int(t.split(":")[-1]) for t in tasks)
def _is_ps_failure(error):
  """Whether the error is considered a parameter server failure."""
  # Unwrap `InputError` so the judgement is made on the underlying error.
  if isinstance(error, InputError):
    error = error.original_exception

  # Only Unavailable/Aborted errors originating from a PS count as failures.
  if not isinstance(error, (errors.UnavailableError, errors.AbortedError)):
    return False
  return _RPC_ERROR_FROM_PS in str(error)
def _is_worker_failure(error):
  """Whether the error is considered a worker failure."""
  # For an `InputError`, extract the original error and assess it accordingly.
  if isinstance(error, InputError):
    error = error.original_exception

  error_text = str(error)
  # Errors must mention a worker job, and must not originate from a PS.
  if _JOB_WORKER_STRING_IDENTIFIER not in error_text:
    return False
  if _RPC_ERROR_FROM_PS in error_text:
    return False

  # TODO(haoyuzhang): Consider using special status code if error from a
  # remote is derived from RPC errors originated from other hosts.
  if isinstance(error, (errors.UnavailableError, errors.AbortedError)):
    return True

  # The following error could happen when the remote task fails and restarts
  # in a very short interval during which no RPCs were exchanged to detect the
  # failure. In that case, gRPC allows channel (which is different from a
  # connection) to be reused for a replaced server listening to same address.
  if isinstance(error, errors.InvalidArgumentError):
    # TODO(b/159961667): Fix "Unable to find the relevant tensor
    # remote_handle" part.
    if ("unknown device" in error_text or
        "Unable to find the relevant tensor remote_handle" in error_text):
      return True

  # TODO(b/162541228): The following 2 types of errors are very rare and only
  # observed in large-scale testing. The types of errors should be reduced.
  # This could happen when the function registration fails. In the observed
  # cases this only happens to the dataset related functions.
  if isinstance(error, errors.NotFoundError):
    if ("is neither a type of a primitive operation nor a name of a function "
        "registered" in error_text):
      return True

  # NOTE(b/179061495): During worker preemptions, if multiple functions are
  # running concurrently (especially with subfunctions spanning chief/PS),
  # CancelledError can be returned due to chief/PS cancelling outstanding RPCs
  # to the failing workers.
  return isinstance(error, errors.CancelledError)
| 42.194444 | 112 | 0.720086 |
acfa396d55f9fcbf5a77d95c6ae6e7987c298267 | 11,796 | py | Python | predict.py | Ramstein/Retinopathy2 | 669e74206c466e6351d4e3df6087c6aa39b5c6c2 | [
"MIT"
] | null | null | null | predict.py | Ramstein/Retinopathy2 | 669e74206c466e6351d4e3df6087c6aa39b5c6c2 | [
"MIT"
] | null | null | null | predict.py | Ramstein/Retinopathy2 | 669e74206c466e6351d4e3df6087c6aa39b5c6c2 | [
"MIT"
] | null | null | null | import argparse
import multiprocessing
import os
import time
import boto3
import pandas as pd
import torch
from botocore.exceptions import ClientError
from pytorch_toolbelt.utils import fs
from retinopathy.inference import run_model_inference
def download_from_s3(s3_filename, local_path="test"):
    """Download a single object from the project's S3 bucket to a local file.

    Args:
        s3_filename: Key of the object inside the bucket.
        local_path: Destination file path on the local disk.

    Raises:
        botocore.exceptions.ClientError: re-raised for any S3 error other than
            a missing object (HTTP 404), which is only printed and tolerated.
    """
    # Bucket and region are fixed for this project.
    bucket = "diabetic-retinopathy-data-from-radiology"
    region_name = "us-east-1"
    s3_client = boto3.client('s3', region_name=region_name)
    # print("Downloading file {} to {}".format(s3_filename, local_path))
    try:
        s3_client.download_file(bucket, Key=s3_filename, Filename=local_path)
    except ClientError as e:
        if e.response['Error']['Code'] == "404":
            print("The object does not exist.")
        else:
            raise
def image_with_name_in_dir(dirname, image_id):
    """Return the path of the image named `image_id` inside `dirname`.

    Extensions are tried in a fixed order and the first existing file wins.

    Args:
        dirname: Directory to search.
        image_id: Image file name without extension.

    Returns:
        The full path of the existing image file.

    Raises:
        FileNotFoundError: if no file with any known extension exists. The
            message lists every extension tried, not just the last one.
    """
    extensions = ('png', 'jpg', 'jpeg', 'tif')
    for ext in extensions:
        image_path = os.path.join(dirname, f'{image_id}.{ext}')
        if os.path.isfile(image_path):
            return image_path
    # Original code raised with only the last candidate path, which hid what
    # was actually searched; report all tried extensions instead.
    raise FileNotFoundError(
        'No image {!r} with extension in {} found in directory {!r}'.format(
            image_id, list(extensions), dirname))
def main():
    """Run model inference over the Aptos 2019 test set and pickle predictions.

    Expects a single checkpoint filename as the positional argument; the
    checkpoint is loaded from <data_dir>/model/<name> and per-image
    predictions are written as '<name>_ratinopathy_predictions.pkl'.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('input', nargs='+')
    parser.add_argument('--need-features', action='store_true')
    parser.add_argument('-b', '--batch-size', type=int, default=multiprocessing.cpu_count(),
                        help='Batch Size during training, e.g. -b 64')
    parser.add_argument('-w', '--workers', type=int, default=4, help='')
    args = parser.parse_args()
    need_features = args.need_features
    batch_size = args.batch_size
    num_workers = args.workers
    # 'input' is declared with nargs='+', so args.input is a LIST; the old
    # code passed the list itself to os.path.join, which raises TypeError.
    # Only the first entry is used, as the original comment intended.
    checkpoint_fname = args.input[0]

    '''Not Changing variables'''
    data_dir = '/opt/ml/code/'
    checkpoint_path = os.path.join(data_dir, 'model', checkpoint_fname)
    current_milli_time = lambda: str(round(time.time() * 1000))

    # Deserialize onto CPU when no GPU is available.
    if torch.cuda.is_available():
        checkpoint = torch.load(checkpoint_path)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=lambda storage, loc: storage)
    params = checkpoint['checkpoint_data']['cmd_args']

    # Unique per-run image directory; create it up-front so downloads work.
    images_dir = os.path.join(data_dir, "ratinopathy", current_milli_time())
    os.makedirs(images_dir, exist_ok=True)
    retino = pd.read_csv(os.path.join(data_dir, 'aptos-2019', 'test.csv'))

    '''Downloading fundus photography files'''
    for id_code in retino['id_code']:
        # NOTE(review): this downloads "aptos-2019/train.csv" for EVERY
        # id_code, which cannot be a fundus photograph -- the S3 key should
        # presumably be derived from id_code.  Kept as-is; confirm the
        # bucket layout before changing it.
        download_from_s3(s3_filename="aptos-2019/train.csv", local_path=os.path.join(images_dir, id_code))
    image_paths = retino['id_code'].apply(lambda x: image_with_name_in_dir(images_dir, x))

    # Run inference on the Aptos 2019 public test set; returns a DataFrame
    # with image_id, logits, regressions, ordinal and (optionally) features.
    ratinopathy = run_model_inference(checkpoint=checkpoint,
                                      params=params,
                                      apply_softmax=True,
                                      need_features=need_features,
                                      retino=retino,
                                      image_paths=image_paths,
                                      batch_size=batch_size,
                                      tta='fliplr',
                                      workers=num_workers,
                                      crop_black=True)
    ratinopathy.to_pickle(fs.change_extension(checkpoint_fname, '_ratinopathy_predictions.pkl'))
    # The large block of commented-out Aptos2015/IDRID/Messidor inference
    # code that used to live here was dead code and has been removed; see
    # version control history if it needs to be revived.
# Script entry point: run inference only when executed directly.
if __name__ == '__main__':
    main()
| 58.108374 | 141 | 0.469227 |
acfa398f29b973cb98007f0132a7b825bbc8ea54 | 672 | py | Python | test.py | mosesschwartz/curl_to_requests | d306c8073ee999214407e74c90128fcdda5ade02 | [
"MIT"
] | 9 | 2016-06-23T22:44:25.000Z | 2021-09-24T07:42:22.000Z | test.py | mosesschwartz/curl_to_requests | d306c8073ee999214407e74c90128fcdda5ade02 | [
"MIT"
] | 2 | 2018-03-30T00:42:41.000Z | 2019-08-29T04:01:23.000Z | test.py | mosesschwartz/curl_to_requests | d306c8073ee999214407e74c90128fcdda5ade02 | [
"MIT"
] | 12 | 2016-01-14T19:43:24.000Z | 2021-10-09T11:42:41.000Z | '''Converts a cURL command to code for Python Requests'''
import curl_to_requests

# Example cURL command to translate; kept verbatim (headers and their order
# are part of the expected output of the converter).
curl_cmd = '''curl 'https://github.com/mosesschwartz/curl_to_requests' \\
-H 'Accept-Encoding: gzip, deflate, sdch' \\
-H 'Accept-Language: en-US,en;q=0.8' \\
-H 'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.132 Safari/537.36' \\
-H 'Accept: text/html, */*; q=0.01' \\
-H 'Referer: https://github.com/mosesschwartz/curl_to_requests' \\
-H 'Connection: keep-alive' --compressed'''

# The original used Python-2 print statements, which are a SyntaxError on
# any Python 3 interpreter; converted to print() calls.
print(curl_cmd)
print()
print('--> curl_to_requests --> ')
print()
print(curl_to_requests.curl_to_requests(curl_cmd))
| 35.368421 | 145 | 0.693452 |
acfa3a6d8f16080565bf249667dda41947e2b3dd | 1,484 | py | Python | src/model/Librarian.py | XPH0904/Library-management-system | 9990654070caa9f757af9a6f4771ce4b1b484083 | [
"Apache-2.0"
] | null | null | null | src/model/Librarian.py | XPH0904/Library-management-system | 9990654070caa9f757af9a6f4771ce4b1b484083 | [
"Apache-2.0"
] | null | null | null | src/model/Librarian.py | XPH0904/Library-management-system | 9990654070caa9f757af9a6f4771ce4b1b484083 | [
"Apache-2.0"
] | null | null | null | from collections import namedtuple
class Librarian:
    """A library-system user record holding a user name and a password.

    The Java-flavoured API (getX/setX, hashCode, equals, toString) is kept
    unchanged because the rest of the project calls it by these names.
    """
    # Class-level defaults; setters rebind them per instance.
    nameUser = ""
    password = ""
    def getNameUser(self):
        """Return the user name as a string."""
        return str(self.nameUser)
    def setNameUser(self, nameUser):
        """Store the user name, coerced to a string."""
        self.nameUser = str(nameUser)
    def getPassword(self):
        """Return the password as a string."""
        return str(self.password)
    def setPassword(self, password):
        """Store the password, coerced to a string."""
        self.password = str(password)
    def hashCode(self):
        """Return a Java-style hash: 31 * 1 + hash(nameUser) (0 when None).

        The namedtuple 'constant holder' of the original is replaced by a
        plain local constant; the computed value is identical.
        """
        PRIME = 31
        if self.nameUser is None:
            temp = 0
        else:
            temp = int(hash(self.nameUser))
        result = 1
        result = PRIME * result + temp
        return result
    def equals(self, reference, current):
        """Return True when *current* is considered equal.

        Quick paths keep the original contract: equal to *reference* ->
        True, None -> False, wrong type -> False.  Otherwise user names are
        compared.  (The original compared against a freshly constructed
        Librarian() instead of *current*, and its '__class__ != Librarian'
        guard was always False -- both fixed here.)
        """
        if current == reference:
            return True
        if current is None:
            return False
        if not isinstance(current, Librarian):
            return False
        if self.nameUser is None:
            return current.nameUser is None
        return self.nameUser == current.nameUser
    def list_return(self):
        """Return (nameUser, password) as a tuple."""
        return (self.nameUser, self.password)
    def toString(self):
        """Return a Java-style string representation."""
        return "Librarian [nameUser = " + self.nameUser + ", password=" + self.password + "]"
| 26.035088 | 93 | 0.572102 |
acfa3a78f6394a6f6e9c0e5858038dd2c0b4f427 | 700 | py | Python | leetcode/673_number_of_longest_increasing_subsequence/673.py | nrsyed/leetcode | 958e06b9dd9c07e48dc07028398ce3572c4ec957 | [
"MIT"
] | null | null | null | leetcode/673_number_of_longest_increasing_subsequence/673.py | nrsyed/leetcode | 958e06b9dd9c07e48dc07028398ce3572c4ec957 | [
"MIT"
] | null | null | null | leetcode/673_number_of_longest_increasing_subsequence/673.py | nrsyed/leetcode | 958e06b9dd9c07e48dc07028398ce3572c4ec957 | [
"MIT"
] | null | null | null | class Solution:
def findNumberOfLIS(self, nums: List[int]) -> int:
dp = [1 for _ in nums]
dp_counts = [1 for _ in nums]
for i in range(1, len(nums)):
for j in range(i):
if nums[j] < nums[i]:
if dp[j] + 1 > dp[i]:
dp[i] = dp[j] + 1
dp_counts[i] = dp_counts[j]
elif dp[j] + 1 == dp[i]:
dp_counts[i] += dp_counts[j]
max_len = max(dp)
count = 0
for dp_len, dp_count in zip(dp, dp_counts):
if dp_len == max_len:
count += dp_count
return count
| 33.333333 | 54 | 0.402857 |
acfa3aa213538b8649a40c1d6c78848191838f55 | 1,003 | py | Python | senz-client-samples/python/senz/client.py | AdityaSrivast/senz | 5800b17403dce6ea58baeb8bb41dae12d3937055 | [
"Apache-2.0"
] | 58 | 2018-08-15T04:14:35.000Z | 2021-05-20T05:37:31.000Z | senz-client-samples/python/senz/client.py | AdityaSrivast/senz | 5800b17403dce6ea58baeb8bb41dae12d3937055 | [
"Apache-2.0"
] | 125 | 2018-11-09T12:20:21.000Z | 2021-11-18T13:57:10.000Z | senz-client-samples/python/senz/client.py | AdityaSrivast/senz | 5800b17403dce6ea58baeb8bb41dae12d3937055 | [
"Apache-2.0"
] | 123 | 2018-05-02T19:18:22.000Z | 2021-11-18T13:58:05.000Z | #!/usr/bin/env python3
import socket, sys, time
# Server endpoint (the senz switch listens on port 2552 by default).
host, port = 'localhost', 2552
# Create an ipv4 socket
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect the client
# NOTE(review): connecting at import time is a side effect -- importing this
# module requires a reachable server; confirm this is intentional.
client.connect((host, port))
# Send message to server
def sendMessage(message):
    """Encode and send *message* on the module socket, then echo the reply.

    Blocks until the server answers; the response is printed to stdout with
    a "[Server]" prefix followed by a blank line.
    """
    payload = message.encode()
    client.send(payload)
    reply = receiveMessage()
    print("[Server] {}\n".format(reply))
def receiveMessage():
    """Read one chunk (up to 5,000,000 bytes) from the module socket and decode it."""
    return client.recv(5000000).decode()
def getTimestamp():
    """Return the current local time as a 14-digit YYYYMMDDHHMMSS string."""
    now = time.localtime()
    return time.strftime("%Y%m%d%H%M%S", now)
# Interactive client: optionally replay a command file, then read commands
# from stdin forever.
if __name__ == "__main__":
    # Load file if provided
    commands = []
    if len(sys.argv) > 1:
        filename = sys.argv[1]
        with open (filename, "r") as myfile:
            commands=myfile.read().split('\n')
        print(commands)
    # send commands of file
    for line in commands:
        sendMessage(line)
    # send some data (in this case a HTTP GET request)
    # NOTE(review): this loops until interrupted; EOF on stdin raises an
    # unhandled EOFError -- confirm whether that is acceptable.
    while True:
        msg = input()
        sendMessage(msg)
| 20.895833 | 58 | 0.661017 |
acfa3b59aca7f397c1997a1a02c14d6e60143e71 | 3,871 | py | Python | lib/helpers/trainer_helper.py | xinzhuma/monodle | e426aa65fdc7ceedcaab0d637acf3d3425d0736c | [
"MIT"
] | 92 | 2021-03-31T02:40:27.000Z | 2022-03-30T03:35:27.000Z | lib/helpers/trainer_helper.py | xinzhuma/monodle | e426aa65fdc7ceedcaab0d637acf3d3425d0736c | [
"MIT"
] | 22 | 2021-06-17T02:32:26.000Z | 2022-01-30T14:23:41.000Z | lib/helpers/trainer_helper.py | xinzhuma/monodle | e426aa65fdc7ceedcaab0d637acf3d3425d0736c | [
"MIT"
] | 17 | 2021-06-13T23:39:30.000Z | 2022-03-03T07:09:14.000Z | import os
import tqdm
import torch
import numpy as np
import torch.nn as nn
from lib.helpers.save_helper import get_checkpoint_state
from lib.helpers.save_helper import load_checkpoint
from lib.helpers.save_helper import save_checkpoint
from lib.losses.centernet_loss import compute_centernet3d_loss
class Trainer(object):
    """Training-loop driver for a CenterNet-3D style model.

    Handles device selection, optional pretrained-weight loading and
    resume-from-checkpoint, per-epoch LR scheduling (with warmup),
    periodic checkpoint saving, and the inner optimization loop.
    """
    def __init__(self,
                 cfg,
                 model,
                 optimizer,
                 train_loader,
                 test_loader,
                 lr_scheduler,
                 warmup_lr_scheduler,
                 logger):
        # cfg is dict-like; keys used below: 'pretrain_model', 'resume_model',
        # 'gpu_ids', 'max_epoch', 'save_frequency'.
        self.cfg = cfg
        self.model = model
        self.optimizer = optimizer
        self.train_loader = train_loader
        self.test_loader = test_loader
        self.lr_scheduler = lr_scheduler
        self.warmup_lr_scheduler = warmup_lr_scheduler
        self.logger = logger
        self.epoch = 0
        # Train on the first CUDA device when available, otherwise on CPU.
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # loading pretrain/resume model
        if cfg.get('pretrain_model'):
            assert os.path.exists(cfg['pretrain_model'])
            # Weights only: optimizer state is intentionally not restored.
            load_checkpoint(model=self.model,
                            optimizer=None,
                            filename=cfg['pretrain_model'],
                            map_location=self.device,
                            logger=self.logger)
        if cfg.get('resume_model', None):
            assert os.path.exists(cfg['resume_model'])
            # Full resume: restores optimizer state and returns the epoch to
            # continue from; the LR scheduler is realigned accordingly.
            self.epoch = load_checkpoint(model=self.model.to(self.device),
                                         optimizer=self.optimizer,
                                         filename=cfg['resume_model'],
                                         map_location=self.device,
                                         logger=self.logger)
            self.lr_scheduler.last_epoch = self.epoch - 1
        self.gpu_ids = list(map(int, cfg['gpu_ids'].split(',')))
        # NOTE(review): the model is wrapped in DataParallel here, so any
        # checkpoint saved later carries 'module.'-prefixed keys -- confirm
        # load_checkpoint strips/handles that prefix on resume.
        self.model = torch.nn.DataParallel(model, device_ids=self.gpu_ids).to(self.device)
    def train(self):
        """Run the epoch loop from the current epoch up to cfg['max_epoch']."""
        start_epoch = self.epoch
        progress_bar = tqdm.tqdm(range(start_epoch, self.cfg['max_epoch']), dynamic_ncols=True, leave=True, desc='epochs')
        for epoch in range(start_epoch, self.cfg['max_epoch']):
            # reset random seed
            # ref: https://github.com/pytorch/pytorch/issues/5059
            np.random.seed(np.random.get_state()[1][0] + epoch)
            # train one epoch
            self.train_one_epoch()
            self.epoch += 1
            # update learning rate (warmup scheduler covers the first 5 epochs)
            if self.warmup_lr_scheduler is not None and epoch < 5:
                self.warmup_lr_scheduler.step()
            else:
                self.lr_scheduler.step()
            # save trained model every cfg['save_frequency'] epochs
            if (self.epoch % self.cfg['save_frequency']) == 0:
                os.makedirs('checkpoints', exist_ok=True)
                ckpt_name = os.path.join('checkpoints', 'checkpoint_epoch_%d' % self.epoch)
                save_checkpoint(get_checkpoint_state(self.model, self.optimizer, self.epoch), ckpt_name)
            progress_bar.update()
        return None
    def train_one_epoch(self):
        """One full pass over train_loader: forward, loss, backward, step."""
        self.model.train()
        progress_bar = tqdm.tqdm(total=len(self.train_loader), leave=(self.epoch+1 == self.cfg['max_epoch']), desc='iters')
        for batch_idx, (inputs, targets, _) in enumerate(self.train_loader):
            # Move the batch (inputs and every target tensor) to the device.
            inputs = inputs.to(self.device)
            for key in targets.keys():
                targets[key] = targets[key].to(self.device)
            # train one batch
            self.optimizer.zero_grad()
            outputs = self.model(inputs)
            total_loss, stats_batch = compute_centernet3d_loss(outputs, targets)
            total_loss.backward()
            self.optimizer.step()
            progress_bar.update()
        progress_bar.close()
| 35.513761 | 123 | 0.57427 |
acfa3bbbd24db35cd01c727d977b8418b41d602f | 4,563 | py | Python | tests/normalize/test_normalize_date.py | pmaciel/climetlab | faa0077d615aed05f9e0b4cc2e73f1cf7a9ae61c | [
"Apache-2.0"
] | null | null | null | tests/normalize/test_normalize_date.py | pmaciel/climetlab | faa0077d615aed05f9e0b4cc2e73f1cf7a9ae61c | [
"Apache-2.0"
] | null | null | null | tests/normalize/test_normalize_date.py | pmaciel/climetlab | faa0077d615aed05f9e0b4cc2e73f1cf7a9ae61c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# (C) Copyright 2020 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import datetime
import pytest
from climetlab import load_source
from climetlab.decorators import normalize
from climetlab.testing import climetlab_file
def f(d):
    """Identity function: decoration target for the normalize() tests below."""
    return d
def test_normalize_dates_from_source():
    """GRIB fields normalize to datetimes; netCDF date access is still NotImplemented."""
    dates_3 = normalize("d", "date")(f)
    dates_list_3 = normalize("d", "date", multiple=True)(f)
    source = load_source("file", climetlab_file("docs/examples/test.grib"))
    assert dates_3(source[0]) == datetime.datetime(2020, 5, 13, 12, 0)
    assert dates_list_3(source[0]) == [datetime.datetime(2020, 5, 13, 12, 0)]
    source = load_source("file", climetlab_file("docs/examples/test.nc"))
    # For now
    # NOTE(review): if the first statement raises, the second never runs --
    # only the first call is actually exercised inside this raises-block.
    with pytest.raises(NotImplementedError):
        assert dates_3(source[0]) == datetime.datetime(2020, 5, 13, 12, 0)
        assert dates_list_3(source[0]) == [datetime.datetime(2020, 5, 13, 12, 0)]
def test_dates_formated_1():
    """A compact date string is reformatted via the 'format' keyword."""
    date_formated = normalize("d", "date", format="%Y.%m.%d")(f)
    assert date_formated("20200513") == "2020.05.13"
@pytest.mark.skip(reason="Not implemented yet.")
def test_enum_dates_formated():
    """Date formatting combined with an enumeration of allowed values."""
    date_formated = normalize(
        "d", values=["20010512", "20020512"], type="date", format="%Y.%m.%d"
    )(f)
    # NOTE(review): "20200513" is not among the declared values -- this
    # looks like a placeholder assertion; the test is skipped until the
    # enum+date combination is implemented.
    assert date_formated("20200513") == "2020.05.13"
def test_dates_formated():
    """date-list(...) always yields a list of formatted dates, even for scalars."""
    date_formated = normalize("d", "date-list(%Y.%m.%d)")(f)
    assert date_formated(["20200513", "20200514"]) == ["2020.05.13", "2020.05.14"]
    assert date_formated("20200513") == ["2020.05.13"]
    # Time-of-day is dropped by the %Y.%m.%d format.
    assert date_formated([datetime.datetime(2020, 5, 13, 0, 0)]) == ["2020.05.13"]
    assert date_formated([datetime.datetime(2020, 5, 13, 23, 59)]) == ["2020.05.13"]
def test_dates_multiple():
    """'multiple' toggles between list and scalar normalization variants."""
    date_1 = normalize("d", "date-list(%Y.%m.%d)")(f)
    date_2 = normalize("d", "date(%Y.%m.%d)", multiple=True)(f)
    date_3 = normalize("d", "date(%Y.%m.%d)", multiple=False)(f)
    date_4 = normalize("d", "date-list(%Y.%m.%d)", multiple=False)(f)
    assert date_1("20200511") == ["2020.05.11"]
    assert date_2("20200512") == ["2020.05.12"]
    assert date_3("20200513") == "2020.05.13"
    # 'date-list' combined with multiple=False is contradictory -> rejected.
    with pytest.raises(ValueError):
        date_4("20200514")
def test_dates_formated_from_pandas():
    """Pandas 'date' columns normalize to formatted strings.

    A one-row frame works for both list and scalar variants; the scalar
    variant is expected to reject the two-row frame.
    """
    import pandas as pd
    df1 = pd.DataFrame(
        [
            datetime.datetime(2005, 8, 27, 18, 0),
        ],
        columns=["date"],
    )
    df2 = pd.DataFrame(
        [
            datetime.datetime(2005, 8, 26, 18, 0),
            datetime.datetime(2005, 8, 27, 18, 0),
        ],
        columns=["date"],
    )
    # The local 'f' deliberately shadows the module-level identity helper.
    @normalize("date", "date-list(%Y-%m-%d)")
    def f(date):
        return date
    print(f(df1))
    print(f(df2))
    @normalize("date", "date(%Y-%m-%d)")
    def f(date):
        return date
    print(f(df1))
    # Scalar 'date' cannot hold two values.
    with pytest.raises(AssertionError):
        print(f(df2))
@pytest.mark.skip("Not implemented (yet?).")
def test_dates_formated_from_object():
    """Objects exposing to_datetime_list() should normalize like date lists."""
    date_formated = normalize("d", "date", format="%Y.%m.%d")(f)
    class CustomDateObject:
        def __init__(self, dates):
            self.dates = dates
        def to_datetime_list(self):
            return self.dates
    obj = CustomDateObject(
        [
            datetime.datetime(2005, 8, 26, 18, 0),
            datetime.datetime(2005, 8, 26, 18, 0),
        ]
    )
    # NOTE(review): the expected value does not match obj's 2005 dates --
    # placeholder assertion; revisit when this feature is implemented.
    assert date_formated(obj) == "2020.05.13"
def test_date_none_1():
    """A None default passes through un-normalized."""
    @normalize(
        "name",
        "date(%Y%m%d)",
    )
    def date_default_none(name=None):
        return name
    assert date_default_none("2012-12-02") == "20121202"
    assert date_default_none() is None
def test_date_list_none_1():
    """A None default also passes through for the date-list variant."""
    @normalize(
        "name",
        "date-list(%Y%m%d)",
    )
    def date_default_none(name=None):
        return name
    assert date_default_none("2012-12-02") == ["20121202"]
    assert date_default_none() is None
def test_date_default_1():
    """An unparseable default is only rejected when it is actually used."""
    @normalize(
        "name",
        "date",
    )
    def date_default_1(name="wrong-default"):
        return name
    # Explicit argument: the bad default is never touched.
    date_default_1("2012-12-02")
    with pytest.raises(ValueError, match=".*wrong-default.*"):
        date_default_1()
# Allows running this module directly through climetlab's test helper.
if __name__ == "__main__":
    from climetlab.testing import main
    main(__file__)
| 26.074286 | 84 | 0.619987 |
acfa3c7f43189c7fdefece64379da1bcc1b276aa | 1,802 | py | Python | utils/render_app.py | ksachdeva/PRNet | e3dbba5fa8597c9006395cb074611d8d8928adbe | [
"MIT"
] | 2 | 2018-06-11T03:49:54.000Z | 2018-06-11T04:40:30.000Z | utils/render_app.py | ksachdeva/PRNet | e3dbba5fa8597c9006395cb074611d8d8928adbe | [
"MIT"
] | null | null | null | utils/render_app.py | ksachdeva/PRNet | e3dbba5fa8597c9006395cb074611d8d8928adbe | [
"MIT"
] | 1 | 2020-01-04T07:11:42.000Z | 2020-01-04T07:11:42.000Z | import numpy as np
from render import vis_of_vertices, render_texture
from scipy import ndimage
def get_visibility(vertices, triangles, h, w):
    """Compute per-vertex visibility of a mesh rendered into an h x w image.

    Args:
        vertices: vertex coordinates (transposed before being rendered).
        triangles: triangle vertex-index triplets (transposed before use).
        h, w: render-target height and width in pixels.

    Returns:
        float32 array, 1.0 for visible vertices and 0.0 otherwise.
    """
    triangles = triangles.T
    vertices_vis = vis_of_vertices(vertices.T, triangles, h, w)
    vertices_vis = vertices_vis.astype(bool)
    # Dilate visibility along the mesh: any triangle with a visible corner
    # marks all three corners visible; two passes extend one extra ring.
    for k in range(2):
        tri_vis = vertices_vis[triangles[0,:]] | vertices_vis[triangles[1,:]] | vertices_vis[triangles[2,:]]
        ind = triangles[:, tri_vis]
        vertices_vis[ind] = True
    # Alternative (stricter, all-corners-visible) variant kept for reference:
    # for k in range(2):
    #     tri_vis = vertices_vis[triangles[0,:]] & vertices_vis[triangles[1,:]] & vertices_vis[triangles[2,:]]
    #     ind = triangles[:, tri_vis]
    #     vertices_vis[ind] = True
    vertices_vis = vertices_vis.astype(np.float32) #1 for visible and 0 for non-visible
    return vertices_vis
def get_uv_mask(vertices_vis, triangles, uv_coords, h, w, resolution):
    """Render the vertex-visibility signal into UV space and binarize it.

    The rendered mask is cleaned morphologically (closings plus four 4x4
    erosions) to remove speckle and shrink the mask away from seam borders.
    Note: h and w are accepted but not used by the current implementation.

    Returns:
        float32 (resolution x resolution) mask of the visible UV area.
    """
    triangles = triangles.T
    vertices_vis = vertices_vis.astype(np.float32)
    uv_mask = render_texture(uv_coords.T, vertices_vis[np.newaxis, :], triangles, resolution, resolution, 1)
    uv_mask = np.squeeze(uv_mask > 0)
    # Morphological clean-up: close small holes, then repeatedly erode with
    # a 4x4 structuring element to pull the mask inwards.
    uv_mask = ndimage.binary_closing(uv_mask)
    uv_mask = ndimage.binary_erosion(uv_mask, structure = np.ones((4,4)))
    uv_mask = ndimage.binary_closing(uv_mask)
    uv_mask = ndimage.binary_erosion(uv_mask, structure = np.ones((4,4)))
    uv_mask = ndimage.binary_erosion(uv_mask, structure = np.ones((4,4)))
    uv_mask = ndimage.binary_erosion(uv_mask, structure = np.ones((4,4)))
    uv_mask = uv_mask.astype(np.float32)
    return np.squeeze(uv_mask)
def get_depth_image(vertices, triangles, h, w, isShow = False):
    """Render a per-pixel depth map of the mesh into an h x w image.

    When isShow is True the z column is normalized by its maximum first
    (assumes positive depth values -- TODO confirm) so the image can be
    displayed directly.
    """
    # Keep z as an (n, 1) column so it can be fed to render_texture as-is.
    z = vertices[:, 2:]
    if isShow:
        z = z/max(z)
    depth_image = render_texture(vertices.T, z.T, triangles.T, h, w, 1)
    return np.squeeze(depth_image) | 45.05 | 110 | 0.690899 |
acfa3cf0c5cd35c597b775a67eecffee75b20593 | 4,558 | py | Python | pbda_learn.py | omercohen7640/pbda_DNN | b43631373ecdba5011e94426992563c457e06907 | [
"BSD-2-Clause"
] | 6 | 2015-01-15T03:37:42.000Z | 2020-09-29T08:49:05.000Z | pbda_learn.py | omercohen7640/pbda_DNN | b43631373ecdba5011e94426992563c457e06907 | [
"BSD-2-Clause"
] | 1 | 2019-02-20T22:12:34.000Z | 2019-02-20T22:12:34.000Z | pbda_learn.py | omercohen7640/pbda_DNN | b43631373ecdba5011e94426992563c457e06907 | [
"BSD-2-Clause"
] | 4 | 2015-01-15T03:37:43.000Z | 2019-02-20T22:06:13.000Z | #!/usr/bin/env python
#-*- coding:utf-8 -*-
'''
PAC-BAYESIAN DOMAIN ADAPTATION (aka PBDA)
Executable script to launch the learning algorithm
@author: Pascal Germain -- http://researchers.lille.inria.fr/pgermain/
'''
import common
from pbda import *
from dataset import *
from kernel import *
import sys
import pickle
import argparse
common.print_header('LEARNING ALGORITHM')

# Command-line interface.
parser = argparse.ArgumentParser(description="", formatter_class=common.custom_formatter, epilog="")
parser.add_argument("-c", dest="C_value", type=float, default=1.0, help="Trade-off parameter \"C\" (source risk modifier). Default: 1.0")
parser.add_argument("-a", dest="A_value", type=float, default=1.0, help="Trade-off parameter \"A\" (domain disagreement modifier). Default: 1.0")
parser.add_argument("--kernel", "-k", dest="kernel", default="linear", choices=['rbf', 'linear'], help="Kernel function. Default: linear.")
parser.add_argument("--gamma", "-g", dest="gamma", type=float, default=1.0, help="Gamma parameter of the RBF kernel. Only used if --kernel is set to rbf. Default: 1.0")
parser.add_argument("--nb_restarts", "-n", dest="nb_restarts", type=int, default=1, help='Number of random restarts of the optimization process. Default: 1')
parser.add_argument("--format", "-f", dest="format", choices=['matrix', 'svmlight'], default='matrix', help='Datasets format. Default: matrix (each line defines an example, the first column defines the label in {-1, 1}, and the next columns represent the real-valued features)')
parser.add_argument("--model", "-m", dest="model_file", default='model.bin', help="Model file name. Default: model.bin")
parser.add_argument("--weight", "-w", dest="weight_file", default='', help="Weight vector file name. Default: (none)")
parser.add_argument("source_file", help="Defines the file containing the source dataset.")
parser.add_argument("target_file", help="Defines the file containing the target dataset.")
args = parser.parse_args()

# Main program
###############################################################################
print('... Loading dataset files ...')
###############################################################################
# The bare "except:" clauses of the original were narrowed to
# "except Exception:" so KeyboardInterrupt/SystemExit are not swallowed.
try:
    if args.format == 'matrix':
        source_data = dataset_from_matrix_file(args.source_file)
    elif args.format == 'svmlight':
        source_data = dataset_from_svmlight_file(args.source_file)
except Exception:
    print('ERROR: Unable to load source file "' + args.source_file + '".')
    sys.exit(-1)
print(str(source_data.get_nb_examples()) + ' source examples loaded.')

try:
    if args.format == 'matrix':
        target_data = dataset_from_matrix_file(args.target_file)
    elif args.format == 'svmlight':
        # Presumably aligns the two feature spaces -- see the dataset module.
        target_data = dataset_from_svmlight_file(args.target_file, source_data.get_nb_features())
        source_data.reshape_features(target_data.get_nb_features())
except Exception:
    print('ERROR: Unable to load target file "' + args.target_file + '".')
    sys.exit(-1)
print(str(target_data.get_nb_examples()) + ' target examples loaded.')

###############################################################################
print('\n... Learning ...')
###############################################################################
if args.kernel == 'rbf':
    kernel = Kernel('rbf', gamma=args.gamma)
elif args.kernel == 'linear':
    kernel = Kernel('linear')

algo = Pbda(A=args.A_value, C=args.C_value, verbose=True, nb_restarts=args.nb_restarts )
classifier = algo.learn(source_data, target_data, kernel)

###############################################################################
print('\n... Saving model: "' + args.model_file + '" ...')
###############################################################################
try:
    with open(args.model_file, 'wb') as model:
        pickle.dump(classifier, model, pickle.HIGHEST_PROTOCOL)
    print('File "' + args.model_file + '" created.')
except Exception:
    print('ERROR: Unable to write model file "' + args.model_file + '".')

# Optionally dump the raw weight vector alongside the pickled model.
if len(args.weight_file) > 0:
    try:
        classifier.write_to_file(args.weight_file)
        print('File "' + args.weight_file + '" created.')
    except Exception:
        print('ERROR: Unable to write weight file "' + args.weight_file + '".')

###############################################################################
print('\n... Computing statistics ...')
###############################################################################
stats_dict = algo.get_stats()
for key, val in stats_dict.items():
    print( str(key) + ' = ' + str(val) )
| 45.128713 | 279 | 0.599166 |
acfa3d529d06756cb4dbd1f99d02ec417d2e2d7b | 11,183 | py | Python | monorail/tests/test_ai.py | heinervdm/MysticMine | cba23575c3054af967aae1a15cfb88b28bedc28d | [
"MIT"
] | 1 | 2019-10-19T09:54:32.000Z | 2019-10-19T09:54:32.000Z | monorail/tests/test_ai.py | heinervdm/MysticMine | cba23575c3054af967aae1a15cfb88b28bedc28d | [
"MIT"
] | null | null | null | monorail/tests/test_ai.py | heinervdm/MysticMine | cba23575c3054af967aae1a15cfb88b28bedc28d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import monorail.ai as ai
class SimpleNode (ai.Node):
    """Minimal ai.Node subclass used as a fixture by the unit tests.

    Always produces exactly two children and scores every node as 1, so the
    shape and scoring of the prediction tree are fully predictable.
    """
    def __init__( self, parent = None ):
        ai.Node.__init__( self, parent )

    def generate_childeren( self ):
        """Generate and return the list of childeren nodes of this node
        """
        self.childeren = [SimpleNode(self), SimpleNode(self)]
        return self.childeren

    def calc_score( self ):
        """Return the individual score of this node
        """
        return 1

    def __eq__( self, other ):
        # Identity comparison, matching the default object semantics.
        return self is other

    # Defining __eq__ implicitly sets __hash__ to None in Python 3, which
    # would make SimpleNode instances unusable in sets/dicts.  Equality is
    # plain identity, so identity hashing stays consistent with __eq__.
    __hash__ = object.__hash__
##class TestNode:
## def test_get_generation( self ):
## """Given a node
## When childeren and grandchilderen are calculated
## Then child is generation 1 and grandchild generation 2
## """
## # Given
## node = SimpleNode()
##
## # When
## child = node.generate_childeren()[0]
## grandchild = child.generate_childeren()[0]
##
## # Then
## assert node.get_generation( node ) == 0
## assert child.get_generation( node ) == 1
## assert grandchild.get_generation( node ) == 2
## assert node.get_generation( grandchild ) == -1
## def test_set_score_propagates_to_parent_best_score( self ):
## """Given a structure of child Nodes
## When the score is set to high on a child
## Then the best_score of the parents are updated
## """
## # Given
## node = SimpleNode()
##
## node.generate_childeren()
## node.set_score(1)
## for child in node.get_childeren():
## child.generate_childeren()
## child.set_score(1)
##
## # When/Then
## child = node.get_childeren()[1].get_childeren()[0]
## child.set_score(3)
## assert node.get_best_score() == child.get_total_score()
## assert 1 < node.get_best_score() < 5
##
## child = node.get_childeren()[0].get_childeren()[0]
## child.set_score( 5 )
## assert node.get_best_score() == child.get_total_score()
## best_score = node.get_best_score()
##
## child = node.get_childeren()[1].get_childeren()[0]
## child.set_score(3)
## assert node.get_best_score() == best_score
## def test_get_best_child( self ):
## """Given a structure of child nodes
## When the score of one child is higher than another
## Then get_best_childs returns the child with the highest score
## """
## # Given
## node = SimpleNode()
##
## node.generate_childeren()
## node.set_score(1)
## for child in node.get_childeren():
## child.set_score(1)
##
## # When/Then
## assert len(node.get_best_childs()) == 2
##
## child = node.get_childeren()[1]
## child.set_score(3)
##
## assert len(node.get_best_childs()) == 1
## assert node.get_best_childs()[0] is child
## def test_is_leaf( self ):
## """Given a structure of child nodes
## When is_leaf is called on a node
## It returns true when the node is a leaf, and false otherwise
## """
## # Given
## node = SimpleNode()
##
## node.generate_childeren()
## for child in node.get_childeren():
## child.generate_childeren()
##
## # When/Then
## assert not node.is_leaf()
## assert not node.get_childeren()[0].is_leaf()
## assert node.get_childeren()[0].get_childeren()[0].is_leaf()
class TestPredictionTree:
    def test_constructor( self ):
        """Smoke test: PredictionTree constructs with and without an argument.

        No assertions -- passing simply means neither constructor raises.
        """
        tree = ai.PredictionTree()
        tree = ai.PredictionTree( 30 )
def test_set_root_sets_root( self ):
"""Given a PredictionTree and a node
When set_root is called with the node
Then root_node is set to that node
"""
# Given
tree = ai.PredictionTree()
node = SimpleNode()
# When
tree.set_root( node )
# Then
assert tree.root_node is node
## def test_update_creates_childeren( self ):
## """Given a PredictionTree with a root node
## When update is called
## Then the childeren of the root node are recursively calculated
## """
## # Given
## tree = ai.PredictionTree()
## tree.set_root( SimpleNode() )
##
## # When
## tree.update()
##
## # Then
## assert tree.root_node.get_childeren() is not None
## assert len(tree.root_node.get_childeren()) == 2
##
## child = tree.root_node.get_childeren()[0]
## assert child is not None
## assert len(child.get_childeren()) == 2
## def test_replace_root_with_child_is_optimized( self ):
## """Given a PredictionTree with root node and childeren
## When the root node is replaced by one of its childs
## The tree is reused
## """
## # Given
## tree = ai.PredictionTree()
## tree.set_root( SimpleNode() )
## tree.update() # generate the tree
##
## # When
## tree.set_root( tree.root_node.get_childeren()[0] )
##
## # Then
## assert len(tree.root_node.get_childeren()) == 2
## def test_replace_root_with_non_child( self ):
## """Given a PredictionTree with root node and childeren
## When the root node is replaced by a new node
## The tree is created from scratch
## """
## # Given
## tree = ai.PredictionTree()
## tree.set_root( SimpleNode() )
## tree.update() # generate the tree
##
## # When
## tree.set_root( SimpleNode() )
##
## # Then
## assert tree.root_node.get_childeren() is None
## def test_scores_are_calculated( self ):
## """Given a PredictionTree with root node
## When the childeren are calculated
## Then all nodes receive scores
## """
## # Given
## tree = ai.PredictionTree( MAX_GENERATIONS = 5 )
## tree.set_root( SimpleNode() )
##
## # When
## tree.update()
##
## # Then
## child = tree.root_node.get_childeren()[0]
## grandchild = child.get_childeren()[0]
##
## assert tree.root_node.get_total_score() == 1
## assert 1 < child.get_total_score() < grandchild.get_total_score() < 3
## def test_scores_are_recalculated_on_root_change( self ):
## """Given a PredictionTree with root node and scores
## When the root node changes to a child node
## Then all node scores are recalculated
## """
## # Given
## tree = ai.PredictionTree( MAX_GENERATIONS = 5 )
## tree.set_root( SimpleNode() )
##
## # When
## tree.update()
## tree.set_root( tree.root_node.get_childeren()[0] )
## tree.update()
##
## # Then
## child = tree.root_node.get_childeren()[0]
## grandchild = child.get_childeren()[0]
##
## assert tree.root_node.get_total_score() == 1
## assert 1 < child.get_total_score() < grandchild.get_total_score() < 3
## def test_get_nodes_of_generation( self ):
## """Given a PredictionTree with root node and childeren
## When the childeren are calculated
## Then we can get the nodes of each generation
## """
## # Given
## tree = ai.PredictionTree( MAX_GENERATIONS = 5 )
## tree.set_root( SimpleNode() )
##
## # When
## tree.update()
##
## # Then
## generation = tree.get_nodes_of_generation(0)
## assert len(generation) == 1
## assert generation[0] is tree.root_node
## assert generation[0].generation == 0
##
## generation = tree.get_nodes_of_generation(1)
## assert len(generation) == 2
## assert generation[0] in tree.root_node.get_childeren()
## assert generation[1] in tree.root_node.get_childeren()
## assert generation[0].generation == 1
## assert generation[0].generation == 1
##
## generation = tree.get_nodes_of_generation(2)
## assert len(generation) == 4
## assert generation[0] in tree.root_node.get_childeren()[0].get_childeren()\
## or generation[0] in tree.root_node.get_childeren()[1].get_childeren()
## assert generation[1] in tree.root_node.get_childeren()[0].get_childeren()\
## or generation[1] in tree.root_node.get_childeren()[1].get_childeren()
## assert generation[2] in tree.root_node.get_childeren()[0].get_childeren()\
## or generation[2] in tree.root_node.get_childeren()[1].get_childeren()
## assert generation[3] in tree.root_node.get_childeren()[0].get_childeren()\
## or generation[3] in tree.root_node.get_childeren()[1].get_childeren()
## assert generation[0].generation == 2
## assert generation[1].generation == 2
## assert generation[2].generation == 2
## assert generation[3].generation == 2
## def test_get_nodes_of_generation_on_new_root( self ):
## """Given a PredictionTree with root node and childeren
## When root node is recalculated
## Then the generations are also recalculated
## """
## # Given
## tree = ai.PredictionTree( MAX_GENERATIONS = 5 )
## tree.set_root( SimpleNode() )
##
## # When
## tree.update()
## tree.set_root( tree.root_node.get_childeren()[0] )
## tree.update()
##
## # Then
## generation = tree.get_nodes_of_generation(0)
## assert len(generation) == 1
## assert generation[0] is tree.root_node
## assert generation[0].generation == 0
##
## generation = tree.get_nodes_of_generation(1)
## assert len(generation) == 2
## assert generation[0] in tree.root_node.get_childeren()
## assert generation[1] in tree.root_node.get_childeren()
## assert generation[0].generation == 1
## assert generation[0].generation == 1
##
## generation = tree.get_nodes_of_generation(2)
## assert len(generation) == 4
## assert generation[0] in tree.root_node.get_childeren()[0].get_childeren()\
## or generation[0] in tree.root_node.get_childeren()[1].get_childeren()
## assert generation[1] in tree.root_node.get_childeren()[0].get_childeren()\
## or generation[1] in tree.root_node.get_childeren()[1].get_childeren()
## assert generation[2] in tree.root_node.get_childeren()[0].get_childeren()\
## or generation[2] in tree.root_node.get_childeren()[1].get_childeren()
## assert generation[3] in tree.root_node.get_childeren()[0].get_childeren()\
## or generation[3] in tree.root_node.get_childeren()[1].get_childeren()
## assert generation[0].generation == 2
## assert generation[1].generation == 2
## assert generation[2].generation == 2
## assert generation[3].generation == 2
##
| 36.42671 | 86 | 0.577394 |
acfa3d5cdebd91d3306e839ed7e730af9f564ed7 | 2,944 | py | Python | pypy/module/cpyext/__init__.py | yxzoro/pypy | 6e47b3d3e5513d9639a21554963a6ace172ccfee | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | pypy/module/cpyext/__init__.py | yxzoro/pypy | 6e47b3d3e5513d9639a21554963a6ace172ccfee | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | pypy/module/cpyext/__init__.py | yxzoro/pypy | 6e47b3d3e5513d9639a21554963a6ace172ccfee | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | from pypy.interpreter.mixedmodule import MixedModule
from pypy.module.cpyext.state import State
from pypy.module.cpyext import api
class Module(MixedModule):
    """The 'cpyext' mixed module: PyPy's emulation layer for the CPython C API."""
    # Interpreter-level names exposed by the module (implemented in RPython).
    interpleveldefs = {
        'is_cpyext_function': 'interp_cpyext.is_cpyext_function',
    }
    # Application-level (pure Python) names; none for this module.
    appleveldefs = {
    }
    # Functions registered via the C-level atexit mechanism; capped at 32 below.
    atexit_funcs = []
    def startup(self, space):
        """One-time initialization when the object space starts up.

        Initializes the cpyext State cache, then teaches pickle to handle
        built-in C functions: w_obj is a dummy W_PyCFunctionObject used only
        so that type() can be taken on it inside the appexec snippet.
        """
        space.fromcache(State).startup(space)
        method = pypy.module.cpyext.typeobject.get_new_method_def(space)
        # the w_self argument here is a dummy, the only thing done with w_obj
        # is call type() on it
        w_obj = pypy.module.cpyext.methodobject.W_PyCFunctionObject(space,
            method, space.w_None)
        space.appexec([w_obj], """(meth):
            from pickle import Pickler
            Pickler.dispatch[type(meth)] = Pickler.save_global
        """)
    def register_atexit(self, function):
        """Register a callback to run at shutdown; at most 32 are allowed."""
        if len(self.atexit_funcs) >= 32:
            raise ValueError("cannot register more than 32 atexit functions")
        self.atexit_funcs.append(function)
    def shutdown(self, space):
        """Run all registered atexit callbacks, in registration order."""
        for func in self.atexit_funcs:
            func()
# import these modules to register api functions by side-effect
import pypy.module.cpyext.pyobject
import pypy.module.cpyext.boolobject
import pypy.module.cpyext.floatobject
import pypy.module.cpyext.modsupport
import pypy.module.cpyext.pythonrun
import pypy.module.cpyext.pyerrors
import pypy.module.cpyext.typeobject
import pypy.module.cpyext.object
import pypy.module.cpyext.bytesobject
import pypy.module.cpyext.bytearrayobject
import pypy.module.cpyext.tupleobject
import pypy.module.cpyext.setobject
import pypy.module.cpyext.dictobject
import pypy.module.cpyext.longobject
import pypy.module.cpyext.listobject
import pypy.module.cpyext.sequence
import pypy.module.cpyext.buffer
import pypy.module.cpyext.eval
import pypy.module.cpyext.import_
import pypy.module.cpyext.mapping
import pypy.module.cpyext.iterator
import pypy.module.cpyext.unicodeobject
import pypy.module.cpyext.sysmodule
import pypy.module.cpyext.number
import pypy.module.cpyext.sliceobject
import pypy.module.cpyext.stubsactive
import pypy.module.cpyext.pystate
import pypy.module.cpyext.cdatetime
import pypy.module.cpyext.complexobject
import pypy.module.cpyext.weakrefobject
import pypy.module.cpyext.funcobject
import pypy.module.cpyext.frameobject
import pypy.module.cpyext.classobject
import pypy.module.cpyext.exception
import pypy.module.cpyext.memoryobject
import pypy.module.cpyext.codecs
import pypy.module.cpyext.pyfile
import pypy.module.cpyext.pystrtod
import pypy.module.cpyext.pytraceback
import pypy.module.cpyext.methodobject
import pypy.module.cpyext.dictproxyobject
import pypy.module.cpyext.marshal
import pypy.module.cpyext.genobject
import pypy.module.cpyext.namespaceobject
# now that all rffi_platform.Struct types are registered, configure them
api.configure_types()
| 34.635294 | 80 | 0.767663 |
acfa3e33d3dfd71d7c65dc772203c0e8fd49f3d7 | 470 | py | Python | jss/migrations/0010_auto_20160601_0050.py | cshepp1211/MacDash | 61e0244d951fbd6f4e19b632072f879e57d11372 | [
"Apache-2.0"
] | 20 | 2016-08-16T20:47:51.000Z | 2020-11-17T14:17:34.000Z | jss/migrations/0010_auto_20160601_0050.py | cshepp1211/MacDash | 61e0244d951fbd6f4e19b632072f879e57d11372 | [
"Apache-2.0"
] | 7 | 2016-08-18T14:55:21.000Z | 2019-09-09T01:18:10.000Z | jss/migrations/0010_auto_20160601_0050.py | cshepp1211/MacDash | 61e0244d951fbd6f4e19b632072f879e57d11372 | [
"Apache-2.0"
] | 3 | 2016-08-23T18:09:23.000Z | 2017-05-24T18:14:30.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-01 00:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.9.6: alters Computer.warranty_expires.
    dependencies = [
        ('jss', '0009_auto_20160601_0048'),
    ]
    operations = [
        migrations.AlterField(
            model_name='computer',
            name='warranty_expires',
            # NOTE(review): max_length has no effect on DateField; it is
            # carried over from the model definition by the autogenerator.
            field=models.DateField(max_length=200, null=True),
        ),
    ]
| 22.380952 | 62 | 0.625532 |
acfa432ef11b3d1be5ce94fda675567ee0cf1775 | 579 | py | Python | setup.py | buckley-w-david/event-scan | 73b9522c0ed9d5c533c5e0084bdb0a87643f3e58 | [
"MIT"
] | null | null | null | setup.py | buckley-w-david/event-scan | 73b9522c0ed9d5c533c5e0084bdb0a87643f3e58 | [
"MIT"
] | null | null | null | setup.py | buckley-w-david/event-scan | 73b9522c0ed9d5c533c5e0084bdb0a87643f3e58 | [
"MIT"
] | null | null | null | from setuptools import find_packages, setup
# Read the long description from the README so PyPI renders the project page.
# An explicit encoding is required: without it, open() uses the locale's
# default encoding and can mis-decode (or crash on) non-ASCII README content.
with open('README.md', 'r', encoding='utf-8') as readme:
    long_description = readme.read()
setup(
    name='event_scan',
    version='0.1.0',
    author='David Buckley',
    author_email='david@davidbuckley.ca',
    long_description=long_description,
    long_description_content_type='text/markdown',
    packages=find_packages(),
    install_requires=[
        'toml',
        'typer',
        'SQLAlchemy',
        'feedgen',
    ],
    entry_points = {
        'console_scripts': [
            'event-scan=event_scan.cli:app'
        ]
    }
)
acfa433893739739dbd8a1f63d2852bd2cca853d | 5,520 | py | Python | contrib/seeds/makeseeds.py | thebitradio/Bitradio | bbf306b9834b57fc8373a65b9f3b97f77fb2ff04 | [
"MIT"
] | 81 | 2017-04-13T05:59:28.000Z | 2021-08-23T05:35:49.000Z | contrib/seeds/makeseeds.py | thebitradio/Bitradio | bbf306b9834b57fc8373a65b9f3b97f77fb2ff04 | [
"MIT"
] | 17 | 2017-07-04T11:57:53.000Z | 2022-02-10T22:58:17.000Z | contrib/seeds/makeseeds.py | thebitradio/Bitradio | bbf306b9834b57fc8373a65b9f3b97f77fb2ff04 | [
"MIT"
] | 45 | 2017-06-19T06:43:07.000Z | 2020-02-04T20:55:05.000Z | #!/usr/bin/env python3
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
# Maximum number of seed entries to emit.
NSEEDS=512
# Cap on seeds taken from any single autonomous system.
MAX_SEEDS_PER_ASN=2
# Minimum chain height a peer must report to be considered current.
MIN_BLOCKS = 615801
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
# NOTE(review): currently contains only the empty string, so no real host
# is blacklisted — presumably a placeholder to be filled in.
SUSPICIOUS_HOSTS = {
    ""
}
import re
import sys
import dns.resolver
import collections
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/BitRadioCore:1.1.0.(0|1|99)/)$")

def parseline(line):
    """Parse one whitespace-separated line of a dnsseed.dump file.

    Expected columns: address, good, lastSuccess, %(2h), %(8h), %(1d),
    %(7d), %(30d), blocks, services, version, "useragent" (12 fields; 13
    when the quoted user agent contains a space).

    Returns a dict describing the peer, or None when the line is too
    short, the address format is unknown, or the seeder marked it bad.
    """
    sline = line.split()
    # A well-formed line has at least 12 columns (agent included); the old
    # '< 11' guard let 11-field lines through and crashed on sline[11].
    if len(sline) < 12:
        return None
    m = PATTERN_IPV4.match(sline[0])
    sortkey = None
    ip = None
    if m is None:
        m = PATTERN_IPV6.match(sline[0])
        if m is None:
            m = PATTERN_ONION.match(sline[0])
            if m is None:
                return None
            else:
                net = 'onion'
                ipstr = sortkey = m.group(1)
                port = int(m.group(2))
        else:
            net = 'ipv6'
            if m.group(1) in ['::']: # Not interested in localhost
                return None
            ipstr = m.group(1)
            sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
            port = int(m.group(2))
    else:
        # Do IPv4 sanity check
        ip = 0
        for i in range(0,4):
            if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
                return None
            ip = ip + (int(m.group(i+2)) << (8*(3-i)))
        if ip == 0:
            return None
        net = 'ipv4'
        sortkey = ip
        ipstr = m.group(1)
        port = int(m.group(6))
    # Skip bad results. The column is a string: the previous comparison to
    # the integer 0 was always False, so bad entries were never skipped.
    if sline[1] == '0':
        return None
    # Extract uptime %.
    uptime30 = float(sline[7][:-1])
    # Extract Unix timestamp of last success.
    lastsuccess = int(sline[2])
    # Extract protocol version.
    version = int(sline[10])
    # Extract user agent. A quoted agent containing one space spans two
    # columns (13 fields); the previous 'len(sline) > 11' test indexed
    # sline[12] on ordinary 12-field lines and raised IndexError.
    if len(sline) > 12:
        agent = sline[11][1:] + sline[12][:-1]
    else:
        agent = sline[11][1:-1]
    # Extract service flags.
    service = int(sline[9], 16)
    # Extract blocks.
    blocks = int(sline[8])
    # Construct result.
    return {
        'net': net,
        'ip': ipstr,
        'port': port,
        'ipnum': ip,
        'uptime': uptime30,
        'lastsuccess': lastsuccess,
        'version': version,
        'agent': agent,
        'service': service,
        'blocks': blocks,
        'sortkey': sortkey,
    }
def filtermultiport(ips):
    '''Keep only entries whose sortkey (host) occurs exactly once.'''
    occurrences = collections.Counter(entry['sortkey'] for entry in ips)
    return [entry for entry in ips if occurrences[entry['sortkey']] == 1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
    """Limit IPv4 seeds per autonomous system and cap the total count.

    IPv4 entries are looked up through the Team Cymru origin-ASN DNS
    service and capped at max_per_asn per AS and max_total overall.
    IPv6 and onion entries are appended unfiltered (no ASN lookup yet).
    """
    # Sift out ips by type
    ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
    ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
    ips_onion = [ip for ip in ips if ip['net'] == 'onion']
    # Filter IPv4 by ASN
    result = []
    asn_count = {}
    for ip in ips_ipv4:
        if len(result) == max_total:
            break
        try:
            asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
            if asn not in asn_count:
                asn_count[asn] = 0
            if asn_count[asn] == max_per_asn:
                continue
            asn_count[asn] += 1
            result.append(ip)
        except Exception:
            # Narrowed from a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit; only ordinary errors (DNS
            # failure, unexpected TXT format) should be skipped.
            sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
    # TODO: filter IPv6 by ASN
    # Add back non-IPv4
    result.extend(ips_ipv6)
    result.extend(ips_onion)
    return result
def main():
    """Read a dnsseed.dump on stdin and print the filtered seed list."""
    raw_lines = sys.stdin.readlines()
    parsed = (parseline(raw) for raw in raw_lines)
    # Drop lines that did not parse at all.
    nodes = [node for node in parsed if node is not None]
    # Drop known-bad hosts.
    nodes = [node for node in nodes if node['ip'] not in SUSPICIOUS_HOSTS]
    # Require a minimum reported chain height.
    nodes = [node for node in nodes if node['blocks'] >= MIN_BLOCKS]
    # Require service bit 1 to be set.
    nodes = [node for node in nodes if (node['service'] & 1) == 1]
    # Require at least 50% uptime over the last 30 days.
    nodes = [node for node in nodes if node['uptime'] > 50]
    # Require a known and recent user agent (spaces normalized to dashes).
    nodes = [node for node in nodes
             if PATTERN_AGENT.match(re.sub(' ', '-', node['agent']))]
    # Best candidates first: uptime, then most recent success, then address.
    nodes.sort(key=lambda n: (n['uptime'], n['lastsuccess'], n['ip']), reverse=True)
    # Hosts exposing multiple ports are likely abusive; drop them.
    nodes = filtermultiport(nodes)
    # Enforce the per-ASN and global caps.
    nodes = filterbyasn(nodes, MAX_SEEDS_PER_ASN, NSEEDS)
    # Deterministic output order.
    nodes.sort(key=lambda n: (n['net'], n['sortkey']))
    for node in nodes:
        if node['net'] == 'ipv6':
            print('[%s]:%i' % (node['ip'], node['port']))
        else:
            print('%s:%i' % (node['ip'], node['port']))
if __name__ == '__main__':
    main()
| 32.093023 | 186 | 0.56721 |
acfa4392f176eb23d077033352471f3be3c31167 | 3,209 | py | Python | serve.py | harttu/keras-bert-ner | 5fcf682ea02fd042a2df06df63d877d14fdc8d16 | [
"MIT"
] | null | null | null | serve.py | harttu/keras-bert-ner | 5fcf682ea02fd042a2df06df63d877d14fdc8d16 | [
"MIT"
] | null | null | null | serve.py | harttu/keras-bert-ner | 5fcf682ea02fd042a2df06df63d877d14fdc8d16 | [
"MIT"
] | null | null | null | import os
import sys
import unicodedata
from flask import Flask, request
import numpy as np
import tensorflow as tf
from common import process_sentences, load_ner_model
from common import encode, write_result
from common import argument_parser
DEFAULT_MODEL_DIR = 'ner-model'
app = Flask(__name__)
@app.route('/')
def tag():
text = request.values['text']
tokenized = request.values.get('tokenized') in ('1', 'True', 'true')
return app.tagger.tag(text, tokenized)
class Tagger(object):
def __init__(self, model, tokenizer, labels, config):
self.model = model
self.tokenizer = tokenizer
self.labels = labels
self.config = config
self.session = None
self.graph = None
def tag(self, text, tokenized=False):
max_seq_len = self.config['max_seq_length']
inv_label_map = { i: l for i, l in enumerate(self.labels) }
if tokenized:
words = text.split() # whitespace tokenization
else:
words = tokenize(text) # approximate BasicTokenizer
dummy = ['O'] * len(words)
data = process_sentences([words], [dummy], self.tokenizer, max_seq_len)
x = encode(data.combined_tokens, self.tokenizer, max_seq_len)
if self.session is None or self.graph is None:
probs = self.model.predict(x, batch_size=8) # assume singlethreaded
else:
with self.session.as_default():
with self.graph.as_default():
probs = self.model.predict(x, batch_size=8)
preds = np.argmax(probs, axis=-1)
pred_labels = []
for i, pred in enumerate(preds):
pred_labels.append([inv_label_map[t]
for t in pred[1:len(data.tokens[i])+1]])
lines = write_result(
'output.tsv', data.words, data.lengths,
data.tokens, data.labels, pred_labels, mode='predict'
)
return ''.join(lines)
@classmethod
def load(cls, model_dir):
# session/graph for multithreading, see https://stackoverflow.com/a/54783311
session = tf.Session()
graph = tf.get_default_graph()
with graph.as_default():
with session.as_default():
model, tokenizer, labels, config = load_ner_model(model_dir)
tagger = cls(model, tokenizer, labels, config)
tagger.session = session
tagger.graph = graph
return tagger
def _is_punctuation(codepoint):
    """True for Unicode 'P*' categories plus the ASCII symbol ranges that
    BERT's BasicTokenizer also treats as punctuation."""
    if unicodedata.category(chr(codepoint)).startswith('P'):
        return True
    return (33 <= codepoint <= 47 or 58 <= codepoint <= 64
            or 91 <= codepoint <= 96 or 123 <= codepoint <= 126)

# Every punctuation character, mapped to itself padded with spaces so that
# str.split() afterwards yields each one as its own token.
punct_chars = {chr(cp) for cp in range(sys.maxunicode) if _is_punctuation(cp)}
translation_table = str.maketrans({c: ' ' + c + ' ' for c in punct_chars})

def tokenize(text):
    """Whitespace-tokenize text after isolating punctuation characters."""
    return text.translate(translation_table).split()
def main(argv):
    """CLI entry point: load the NER model and serve it over HTTP on port 8080."""
    argparser = argument_parser('serve')
    args = argparser.parse_args(argv[1:])
    if args.ner_model_dir is None:
        args.ner_model_dir = DEFAULT_MODEL_DIR
    # Load once at startup; request handlers share the model via app.tagger.
    app.tagger = Tagger.load(args.ner_model_dir)
    app.run(port=8080)
    return 0
if __name__ == '__main__':
    sys.exit(main(sys.argv))
| 30.855769 | 84 | 0.615145 |
acfa440d2974dbbdfaafbb5683f4cbff584285b8 | 4,291 | py | Python | bsp/air640w/rtt/rtconfig.py | 1021256354/LuatOS | d811c3740a05aa8f5eaebf192afc11b09847be21 | [
"MIT"
] | 1 | 2020-11-30T07:43:43.000Z | 2020-11-30T07:43:43.000Z | bsp/air640w/rtt/rtconfig.py | alaixu/LuatOS | 618d3f6bf9875465c8ed370d12b1ed425772cfd8 | [
"MIT"
] | 3 | 2021-12-14T21:54:24.000Z | 2022-01-04T16:48:18.000Z | bsp/air640w/rtt/rtconfig.py | alaixu/LuatOS | 618d3f6bf9875465c8ed370d12b1ed425772cfd8 | [
"MIT"
] | 2 | 2021-01-10T06:18:06.000Z | 2021-06-26T16:22:11.000Z | import os
import sys
# toolchains options
ARCH='arm'
CPU='cortex-m3'
CROSS_TOOL='gcc'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if os.getenv('RTT_ROOT'):
RTT_ROOT = os.getenv('RTT_ROOT')
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = 'E:/tool/env/tools/gnu_gcc/arm_gcc/mingw/bin'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = 'C:/Keil'
elif CROSS_TOOL == 'iar':
PLATFORM = 'iar'
EXEC_PATH = 'C:/Program Files/IAR Systems/Embedded Workbench 6.0 Evaluation'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'release'
if os.path.exists(os.path.abspath('./') + '/drivers'):
gcc_linkscripts_path = 'drivers/linker_scripts/link.lds'
armcc_linkscripts_path = 'drivers/linker_scripts/link.sct'
iar_linkscripts_path = 'drivers/linker_scripts/link.icf'
else:
gcc_linkscripts_path = '../../drivers/linker_scripts/link.lds'
armcc_linkscripts_path = '../../drivers/linker_scripts/link.sct'
iar_linkscripts_path = '../../drivers/linker_scripts/link.icf'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
CXX = PREFIX + 'g++'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
NM = PREFIX + 'nm'
DEVICE = ' -mcpu=cortex-m3 -mthumb -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' -std=gnu99 -w -fno-common -fomit-frame-pointer -fno-short-enums -fsigned-char'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
LFLAGS = DEVICE + ' -lm -lgcc -lc' + ' -g --specs=nano.specs -nostartfiles -Wl,-Map=rtthread-w60x.map -Os -Wl,--gc-sections -Wl,--cref -Wl,--entry=Reset_Handler -Wl,--no-enum-size-warning -Wl,--no-wchar-size-warning -T ' + gcc_linkscripts_path
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 -g -Wall'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2 -Wall'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
POST_ACTION += 'python ./makeimg.py'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cpu=Cortex-M3'
CFLAGS = DEVICE + ' --apcs=interwork --c99 --gnu'
AFLAGS = DEVICE + ' --apcs=interwork '
LFLAGS = DEVICE + ' --scatter ' + armcc_linkscripts_path + ' --info sizes --info totals --info unused --info veneers --list rt-thread.map --strict'
LFLAGS += ' --libpath=' + EXEC_PATH + '/ARM/ARMCC/lib'
EXEC_PATH += '/ARM/ARMCC/bin/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET \n'
POST_ACTION += 'python ./makeimg.py'
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = '-Dewarm'
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M3'
CFLAGS += ' -e'
CFLAGS += ' --fpu=None'
CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' --silent'
AFLAGS = DEVICE
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M3'
AFLAGS += ' --fpu None'
AFLAGS += ' -S'
if BUILD == 'debug':
CFLAGS += ' --debug'
CFLAGS += ' -On'
else:
CFLAGS += ' -Oh'
LFLAGS = ' --config ' + iar_linkscripts_path
LFLAGS += ' --entry __iar_program_start'
EXEC_PATH = EXEC_PATH + '/arm/bin/'
POST_ACTION = 'ielftool --bin $TARGET rtthread.bin \n'
POST_ACTION += 'python ./makeimg.py'
| 30.21831 | 247 | 0.595432 |
acfa44fc8ceb39db2e732e21787e2168317bc7c9 | 591 | py | Python | recipe/run_test.py | csdms-stack/dakotathon-csdms-recipe | d8fc7030b78340b603ff5203eea1568aea99dec9 | [
"MIT"
] | null | null | null | recipe/run_test.py | csdms-stack/dakotathon-csdms-recipe | d8fc7030b78340b603ff5203eea1568aea99dec9 | [
"MIT"
] | null | null | null | recipe/run_test.py | csdms-stack/dakotathon-csdms-recipe | d8fc7030b78340b603ff5203eea1568aea99dec9 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
import os
import pymt.components
dakota_methods = ('CenteredParameterStudy',
'MultidimParameterStudy',
'VectorParameterStudy',
'PolynomialChaos',
'Sampling',
'StochasticCollocation')
os.mkdir('_testing')
os.chdir('_testing')
for method in dakota_methods:
Model = getattr(pymt.components, method)
model = Model()
for default in model.defaults:
print('{name}: {val} {units}'.format(
name=default[0], val=default[1][0], units=default[1][1]))
| 23.64 | 69 | 0.588832 |
acfa45c2b0fe0c69c3c69c1c639ec4b9dbe6282e | 493 | py | Python | checkio/Ice Base/Pangram/pangram.py | KenMercusLai/checkio | c7702221e1bc0b0b30425859ffa6c09722949d65 | [
"MIT"
] | 39 | 2015-02-09T13:24:12.000Z | 2019-05-16T17:51:19.000Z | checkio/Ice Base/Pangram/pangram.py | KenMercusLai/checkio | c7702221e1bc0b0b30425859ffa6c09722949d65 | [
"MIT"
] | 1 | 2019-10-21T16:18:14.000Z | 2019-10-21T16:18:14.000Z | checkio/Ice Base/Pangram/pangram.py | KenMercusLai/checkio | c7702221e1bc0b0b30425859ffa6c09722949d65 | [
"MIT"
] | 22 | 2015-01-30T18:00:05.000Z | 2021-05-22T02:57:23.000Z | __author__ = 'KenMercusLai'
def check_pangram(text):
return len({i.lower() for i in text if 'a' <= i.lower() <= 'z'}) == 26
if __name__ == '__main__':
# These "asserts" using only for self-checking and not necessary for auto-testing
assert check_pangram("The quick brown fox jumps over the lazy dog."), "brown fox"
assert not check_pangram("ABCDEF"), "ABC"
assert check_pangram(
"Bored? Craving a pub quiz fix? Why, just come to the Royal Oak!"
), "Bored?"
| 32.866667 | 85 | 0.661258 |
acfa45dd8ad7557bf82bea2f00eef6b580cb08d2 | 1,545 | py | Python | setup.py | timogoosen/rds-snap | bb3a6b02296e9d3d491c87bb9afa66641966d36b | [
"MIT"
] | null | null | null | setup.py | timogoosen/rds-snap | bb3a6b02296e9d3d491c87bb9afa66641966d36b | [
"MIT"
] | 1 | 2021-09-15T08:37:43.000Z | 2021-09-15T08:37:43.000Z | setup.py | timogoosen/rds-snap | bb3a6b02296e9d3d491c87bb9afa66641966d36b | [
"MIT"
] | 3 | 2021-08-17T14:03:16.000Z | 2021-09-23T11:05:03.000Z | from setuptools import setup, find_packages
from rds_snap.__main__ import version
from io import open
from os import path
import pathlib
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# README.md content (explicit encoding: locale default may mis-decode it)
README = (HERE / "README.md").read_text(encoding="utf-8")
# automatically captured required modules for install_requires in requirements.txt
with open(path.join(HERE, "requirements.txt"), encoding="utf-8") as f:
    all_reqs = f.read().split("\n")
# Plain requirements: skip comments, pip option lines, and VCS (git+) URLs.
install_requires = [
    x.strip()
    for x in all_reqs
    if ("git+" not in x) and (not x.startswith("#")) and (not x.startswith("-"))
]
# VCS requirements become dependency links. The original filter used
# '"git+" not in x', which selected the WRONG lines (the plain requirements,
# on which replace("git+", "") is a no-op); only git+ URLs belong here.
dependency_links = [
    x.strip().replace("git+", "") for x in all_reqs if "git+" in x
]
setup(
    name="rds-snap",
    version=version(),
    keywords="aws, rds, aurora, snapshot, cluster",
    author="Ringier Tech",
    author_email="tools@ringier.co.za",
    description="Tool to allow for the management of aws rds aurora snapshots and clusters.",
    long_description=README,
    long_description_content_type="text/markdown",
    url="https://github.com/RingierIMU/rds-snap",
    packages=find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3",
    install_requires=install_requires,
    dependency_links=dependency_links,
    entry_points={
        "console_scripts": [
            "rds-snap=rds_snap.__main__:main",
        ],
    },
)
| 30.9 | 93 | 0.662136 |
acfa466813b4d1d84b0ee0541e7983c6a522ac79 | 1,136 | py | Python | examples/librispeech/asr2/local/espnet_json_to_manifest.py | JiehangXie/PaddleSpeech | 60090b49ec27437127ab62358026dd5bb95fccc7 | [
"Apache-2.0"
] | 1,540 | 2017-11-14T13:26:33.000Z | 2021-11-09T14:05:08.000Z | examples/librispeech/asr2/local/espnet_json_to_manifest.py | JiehangXie/PaddleSpeech | 60090b49ec27437127ab62358026dd5bb95fccc7 | [
"Apache-2.0"
] | 599 | 2017-11-14T13:19:12.000Z | 2021-11-09T01:58:26.000Z | examples/librispeech/asr2/local/espnet_json_to_manifest.py | JiehangXie/PaddleSpeech | 60090b49ec27437127ab62358026dd5bb95fccc7 | [
"Apache-2.0"
] | 449 | 2017-11-14T12:48:46.000Z | 2021-11-06T09:34:33.000Z | #!/usr/bin/env python
import argparse
import json
def main(args):
with open(args.json_file, 'r') as fin:
data_json = json.load(fin)
# manifest format:
# {"input": [
# {"feat": "dev/deltafalse/feats.1.ark:842920", "name": "input1", "shape": [349, 83]}
# ],
# "output": [
# {"name": "target1", "shape": [12, 5002], "text": "NO APOLLO", "token": "▁NO ▁A PO LL O", "tokenid": "3144 482 352 269 317"}
# ],
# "utt2spk": "116-288045",
# "utt": "116-288045-0019"}
with open(args.manifest_file, 'w') as fout:
for key, value in data_json['utts'].items():
value['utt'] = key
fout.write(json.dumps(value, ensure_ascii=False))
fout.write("\n")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--json-file', type=str, default=None, help="espnet data json file.")
parser.add_argument(
'--manifest-file',
type=str,
default='maniefst.train',
help='manifest data json line file.')
args = parser.parse_args()
main(args)
| 30.702703 | 135 | 0.566021 |
acfa46bcf4597b6c2fc322693551458b9efe3d87 | 1,457 | py | Python | message/compose_message.py | zachjamesjohn/email-report | 8190f68f56f0a1cf45160172e1a28d6e258cc7db | [
"MIT"
] | 36 | 2018-04-10T16:01:12.000Z | 2022-02-13T19:03:12.000Z | message/compose_message.py | zachjamesjohn/email-report | 8190f68f56f0a1cf45160172e1a28d6e258cc7db | [
"MIT"
] | 1 | 2020-07-08T17:37:50.000Z | 2020-07-08T17:37:50.000Z | message/compose_message.py | zachjamesjohn/email-report | 8190f68f56f0a1cf45160172e1a28d6e258cc7db | [
"MIT"
] | 16 | 2018-04-10T18:48:10.000Z | 2021-05-20T02:03:28.000Z | """ This function call all of the other functions and composes the message body.
if your custom report modules are in the message folder, then the following import syntax will work:
import message.YourFile as YourFile
You can then call your functions inside the create_text_body() function using the syntax:
YourFile.YourFunction()
"""
import message.countdowns as countdowns #DEMO
import message.game_scores as game_scores #DEMO
#from
def create_text_body():
""" Call the functions that compose the email, building up the body
of the message step by step and then appending these to """
body_string = ''
""" this section calls the demo functions and builds up the
information I want in my email report (be aware these outputs are all strings)
substitute in your custom report functions here! """
day_of_the_year = countdowns.day_of_year() #DEMO
day_of_my_life = countdowns.time_alive() #DEMO
jays_game = game_scores.get_team_result_text('Toronto Blue Jays') #DEMO
""" this section adds the strings to the message body
substitute in the strings you generate here! """
body_string += 'Countdowns:\n' #DEMO
body_string += day_of_the_year #DEMO
body_string += day_of_my_life #DEMO
body_string += '\n\n' #to add some separation #DEMO
body_string += jays_game #DEMO
body_string += '\n\n' #to add some separation #DEMO
return body_string #return the string to email_me.py it is then written into the email
| 33.883721 | 100 | 0.753603 |
acfa46c303b07e31e4366504927a152af9b283cc | 828 | py | Python | RegExCheetSheet.py | PradeepDongre/PythonCode | 16cf192b6c0045ca5f1e9a314a1abb8edb188784 | [
"MIT"
] | null | null | null | RegExCheetSheet.py | PradeepDongre/PythonCode | 16cf192b6c0045ca5f1e9a314a1abb8edb188784 | [
"MIT"
] | null | null | null | RegExCheetSheet.py | PradeepDongre/PythonCode | 16cf192b6c0045ca5f1e9a314a1abb8edb188784 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 22 21:46:38 2018
@author: dongrp2
"""
. - Any Character Except New Line
\d - Digit (0-9)
\D - Not a Digit (0-9)
\w - Word Character (a-z, A-Z, 0-9, _)
\W - Not a Word Character
\s - Whitespace (space, tab, newline)
\S - Not Whitespace (space, tab, newline)
\b - Word Boundary
\B - Not a Word Boundary
^ - Beginning of a String
$ - End of a String
[] - Matches Characters in brackets
[^ ] - Matches Characters NOT in brackets
| - Either Or
( ) - Group
Quantifiers:
* - 0 or More
+ - 1 or More
? - 0 or One
{3} - Exact Number
{3,4} - Range of Numbers (Minimum, Maximum)
#### Sample Regexs ####
[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+ | 23 | 47 | 0.51087 |
acfa475b07afa264b2173b1c5388ad8d485ca266 | 1,452 | py | Python | nlplingo/tasks/common/examplegenerator.py | BBN-E/nlplingo | 32ff17b1320937faa3d3ebe727032f4b3e7a353d | [
"Apache-2.0"
] | 3 | 2020-10-22T13:28:00.000Z | 2022-03-24T19:57:22.000Z | nlplingo/tasks/common/examplegenerator.py | BBN-E/nlplingo | 32ff17b1320937faa3d3ebe727032f4b3e7a353d | [
"Apache-2.0"
] | null | null | null | nlplingo/tasks/common/examplegenerator.py | BBN-E/nlplingo | 32ff17b1320937faa3d3ebe727032f4b3e7a353d | [
"Apache-2.0"
] | 1 | 2020-10-22T13:29:51.000Z | 2020-10-22T13:29:51.000Z | from collections import defaultdict
from abc import ABC, abstractmethod
class ExampleGenerator(ABC):
    """Base class for task example generators.

    Stores the shared configuration (event domain, parameter dictionaries
    and hyper-parameters) and provides the common machinery for turning a
    list of datapoints into feature-indexed data. Concrete subclasses
    implement :meth:`generate` to produce candidates from documents.
    """

    def __init__(self, event_domain, params, extractor_params, hyper_params):
        """
        :type event_domain: nlplingo.event.event_domain.EventDomain
        :type params: dict
        :type extractor_params: dict
        :type hyper_params: nlplingo.nn.extractor.HyperParameters
        """
        self.event_domain = event_domain
        self.params = params
        self.extractor_params = extractor_params
        self.hyper_params = hyper_params
        # Generation statistics, keyed by arbitrary labels; defaults to 0.
        self.statistics = defaultdict(int)
        self.train_dev_test_mode = None
        # Convenience mirror of the hyper-parameters' decode mode flag.
        self.decode_mode = self.hyper_params.decode_mode

    def examples_to_data_dict(self, examples, features):
        """Group example feature values into a dict keyed by feature name.

        :type examples: list[nlplingo.tasks.common.Datapoint]
        :type features: nlplingo.tasks.feature.feature_setting.FeatureSetting
        """
        grouped = defaultdict(list)
        for datapoint in examples:
            for feature_name, value in datapoint.to_data_dict(features).items():
                grouped[feature_name].append(value)
        return grouped

    @abstractmethod
    def generate(self, docs):
        """Generate candidate datapoints from nlplingo documents.

        :param docs: list[nlplingo.text.text_theory.Document]
        """
        pass
acfa48a22e0d3a7f5e09e73f101a16509a670929 | 62 | py | Python | vulkan/__init__.py | mackst/vulkan | dc44a8496201821e07beeb3b4479beda6dc69ae3 | [
"Apache-2.0"
] | null | null | null | vulkan/__init__.py | mackst/vulkan | dc44a8496201821e07beeb3b4479beda6dc69ae3 | [
"Apache-2.0"
] | null | null | null | vulkan/__init__.py | mackst/vulkan | dc44a8496201821e07beeb3b4479beda6dc69ae3 | [
"Apache-2.0"
] | null | null | null | from vulkan._vulkan import * # noqa
__version__ = '1.1.71.2'
| 15.5 | 35 | 0.693548 |
acfa499736bb8fb83fa49b5704fb5449a18616a3 | 1,539 | py | Python | qlearning4k/games/catch.py | AlexGreason/DeepQLearning | bc554946bf84644b3430aeab9ad27c2c1f08f689 | [
"MIT"
] | null | null | null | qlearning4k/games/catch.py | AlexGreason/DeepQLearning | bc554946bf84644b3430aeab9ad27c2c1f08f689 | [
"MIT"
] | null | null | null | qlearning4k/games/catch.py | AlexGreason/DeepQLearning | bc554946bf84644b3430aeab9ad27c2c1f08f689 | [
"MIT"
] | null | null | null | __author__ = "Eder Santana"
import numpy as np
from .game import Game
class Catch(Game):
    """Falling-fruit game on a square grid.

    A fruit drops one row per turn from the top of the grid; the player
    moves a three-cell basket along the bottom row to catch it.
    """

    def __init__(self, grid_size=10):
        """
        :param grid_size: width/height of the square playing field.
        """
        self.grid_size = grid_size
        self.won = False
        self.reset()

    def reset(self):
        """Start a new round with the fruit and basket at random columns."""
        # Bug fix: clear the win flag from any previous round; it was only
        # initialised in __init__ and stayed True after a winning episode.
        self.won = False
        # Use plain scalar draws (not size=1 arrays, as before): mixing a
        # scalar 0 with length-1 arrays in np.asarray builds a ragged array,
        # which modern NumPy rejects. Bounds are unchanged.
        fruit_col = np.random.randint(0, self.grid_size - 1)
        basket_col = np.random.randint(1, self.grid_size - 2)
        # State row layout: [fruit_row, fruit_col, basket_col], shape (1, 3).
        self.state = np.asarray([0, fruit_col, basket_col])[np.newaxis]

    @property
    def name(self):
        return "Catch"

    @property
    def nb_actions(self):
        # Three actions: move left, stay, move right.
        return 3

    def play(self, action):
        """Advance one step: move the basket, then drop the fruit one row.

        :param action: 0 = move left, 1 = stay, 2 = move right.
        """
        state = self.state
        if action == 0:
            move = -1
        elif action == 1:
            move = 0
        else:
            move = 1
        fruit_row, fruit_col, basket = state[0]
        # Clamp the basket centre to [1, grid_size - 1] so the three-cell
        # basket stays on the board.
        new_basket = min(max(1, basket + move), self.grid_size - 1)
        fruit_row += 1
        out = np.asarray([fruit_row, fruit_col, new_basket])
        out = out[np.newaxis]
        assert len(out.shape) == 2
        self.state = out

    def get_state(self):
        """Render the state as a grid_size x grid_size binary image."""
        im_size = (self.grid_size,) * 2
        state = self.state[0]
        canvas = np.zeros(im_size)
        canvas[state[0], state[1]] = 1  # the falling fruit
        canvas[-1, state[2] - 1:state[2] + 2] = 1  # the three-cell basket
        return canvas

    def get_score(self):
        """Return +1 for a catch, -1 for a miss, 0 while the fruit falls."""
        fruit_row, fruit_col, basket = self.state[0]
        if fruit_row == self.grid_size - 1:
            if abs(fruit_col - basket) <= 1:
                self.won = True
                return 1
            else:
                return -1
        else:
            return 0

    def is_over(self):
        """The round ends when the fruit reaches the bottom row."""
        # bool() keeps the original True/False return type (not np.bool_).
        return bool(self.state[0, 0] == self.grid_size - 1)

    def is_won(self):
        """True when the fruit ended up within one cell of the basket."""
        fruit_row, fruit_col, basket = self.state[0]
        return fruit_row == self.grid_size - 1 and abs(fruit_col - basket) <= 1
| 21.375 | 72 | 0.615335 |
acfa4a5a15cc191115a3201cfb817d7bac508fd0 | 4,036 | py | Python | examples/HypSelector/grouped_reactions/run_grouped_reactions_multinest.py | LoLab-VU/Gleipnir | 6085435f4840d403c0878b0d50192565ccc82965 | [
"MIT"
] | 2 | 2019-07-11T14:45:04.000Z | 2019-07-29T16:21:41.000Z | examples/HypSelector/grouped_reactions/run_grouped_reactions_multinest.py | LoLab-VU/Gleipnir | 6085435f4840d403c0878b0d50192565ccc82965 | [
"MIT"
] | 13 | 2019-05-20T03:14:03.000Z | 2021-01-07T17:23:23.000Z | examples/HypSelector/grouped_reactions/run_grouped_reactions_multinest.py | LoLab-VU/Gleipnir | 6085435f4840d403c0878b0d50192565ccc82965 | [
"MIT"
] | 3 | 2019-05-29T04:53:58.000Z | 2020-09-25T22:15:11.000Z | """
Example use of HypSelector with a grouped reactions set of model hypotheses
generated by HypBuilder. In this example, we do the selection
with Nested Sampling using MultiNest via Gleipnir.
Adapted from the grouped_reactions_example from HypBuilder:
https://github.com/LoLab-VU/HypBuilder/blob/master/grouped_reactions_example.csv
The data used in this example is synthetic data generated from model_0 with the default
parameters defined in the csv file; they are the last 10 timepoints.
"""
import numpy as np
try:
from pysb.simulator import ScipyOdeSimulator
except ImportError as err:
raise err
from gleipnir.pysb_utilities import HypSelector
if __name__ == '__main__':
    # The HypBuilder format model csv file.
    model_csv = 'grouped_reactions.csv'
    # The timespan of the simulations.
    tspan = np.linspace(0, 5, 20)
    # Define what ODE solver to use.
    solver = ScipyOdeSimulator
    # Load the data.
    data = np.load("model_0_AB_complex_data.npy")
    # Define the fancy indexer or mask for the time points that the data
    # corresponds to. -- In this case it is the last ten (out of 20) time points.
    data_time_idxs = np.array(list(range(len(tspan))))[10:]
    # Generate the observable data tuple for this observable: (data, data_sd, data_time_idxs)
    # NOTE(review): the standard deviation slot is None here, so the
    # likelihood is presumably unweighted -- confirm against HypSelector docs.
    obs_data_t = tuple((data,None,data_time_idxs))
    # Generate the dictionary of observable data that is to be used in
    # computing the likelihood. -- Here we are just using the AB_complex
    # observable, which is the amount of A(B=1)%B(A=1).
    observable_data = dict()
    observable_data['AB_complex'] = obs_data_t
    # Build the HypSelector.
    selector = HypSelector(model_csv)
    # Check the number of models that were generated.
    n_models = selector.number_of_models()
    print("Generated {} models from input csv".format(n_models))
    # Append the needed observable to the model files
    obs_line = "Observable(\'AB_complex\',A(B=1)%B(A=1))"
    selector.append_to_models(obs_line)
    # Now let's construct the Nested Samplers for the models.
    # ns_version='multinest' will use the MultiNest code.
    # ns_population_size will set the active population size for the Nested
    # Sampling runs (100 is used below).
    # log_likelihood_type='mse' will use the minus of the Mean Squared Error (mse)
    # as the log_likelihood estimator.
    # Input dictionary of additional keyword arguments to be passed to the
    # Nested Samplers.
    ns_kwargs = dict()
    # Let's set the sampling efficiency for MultiNest to 0.3 which is
    # supposed to be good enough for evidence estimation (0.8 is recommended
    # for when the posterior distributions are wanted).
    ns_kwargs['sampling_efficiency'] = 0.3
    selector.gen_nested_samplers(tspan, observable_data, solver=solver,
                                 ns_version='multinest',
                                 ns_population_size=100, ns_kwargs=ns_kwargs,
                                 log_likelihood_type='mse')
    # Do the Nested Sampling runs. -- The output is a pandas DataFrame.
    # Note that the output is already sorted from highest to lowest log evidence.
    # Running should take about 2-3 minutes with a population size of 100.
    sorted_log_evidences = selector.run_nested_sampling()
    # model_0 is the correct model (i.e., the data comes from a simulation
    # of model_0). However, the order should be: model_0, model_2, and then
    # model_1.
    print("The models and their log_evidence values-sorted:")
    print(sorted_log_evidences)
    print(" ")
    # We can now also look at the Bayes factors which are computed as ratio of
    # evidence values; i.e., evidence_model_column/evidence_model_row
    bayes_factors = selector.bayes_factors()
    print("DataFrame of the Bayes factor matrix:")
    print(bayes_factors)
    print(" ")
    # Let's look at the Bayes factors for model_0; i.e.,
    # evidence_model_0/evidence_other_model
    print("Bayes factors for the model_0 numerator ratios:")
    print(bayes_factors['model_0'])
| 45.348315 | 93 | 0.716303 |
acfa4bef1ab681e4e3d6367f61f5e50c02187341 | 2,123 | py | Python | employeeproject/urls.py | TeeblaQ1/employeeAPI | 557f7db75fffa7bfc31603c3511ccffe597f59d9 | [
"MIT"
] | null | null | null | employeeproject/urls.py | TeeblaQ1/employeeAPI | 557f7db75fffa7bfc31603c3511ccffe597f59d9 | [
"MIT"
] | null | null | null | employeeproject/urls.py | TeeblaQ1/employeeAPI | 557f7db75fffa7bfc31603c3511ccffe597f59d9 | [
"MIT"
] | null | null | null | """employeeproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# from rest_auth.registration.views import VerifyEmailView
# from rest_framework.documentation import include_docs_urls
# from rest_framework_swagger.views import get_swagger_view
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# drf-yasg schema view: serves the generated OpenAPI spec for the Swagger
# and ReDoc UIs; open to anyone (public + AllowAny).
schema_view = get_schema_view(
    openapi.Info(
        title="Employee API",
        default_version='v1',
        description="This is an API that can be used for CRUD operations on Employee Entities.",
        terms_of_service="https://github.com/TeeblaQ1",
        contact=openapi.Contact(email="oluwateeblaq@gmail.com"),
        license=openapi.License(name="MIT License"),
    ),
    public=True,
    permission_classes=(permissions.AllowAny,),
)

# URL routes: Django admin, the employee API, DRF/rest_auth auth endpoints,
# and the interactive API documentation (Swagger at the site root, ReDoc
# under /redoc/). Commented-out entries are kept for reference.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('api/v1/', include('entities.urls')),
    path('api-auth/', include('rest_framework.urls')),
    path('api/v1/rest-auth/', include('rest_auth.urls')),
    # path('api/v1/rest-auth/registration/account-confirm-email', VerifyEmailView.as_view(template_name='rest_framework:login'), name='account_email_confirmation_sent'),
    path('api/v1/rest-auth/registration/', include('rest_auth.registration.urls')),
    # path('docs/', swagger_view),
    path('', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
    path('redoc/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
]
acfa4cd8be4c7fb3a584bfa15fcebe72e1973b20 | 13,799 | py | Python | networkapiclient/OptionPool.py | shildenbrand/GloboNetworkAPI-client-python | 728ea9d13e3004e62586f5eb6ae2eae2bc41a50e | [
"Apache-2.0"
] | 16 | 2015-05-09T16:33:01.000Z | 2019-10-24T19:06:03.000Z | networkapiclient/OptionPool.py | shildenbrand/GloboNetworkAPI-client-python | 728ea9d13e3004e62586f5eb6ae2eae2bc41a50e | [
"Apache-2.0"
] | 3 | 2019-08-09T20:18:12.000Z | 2019-11-11T17:23:48.000Z | networkapiclient/OptionPool.py | shildenbrand/GloboNetworkAPI-client-python | 728ea9d13e3004e62586f5eb6ae2eae2bc41a50e | [
"Apache-2.0"
] | 15 | 2015-02-03T17:10:59.000Z | 2021-05-14T21:01:37.000Z | # -*- coding:utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from networkapiclient.exception import InvalidParameterError
from networkapiclient.utils import is_valid_int_param
from networkapiclient.ApiGenericClient import ApiGenericClient
class OptionPool(ApiGenericClient):

    """Client for the Option Pool resources of the networkAPI REST service.

    Provides CRUD operations for pool options (``api/pools/options/``) and
    for their associations with environments
    (``api/pools/environment_options/``).
    """

    def __init__(self, networkapi_url, user, password, user_ldap=None):
        """Class constructor receives parameters to connect to the networkAPI.

        :param networkapi_url: URL to access the network API.
        :param user: User for authentication.
        :param password: Password for authentication.
        :param user_ldap: Optional LDAP user for authentication.
        """
        super(OptionPool, self).__init__(
            networkapi_url,
            user,
            password,
            user_ldap)

    @staticmethod
    def _require_valid_id(identifier, description):
        """Validate that ``identifier`` is a valid positive integer parameter.

        :param identifier: Identifier value to validate.
        :param description: Resource name used to compose the error message.

        :raise InvalidParameterError: The identifier is null or invalid.
        """
        if not is_valid_int_param(identifier):
            raise InvalidParameterError(
                u'The identifier of %s is invalid or was not informed.' %
                description)

    def add(self, tipo_opcao, nome_opcao):
        """Inserts a new Option Pool and returns its identifier.

        :param tipo_opcao: Type. String with a maximum of 50 characters and respect [a-zA-Z\_-]
        :param nome_opcao: Name Option. String with a maximum of 50 characters and respect [a-zA-Z\_-]

        :return: Following dictionary: {'id': <id>, 'type': <type>, 'name': <name>}

        :raise InvalidParameterError: The value of tipo_opcao or nome_opcao is invalid.
        :raise DataBaseError: Networkapi failed to access the database.
        :raise XMLError: Networkapi failed to generate the XML response.
        """
        url = 'api/pools/options/save/'
        return self.post(url, {'type': tipo_opcao, 'name': nome_opcao})

    def modify(self, id_option_pool, tipo_opcao, nome_opcao):
        """Change Option Pool from by id.

        :param id_option_pool: Identifier of the Option Pool. Integer value and greater than zero.
        :param tipo_opcao: Type. String with a maximum of 50 characters and respect [a-zA-Z\_-]
        :param nome_opcao: Name Option. String with a maximum of 50 characters and respect [a-zA-Z\_-]

        :return: None

        :raise InvalidParameterError: Option Pool identifier is null or invalid.
        :raise InvalidParameterError: The value of tipo_opcao or nome_opcao is invalid.
        :raise optionpoolNotFoundError: Option pool not registered.
        :raise DataBaseError: Networkapi failed to access the database.
        :raise XMLError: Networkapi failed to generate the XML response.
        """
        self._require_valid_id(id_option_pool, u'Option Pool')
        url = 'api/pools/options/' + str(id_option_pool) + '/'
        return self.put(url, {'type': tipo_opcao, 'name': nome_opcao})

    def remove(self, id_option_pool):
        """Remove Option pool by identifier and all Environment related.

        :param id_option_pool: Identifier of the Option Pool. Integer value and greater than zero.

        :return: None

        :raise InvalidParameterError: Option Pool identifier is null and invalid.
        :raise optionpoolNotFoundError: Option Pool not registered.
        :raise optionpoolError: Option Pool associated with Pool.
        :raise DataBaseError: Networkapi failed to access the database.
        :raise XMLError: Networkapi failed to generate the XML response.
        """
        self._require_valid_id(id_option_pool, u'Option Pool')
        url = 'api/pools/options/' + str(id_option_pool) + '/'
        return self.delete(url)

    def get_option_pool(self, id_option_pool):
        """Search Option Pool by id.

        :param id_option_pool: Identifier of the Option Pool. Integer value and greater than zero.

        :return: Following dictionary:
            {'id': <id_option_pool>, 'type': <tipo_opcao>, 'name': <nome_opcao>}

        :raise InvalidParameterError: Option Pool identifier is null and invalid.
        :raise optionpoolNotFoundError: Option Pool not registered.
        :raise DataBaseError: Networkapi failed to access the database.
        :raise XMLError: Networkapi failed to generate the XML response.
        """
        self._require_valid_id(id_option_pool, u'Option Pool')
        url = 'api/pools/options/' + str(id_option_pool) + '/'
        return self.get(url)

    def get_all_option_pool(self, option_type=None):
        """Get all Option Pool, optionally filtered by type.

        :param option_type: Optional option type used to filter the listing.

        :return: Dictionary with the following structure:
            {[{'id': <id>, 'type': <tipo_opcao>, 'name': <nome_opcao>},
              ... other option pool ...]}

        :raise optionpoolNotFoundError: Option Pool not registered.
        :raise DataBaseError: Can't connect to networkapi database.
        :raise XMLError: Failed to generate the XML response.
        """
        url = 'api/pools/options/'
        if option_type:
            url += '?type=' + option_type
        return self.get(url)

    def get_all_environment_option_pool(self, id_environment=None,
                                        option_id=None, option_type=None):
        """Get all Option Pool associations, optionally filtered.

        :param id_environment: Optional environment identifier filter.
        :param option_id: Optional option identifier filter.
        :param option_type: Optional option type filter.

        :return: Dictionary with the following structure:
            {[{'id': <id>,
               option: {'id': <id>, 'type': <type>, 'name': <name>},
               environment: {'id': <id>, ... all environment info}},
              ... other associations ...]}

        :raise EnvironmentVipNotFoundError: Environment Pool not registered.
        :raise DataBaseError: Can't connect to networkapi database.
        :raise XMLError: Failed to generate the XML response.
        """
        # Build the query string from whichever filters were supplied. The
        # filters are always emitted in the order environment_id, option_id,
        # option_type -- the same order the previous nested if/else produced.
        filters = []
        if id_environment:
            filters.append('environment_id=' + str(id_environment))
        if option_id:
            filters.append('option_id=' + str(option_id))
        if option_type:
            filters.append('option_type=' + option_type)
        url = 'api/pools/environment_options/'
        if filters:
            url += '?' + '&'.join(filters)
        return self.get(url)

    def associate_environment_option_pool(self, id_option_pool, id_environment):
        """Create a relationship of optionpool with Environment.

        :param id_option_pool: Identifier of the Option Pool. Integer value and greater than zero.
        :param id_environment: Identifier of the Environment. Integer value and greater than zero.

        :return: Dictionary with the following structure:
            {'id': <id>,
             option: {'id': <id>, 'type': <type>, 'name': <name>},
             environment: {'id': <id>, ... all environment info}}

        :raise InvalidParameterError: Option Pool/Environment Pool identifier is null and/or invalid.
        :raise optionpoolNotFoundError: Option Pool not registered.
        :raise EnvironmentVipNotFoundError: Environment Pool not registered.
        :raise optionpoolError: Option Pool is already associated with the environment pool.
        :raise UserNotAuthorizedError: User does not have authorization to make this association.
        :raise DataBaseError: Networkapi failed to access the database.
        :raise XMLError: Networkapi failed to generate the XML response.
        """
        self._require_valid_id(id_option_pool, u'Option Pool')
        self._require_valid_id(id_environment, u'Environment Pool')
        url = 'api/pools/environment_options/save/'
        return self.post(url, {'option_id': id_option_pool,
                               'environment_id': id_environment})

    def get_environment_option_pool(self, environment_option_id):
        """Get Environment Option Pool by id.

        :param environment_option_id: Identifier of the association.

        :return: Dictionary with the following structure:
            {'id': <id>,
             option: {'id': <id>, 'type': <type>, 'name': <name>},
             environment: {'id': <id>, ... all environment info}}

        :raise EnvironmentVipNotFoundError: Environment Pool not registered.
        :raise DataBaseError: Can't connect to networkapi database.
        :raise XMLError: Failed to generate the XML response.
        """
        url = 'api/pools/environment_options/' + str(environment_option_id) + '/'
        return self.get(url)

    def disassociate_environment_option_pool(self, environment_option_id):
        """Remove a relationship of optionpool with Environment.

        :param environment_option_id: Identifier of the association. Integer value and greater than zero.

        :return: {'id': <environment_option_id>}

        :raise InvalidParameterError: Association identifier is null and/or invalid.
        :raise optionpoolNotFoundError: Option Pool not registered.
        :raise EnvironmentVipNotFoundError: Environment VIP not registered.
        :raise optionpoolError: Option pool is not associated with the environment pool.
        :raise UserNotAuthorizedError: User does not have authorization to make this association.
        :raise DataBaseError: Networkapi failed to access the database.
        :raise XMLError: Networkapi failed to generate the XML response.
        """
        # The original code validated environment_option_id twice (with two
        # different error messages); the second check could never fire, so a
        # single check with the first message is behaviourally equivalent.
        self._require_valid_id(environment_option_id, u'Option Pool')
        url = 'api/pools/environment_options/' + str(environment_option_id) + '/'
        return self.delete(url)

    def modify_environment_option_pool(self, environment_option_id,
                                       id_option_pool, id_environment):
        """Change the option/environment pair of an existing association.

        :param environment_option_id: Identifier of the association. Integer value and greater than zero.
        :param id_option_pool: Identifier of the Option Pool. Integer value and greater than zero.
        :param id_environment: Identifier of the Environment Pool. Integer value and greater than zero.

        :return: Dictionary with the following structure:
            {'id': <id>,
             option: {'id': <id>, 'type': <type>, 'name': <name>},
             environment: {'id': <id>, ... all environment info}}

        :raise InvalidParameterError: Association identifier is null and/or invalid.
        :raise optionpoolNotFoundError: Option Pool not registered.
        :raise EnvironmentVipNotFoundError: Environment VIP not registered.
        :raise optionpoolError: Option pool is not associated with the environment pool.
        :raise UserNotAuthorizedError: User does not have authorization to make this association.
        :raise DataBaseError: Networkapi failed to access the database.
        :raise XMLError: Networkapi failed to generate the XML response.
        """
        self._require_valid_id(environment_option_id, u'Environment Option Pool')
        url = 'api/pools/environment_options/' + str(environment_option_id) + '/'
        return self.put(url, {'option_id': id_option_pool,
                              'environment_id': id_environment})
acfa4d24d2a394e1b06f0ec1799e58b343d0a5f4 | 7,033 | py | Python | src/oci/oda/models/update_oda_instance_attachment_details.py | pabs3/oci-python-sdk | 437ba18ce39af2d1090e277c4bb8750c89f83021 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/oda/models/update_oda_instance_attachment_details.py | pabs3/oci-python-sdk | 437ba18ce39af2d1090e277c4bb8750c89f83021 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/oda/models/update_oda_instance_attachment_details.py | pabs3/oci-python-sdk | 437ba18ce39af2d1090e277c4bb8750c89f83021 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
# NOTE: machine-generated OCI SDK model class; its structure (swagger_types,
# attribute_map, one property pair per field) follows the SDK generator's
# conventions and should not be hand-restructured.
@init_model_state_from_kwargs
class UpdateOdaInstanceAttachmentDetails(object):
    """
    ODA attachment details to be updated.
    """
    def __init__(self, **kwargs):
        """
        Initializes a new UpdateOdaInstanceAttachmentDetails object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):
        :param attachment_metadata:
            The value to assign to the attachment_metadata property of this UpdateOdaInstanceAttachmentDetails.
        :type attachment_metadata: str
        :param restricted_operations:
            The value to assign to the restricted_operations property of this UpdateOdaInstanceAttachmentDetails.
        :type restricted_operations: list[str]
        :param owner:
            The value to assign to the owner property of this UpdateOdaInstanceAttachmentDetails.
        :type owner: oci.oda.models.OdaInstanceAttachmentOwner
        :param freeform_tags:
            The value to assign to the freeform_tags property of this UpdateOdaInstanceAttachmentDetails.
        :type freeform_tags: dict(str, str)
        :param defined_tags:
            The value to assign to the defined_tags property of this UpdateOdaInstanceAttachmentDetails.
        :type defined_tags: dict(str, dict(str, object))
        """
        # Maps each attribute to its swagger type string, used by the SDK's
        # generic (de)serialization machinery.
        self.swagger_types = {
            'attachment_metadata': 'str',
            'restricted_operations': 'list[str]',
            'owner': 'OdaInstanceAttachmentOwner',
            'freeform_tags': 'dict(str, str)',
            'defined_tags': 'dict(str, dict(str, object))'
        }
        # Maps Python attribute names to the JSON field names sent on the wire.
        self.attribute_map = {
            'attachment_metadata': 'attachmentMetadata',
            'restricted_operations': 'restrictedOperations',
            'owner': 'owner',
            'freeform_tags': 'freeformTags',
            'defined_tags': 'definedTags'
        }
        # Private backing fields for the properties below; populated from
        # kwargs by the @init_model_state_from_kwargs decorator.
        self._attachment_metadata = None
        self._restricted_operations = None
        self._owner = None
        self._freeform_tags = None
        self._defined_tags = None
    @property
    def attachment_metadata(self):
        """
        **[Required]** Gets the attachment_metadata of this UpdateOdaInstanceAttachmentDetails.
        Attachment specific metadata. Defined by the target service.
        :return: The attachment_metadata of this UpdateOdaInstanceAttachmentDetails.
        :rtype: str
        """
        return self._attachment_metadata
    @attachment_metadata.setter
    def attachment_metadata(self, attachment_metadata):
        """
        Sets the attachment_metadata of this UpdateOdaInstanceAttachmentDetails.
        Attachment specific metadata. Defined by the target service.
        :param attachment_metadata: The attachment_metadata of this UpdateOdaInstanceAttachmentDetails.
        :type: str
        """
        self._attachment_metadata = attachment_metadata
    @property
    def restricted_operations(self):
        """
        **[Required]** Gets the restricted_operations of this UpdateOdaInstanceAttachmentDetails.
        List of operations that are restricted while this instance is attached.
        :return: The restricted_operations of this UpdateOdaInstanceAttachmentDetails.
        :rtype: list[str]
        """
        return self._restricted_operations
    @restricted_operations.setter
    def restricted_operations(self, restricted_operations):
        """
        Sets the restricted_operations of this UpdateOdaInstanceAttachmentDetails.
        List of operations that are restricted while this instance is attached.
        :param restricted_operations: The restricted_operations of this UpdateOdaInstanceAttachmentDetails.
        :type: list[str]
        """
        self._restricted_operations = restricted_operations
    @property
    def owner(self):
        """
        **[Required]** Gets the owner of this UpdateOdaInstanceAttachmentDetails.
        :return: The owner of this UpdateOdaInstanceAttachmentDetails.
        :rtype: oci.oda.models.OdaInstanceAttachmentOwner
        """
        return self._owner
    @owner.setter
    def owner(self, owner):
        """
        Sets the owner of this UpdateOdaInstanceAttachmentDetails.
        :param owner: The owner of this UpdateOdaInstanceAttachmentDetails.
        :type: oci.oda.models.OdaInstanceAttachmentOwner
        """
        self._owner = owner
    @property
    def freeform_tags(self):
        """
        Gets the freeform_tags of this UpdateOdaInstanceAttachmentDetails.
        Simple key-value pair that is applied without any predefined name, type, or scope.
        Example: `{\"bar-key\": \"value\"}`
        :return: The freeform_tags of this UpdateOdaInstanceAttachmentDetails.
        :rtype: dict(str, str)
        """
        return self._freeform_tags
    @freeform_tags.setter
    def freeform_tags(self, freeform_tags):
        """
        Sets the freeform_tags of this UpdateOdaInstanceAttachmentDetails.
        Simple key-value pair that is applied without any predefined name, type, or scope.
        Example: `{\"bar-key\": \"value\"}`
        :param freeform_tags: The freeform_tags of this UpdateOdaInstanceAttachmentDetails.
        :type: dict(str, str)
        """
        self._freeform_tags = freeform_tags
    @property
    def defined_tags(self):
        """
        Gets the defined_tags of this UpdateOdaInstanceAttachmentDetails.
        Usage of predefined tag keys. These predefined keys are scoped to namespaces.
        Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
        :return: The defined_tags of this UpdateOdaInstanceAttachmentDetails.
        :rtype: dict(str, dict(str, object))
        """
        return self._defined_tags
    @defined_tags.setter
    def defined_tags(self, defined_tags):
        """
        Sets the defined_tags of this UpdateOdaInstanceAttachmentDetails.
        Usage of predefined tag keys. These predefined keys are scoped to namespaces.
        Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
        :param defined_tags: The defined_tags of this UpdateOdaInstanceAttachmentDetails.
        :type: dict(str, dict(str, object))
        """
        self._defined_tags = defined_tags
    def __repr__(self):
        return formatted_flat_dict(self)
    def __eq__(self, other):
        # Structural equality: all attributes (including the type maps) must match.
        if other is None:
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        # Defined for Python 2 compatibility; delegates to __eq__.
        return not self == other
acfa503fc853460ad3e1c4aa0dcfa4c58ae8a69e | 1,462 | py | Python | DomainSearchViewer/additional/Logging.py | akowasch/DomainSearch | e5d59abcb10c47377ae6ff10029050cde3c3eda0 | [
"BSD-2-Clause"
] | null | null | null | DomainSearchViewer/additional/Logging.py | akowasch/DomainSearch | e5d59abcb10c47377ae6ff10029050cde3c3eda0 | [
"BSD-2-Clause"
] | null | null | null | DomainSearchViewer/additional/Logging.py | akowasch/DomainSearch | e5d59abcb10c47377ae6ff10029050cde3c3eda0 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
The Logging component of the Viewer.
"""
import logging.handlers
################################################################################
class Logging:
    """
    Wires up a named logger with two sinks: INFO-and-above to the console,
    DEBUG-and-above to a size-rotated file under ``logs/``.
    """
    def __init__(self, filename):
        self._logger = logging.getLogger(filename)
        self._logger.setLevel(logging.DEBUG)
        # Console sink: terse format including the logger name.
        stream_handler = logging.StreamHandler()
        stream_handler.setLevel(logging.INFO)
        stream_handler.setFormatter(logging.Formatter(
            "%(asctime)s - %(levelname)-7s - %(name)-21s - %(message)s",
            "%Y-%m-%d %H:%M:%S"))
        # File sink: rotates at 1 MB, keeping two backups.
        rotating_handler = logging.handlers.RotatingFileHandler(
            'logs/' + filename + '.log',
            encoding='utf8',
            maxBytes=1048576,  # 1 MB
            backupCount=2)
        rotating_handler.setLevel(logging.DEBUG)
        rotating_handler.setFormatter(logging.Formatter(
            "%(asctime)s - %(levelname)-7s - %(message)s",
            "%Y-%m-%d %H:%M:%S"))
        for handler in (stream_handler, rotating_handler):
            self._logger.addHandler(handler)
    ############################################################################
    def get_logger(self):
        """
        Return the configured logger instance.
        """
        return self._logger
| 26.107143 | 80 | 0.523256 |
acfa505dbe818da98ca3a0106be64dc194d2e498 | 7,407 | py | Python | genie-main/nccomp.py | JUNPENGZ/cgenie.muffin | 43bc8dc025428a5141866d762129b2cfaf1345ed | [
"MIT"
] | 34 | 2018-05-28T08:11:58.000Z | 2022-03-18T10:13:30.000Z | genie-main/nccomp.py | ruiying-ocean/cgenie.muffin | 7d4c6976779d9e120ad989831eb12cdd33ea9fa6 | [
"MIT"
] | 29 | 2018-09-19T22:58:49.000Z | 2021-05-20T12:24:47.000Z | genie-main/nccomp.py | ruiying-ocean/cgenie.muffin | 7d4c6976779d9e120ad989831eb12cdd33ea9fa6 | [
"MIT"
] | 35 | 2018-06-11T19:14:02.000Z | 2022-02-13T19:50:24.000Z | #!/usr/local/bin/python
#
# $Id$
#
'''A program to compare two NetCDF files.'''
import sys
import types
from sets import Set
import os.path
import cdms
import MV
# define useful globals
TRUE,FALSE=1,0
def simpleComp(fileA, fileB, tol=0.0):
    '''A simple comparison function.
    Attribute names and values are compared first.
    Then the data arrays are compared within a
    tolerance on a cell by cell basis.

    Returns a 4-tuple (status, message, detailA, detailB): status is TRUE on
    a full match, otherwise FALSE with a message describing the first
    difference found plus per-file detail.

    NOTE(review): on an early (mismatch) return the two cdms file handles
    are not closed; only the success path closes them.
    '''
    # open files
    fpA = cdms.open(fileA)
    fpB = cdms.open(fileB)
    # == global attributes ==
    # compare attribute names
    message = 'global attributes: '
    globalNamesA = Set(fpA.listglobal())
    globalNamesB = Set(fpB.listglobal())
    symmetricDiff = globalNamesA ^ globalNamesB
    if len(symmetricDiff) != 0:
        (detailA, detailB) = setDiff(globalNamesA,globalNamesB,symmetricDiff)
        return (FALSE,message,detailA,detailB)
    # compare values
    for globalName in globalNamesA:
        # limit our checks to attributes with string values
        if isinstance(eval(r'fpA.'+globalName),types.StringType):
            globalValueA = eval(r'fpA.'+globalName)
            globalValueB = eval(r'fpB.'+globalName)
            if globalValueA != globalValueB:
                message += globalName + ' values'
                return(FALSE,message,globalValueA,globalValueB)
    # == dimensions ==
    # compare dimension names
    dimNamesA = Set(fpA.listdimension())
    dimNamesB = Set(fpB.listdimension())
    symmetricDiff = dimNamesA ^ dimNamesB
    if len(symmetricDiff) != 0:
        message = 'dimensions:'
        (detailA, detailB) = setDiff(dimNamesA,dimNamesB,symmetricDiff)
        return (FALSE,message,detailA,detailB)
    # loop over dimensions
    for dimName in dimNamesA:
        message = 'dimensions: '+ dimName
        # compare attribute names
        dimAttNamesA = Set(fpA[dimName].attributes.keys())
        # BUGFIX: previously read from fpA, so file B's dimension attribute
        # names were never actually compared.
        dimAttNamesB = Set(fpB[dimName].attributes.keys())
        symmetricDiff = dimAttNamesA ^ dimAttNamesB
        if len(symmetricDiff) != 0:
            (detailA, detailB) = setDiff(dimAttNamesA,dimAttNamesB,symmetricDiff)
            return (FALSE,message,detailA,detailB)
        # compare attribute values
        for dimAttName in dimAttNamesA:
            # assuming objects we can compare
            dimAttValueA = eval(r"fpA['"+dimName+r"']."+dimAttName)
            dimAttValueB = eval(r"fpB['"+dimName+r"']."+dimAttName)
            if dimAttValueA != dimAttValueB:
                message += ': '+dimAttName
                return (FALSE,message,dimAttValueA,dimAttValueB)
        # compare data
        dimDataShapeA = MV.shape(fpA[dimName])
        dimDataShapeB = MV.shape(fpB[dimName])
        if dimDataShapeA != dimDataShapeB:
            message += ': data array shape'
            return (FALSE,message,str(dimDataShapeA),str(dimDataShapeB))
        maxDelta = MV.maximum(abs(fpA[dimName][:] - fpB[dimName][:]))
        if maxDelta > tol:
            message += ': delta: '+str(maxDelta)+' > '+str(tol)
            return (FALSE,message,'','')
    # == variables ==
    # compare variable names
    varNamesA = Set(fpA.listvariables())
    varNamesB = Set(fpB.listvariables())
    symmetricDiff = varNamesA ^ varNamesB
    if len(symmetricDiff) != 0:
        message = 'variables:'
        (detailA, detailB) = setDiff(varNamesA,varNamesB,symmetricDiff)
        return (FALSE,message,detailA,detailB)
    # loop over variables
    for varName in varNamesA:
        message = 'variables: '+varName
        # compare attribute names
        varAttNamesA = Set(fpA[varName].attributes.keys())
        # BUGFIX: previously read from fpA, so file B's variable attribute
        # names were never actually compared.
        varAttNamesB = Set(fpB[varName].attributes.keys())
        symmetricDiff = varAttNamesA ^ varAttNamesB
        if len(symmetricDiff) != 0:
            (detailA, detailB) = setDiff(varAttNamesA,varAttNamesB,symmetricDiff)
            return (FALSE,message,detailA,detailB)
        # compare attribute values
        for varAttName in varAttNamesA:
            # assuming objects we can compare
            varAttValueA = eval(r"fpA['"+varName+r"']."+varAttName)
            varAttValueB = eval(r"fpB['"+varName+r"']."+varAttName)
            if varAttValueA != varAttValueB:
                message += ': '+varAttName
                return (FALSE,message,varAttValueA,varAttValueB)
        # compare data
        varDataShapeA = MV.shape(fpA[varName])
        varDataShapeB = MV.shape(fpB[varName])
        if varDataShapeA != varDataShapeB:
            message += ': data array shape'
            return (FALSE,message,str(varDataShapeA),str(varDataShapeB))
        maxDelta = MV.maximum(abs(fpA[varName][:] - fpB[varName][:]))
        if maxDelta > tol:
            message += ': delta: '+str(maxDelta)+' > '+str(tol)
            return (FALSE,message,'','')
    # close files
    fpA.close()
    fpB.close()
    return (TRUE,'','','')
def setDiff(setA, setB, symmetricDiff):
    '''Split a symmetric difference into the members that came from
    setA and the members that came from setB, returned as a pair.'''
    return (symmetricDiff & setA, symmetricDiff & setB)
def compVarSlice(fileA, fileB, var, dim, tol=0.0, start=0, end=0):
    '''Compare a slice (of given dimension) through named variable.

    Returns a 4-tuple (status, message, detailA, detailB); TRUE when the
    slices agree everywhere within tol, FALSE (with a reason) otherwise.

    NOTE(review): var/dim/start/end are spliced into an eval() string below;
    callers must not pass untrusted values.
    '''
    # open files
    fpA = cdms.open(fileA)
    fpB = cdms.open(fileB)
    # check named variable present in both files
    varsA = Set(fpA.listvariables())
    varsB = Set(fpB.listvariables())
    commonVars = varsA & varsB
    if var not in commonVars:
        fpA.close()
        fpB.close()
        return (FALSE,var+' not common',varsA,varsB)
    # ditto for named dimension
    dimsA = Set(fpA.listdimension())
    dimsB = Set(fpB.listdimension())
    commonDims = dimsA & dimsB
    if dim not in commonDims:
        fpA.close()
        fpB.close()
        return (FALSE,dim+' not common',dimsA,dimsB)
    # get the slices: eval builds e.g. fpA('temp', lat=slice(0, 5))
    # because the dimension name must appear as a keyword argument
    sliceA = eval(r"fpA('"+var+"',"+dim+"=slice("+str(start)+","+str(end)+"))")
    sliceB = eval(r"fpB('"+var+"',"+dim+"=slice("+str(start)+","+str(end)+"))")
    # close files
    fpA.close()
    fpB.close()
    # ensure dimensions of slices correct
    if sliceA.shape != sliceB.shape:
        return (FALSE,'different shapes',sliceA.shape,sliceB.shape)
    if sliceA.shape[0] != end - start:
        return (FALSE,'slice size wrong',str(sliceA.shape[0]),str(end-start))
    if sliceA.shape[0] == 0:
        return (FALSE,'slice size zero',str(sliceA.shape[0]),str(end-start))
    # make actual comparison: max absolute cell-by-cell difference vs tol
    maxDelta = MV.maximum(abs(sliceA - sliceB))
    if maxDelta > tol:
        return (FALSE,'max diff > '+str(tol),'','')
    else:
        return (TRUE,'','','')
    # could take difference of the averages too
# Command-line entry point (Python 2 print syntax):
#   nccomp.py fileA.nc fileB.nc [tol]
# Exits 1 on usage error, missing file, or differing files; 0 on a match.
if __name__ == '__main__':
    tol = 0.0
    # parse command line
    numArgs = len(sys.argv)
    if numArgs < 3 or numArgs > 4:
        print 'usage: fileA.nc fileB.nc tol'
        sys.exit(1)
    fileA = sys.argv[1]
    fileB = sys.argv[2]
    if numArgs == 4:
        # optional third argument: numeric comparison tolerance
        tol = float(sys.argv[3])
    # make sure files exist!
    if not os.path.isfile(fileA):
        print "does not exist: %s" % fileA
        sys.exit(1)
    if not os.path.isfile(fileB):
        print "does not exist: %s" % fileB
        sys.exit(1)
    # make the comparison
    (status,message,detailA,detailB) = simpleComp(fileA,fileB,tol)
    # report only on mismatch; a match exits silently with status 0
    if status == FALSE:
        print "files differ."
        print "%s" % message
        print "fileA: %s" % detailA
        print "fileB: %s" % detailB
        sys.exit(1)
acfa5235ceea4742735cbfec2357ace8691393b7 | 1,868 | py | Python | lexicon/tests/providers/test_infoblox.py | 1500cloud/lexicon | 8fa65a4e8c844d5d7c33f55ac6e66242f7d415d9 | [
"MIT"
] | 1 | 2019-08-17T23:09:24.000Z | 2019-08-17T23:09:24.000Z | lexicon/tests/providers/test_infoblox.py | 1500cloud/lexicon | 8fa65a4e8c844d5d7c33f55ac6e66242f7d415d9 | [
"MIT"
] | null | null | null | lexicon/tests/providers/test_infoblox.py | 1500cloud/lexicon | 8fa65a4e8c844d5d7c33f55ac6e66242f7d415d9 | [
"MIT"
] | 1 | 2019-11-04T17:12:53.000Z | 2019-11-04T17:12:53.000Z | """
A note about running these tests against a Infoblox Environment
1. Make sure the NIOS Version support WAPI 2.6.1
2. Have a valid Certificate from a public CA installed at the Infoblox
3. Create a Authoritative zone test.local in a view (default if no views are created)
4. Create a User with permissions RW permissions
for the zone test.local and enable the User for API
Environment Variables work fine when envoking lexicon manually
LEXICON_INFOBLOX_AUTH_USER={user} LEXICON_INFOBLOX_AUTH_PSW={password}
lexicon infoblox --ib-host dns1.int.metro-cc.com --ib-view internal
create test.local A --content 10.10.10.11 --name lexicon1
Invoking the py.test however fails
LEXICON_LIVE_TESTS=true LEXICON_INFOBLOX_AUTH_USER={username} LEXICON_INFOBLOX_AUTH_PSW={password}
py.test tests/providers/test_infoblox.py
Both parameters are populated with:
auth_user = placeholder_auth_user
auth_psw = placeholder_auth_psw
"""
import os
from unittest import TestCase
from lexicon.tests.providers.integration_tests import IntegrationTests
# Hook into testing framework by inheriting unittest.TestCase and reuse
# the tests which *each and every* implementation of the interface must
# pass, by inheritance from integration_tests.IntegrationTests
class InfobloxProviderTests(TestCase, IntegrationTests):
    """TestCase for Infoblox: runs the shared lexicon provider integration
    suite against a NIOS appliance hosting the 'test.local' zone."""
    provider_name = 'infoblox'
    domain = 'test.local'
    def _test_parameters_overrides(self):
        # Workaround ENV problems during testing: fall back to hard-coded
        # placeholder credentials when the env vars are unset (the module
        # docstring mentions different placeholders — confirm which is right).
        env_user = os.environ.get('LEXICON_INFOBLOX_AUTH_USER', 'infoblox')
        env_psw = os.environ.get('LEXICON_INFOBLOX_AUTH_PSW', 'default')
        return {'ib_host': 'dns1.int.metro-cc.com', 'ib_view': 'internal',
                'auth_user': env_user, 'auth_psw': env_psw}
    def _filter_headers(self):
        # Headers to scrub from recorded HTTP fixtures — presumably so
        # credentials never land in the cassettes; confirm against the
        # IntegrationTests base class.
        return ['Authorization', 'Cookie', 'set-cookie']
| 43.44186 | 98 | 0.764989 |
acfa527ead367b94ddd3bbfbdea22af42199e03c | 787 | py | Python | example/dlf_app/migrations/0001_initial.py | Gerleff/django-location-field | 092e7e352146f54f602b50b53ae0cd3f82860db3 | [
"MIT"
] | 443 | 2015-01-12T12:33:30.000Z | 2022-03-31T07:23:23.000Z | example/dlf_app/migrations/0001_initial.py | Gerleff/django-location-field | 092e7e352146f54f602b50b53ae0cd3f82860db3 | [
"MIT"
] | 104 | 2015-03-24T09:38:32.000Z | 2022-03-22T20:20:49.000Z | example/dlf_app/migrations/0001_initial.py | Gerleff/django-location-field | 092e7e352146f54f602b50b53ae0cd3f82860db3 | [
"MIT"
] | 122 | 2015-02-22T20:17:00.000Z | 2022-03-23T10:22:45.000Z | # Generated by Django 2.1.7 on 2019-03-20 12:30
from django.db import migrations, models
import django.db.models.deletion
import location_field.models.plain
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Place table with a plain
    # (non-GIS) location field and an optional self-referencing parent.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Place',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('city', models.CharField(max_length=255)),
                ('location', location_field.models.plain.PlainLocationField(max_length=63)),
                ('parent_place', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='dlf_app.Place')),
            ],
        ),
    ]
| 30.269231 | 140 | 0.631512 |
acfa52cde45bd172dbf43efc6761c8be4cea7bba | 453 | py | Python | Bonus/03Switch01.py | davidavg/OOP_Python | ca4e8376a50b9c81b5ac18c466bd8d147bdbe679 | [
"MIT"
] | null | null | null | Bonus/03Switch01.py | davidavg/OOP_Python | ca4e8376a50b9c81b5ac18c466bd8d147bdbe679 | [
"MIT"
] | null | null | null | Bonus/03Switch01.py | davidavg/OOP_Python | ca4e8376a50b9c81b5ac18c466bd8d147bdbe679 | [
"MIT"
] | null | null | null | '''
Created on Oct 3, 2018
@author: david avalos
'''
# Ask for the operation name on stdin (matching is case-sensitive:
# "Sum" works, "sum" falls through to the invalid-option branch).
option = input("Please introduce one option:\nSum\nSubtraction\nMultiply\nDivide\n-->")
# Emulated switch statement: dispatch on the chosen option via if/elif.
if option == "Sum":
    print("\nExecute a sum")
elif option == "Subtraction":
    print("\nExecute a subtraction")
elif option == "Multiply":
    print("\nExecute a multiplication")
elif option == "Divide":
    print("\nExecute a division")
else:
    print("\nInvalid option")
acfa530b4c4634348688b2299d603d515642b919 | 1,544 | py | Python | tests/urlpatterns_reverse/views.py | vincepandolfo/django | 67cf5efa31acb2916034afb15610b700695dfcb0 | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2017-01-11T06:27:15.000Z | 2017-01-11T06:27:15.000Z | tests/urlpatterns_reverse/views.py | vincepandolfo/django | 67cf5efa31acb2916034afb15610b700695dfcb0 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | tests/urlpatterns_reverse/views.py | vincepandolfo/django | 67cf5efa31acb2916034afb15610b700695dfcb0 | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2019-10-22T12:16:53.000Z | 2019-10-22T12:16:53.000Z | from functools import partial, update_wrapper
from django.contrib.auth.decorators import user_passes_test
from django.http import HttpResponse
from django.urls import reverse_lazy
from django.views.generic import RedirectView
def empty_view(request, *args, **kwargs):
return HttpResponse('')
def absolute_kwargs_view(request, arg1=1, arg2=2):
return HttpResponse('')
def defaults_view(request, arg1, arg2):
pass
def nested_view(request):
pass
def erroneous_view(request):
import non_existent # NOQA
def pass_resolver_match_view(request, *args, **kwargs):
response = HttpResponse('')
response.resolver_match = request.resolver_match
return response
uncallable = None # neither a callable nor a string
class ViewClass(object):
    """Callable class whose instances act as views; every call returns an
    empty 200 response regardless of arguments."""
    def __call__(self, request, *args, **kwargs):
        return HttpResponse('')
view_class_instance = ViewClass()
class LazyRedirectView(RedirectView):
url = reverse_lazy('named-lazy-url-redirected-to')
@user_passes_test(lambda u: u.is_authenticated(), login_url=reverse_lazy('some-login-page'))
def login_required_view(request):
    # View gated by an authentication test; unauthenticated users get
    # redirected to the lazily-reversed 'some-login-page' URL.
    # NOTE(review): is_authenticated is *called* here (pre-1.10 style);
    # on modern Django it is a property, so this lambda would fail.
    return HttpResponse('Hello you')
def bad_view(request, *args, **kwargs):
raise ValueError("I don't think I'm getting good value for this view")
empty_view_partial = partial(empty_view, template_name="template.html")
empty_view_nested_partial = partial(empty_view_partial, template_name="nested_partial.html")
empty_view_wrapped = update_wrapper(
partial(empty_view, template_name="template.html"), empty_view,
)
| 24.903226 | 92 | 0.761658 |
acfa53b450330e5ada331375789c135ea7d537bf | 9,657 | py | Python | doc/source/conf.py | cvxgrp/cvxpy | 08a11f320011bf5ccf3b74409d301f9ebfbbf528 | [
"ECL-2.0",
"Apache-2.0"
] | 3,285 | 2015-01-03T04:02:29.000Z | 2021-04-19T14:51:29.000Z | doc/source/conf.py | cvxgrp/cvxpy | 08a11f320011bf5ccf3b74409d301f9ebfbbf528 | [
"ECL-2.0",
"Apache-2.0"
] | 1,138 | 2015-01-01T19:40:14.000Z | 2021-04-18T23:37:31.000Z | doc/source/conf.py | cvxgrp/cvxpy | 08a11f320011bf5ccf3b74409d301f9ebfbbf528 | [
"ECL-2.0",
"Apache-2.0"
] | 765 | 2015-01-02T19:29:39.000Z | 2021-04-20T00:50:43.000Z | # -*- coding: utf-8 -*-
#
# CVXPY documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 27 20:47:07 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# To import CVXPY:
sys.path.insert(0, os.path.abspath('../..'))
# To import sphinx extensions we've put in the repository:
sys.path.insert(0, os.path.abspath('../sphinxext'))
sys.path.append('/home/docs/checkouts/readthedocs.org/user_builds/cvxpy/checkouts/1.0/cvxpy')
__version__ = "1.1.15"
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon']
# To suppress autodoc/numpydoc warning.
# http://stackoverflow.com/questions/12206334/sphinx-autosummary-toctree-contains-reference-to-nonexisting-document-warnings
numpydoc_show_class_members = False
# Since readthedocs.org has trouble compiling `cvxopt`, autodoc fails
# whenever it tries to import a CVXPY module to document it.
# The following code replaces the relevant cvxopt modules with
# a dummy namespace, allowing autodoc to work.
class Mocked:
    """Permissive attribute bag used to mock out unimportable modules:
    any attribute may be set, and reading an unset attribute yields None
    instead of raising AttributeError."""
    def __setattr__(self, name, value):
        self.__dict__[name] = value
    def __getattr__(self, name):
        # Only invoked when normal lookup fails; default to None.
        return self.__dict__.get(name)
MOCK_MODULES = ['cvxopt', 'cvxopt.base', 'cvxopt.misc']
sys.modules.update((mod_name, Mocked()) for mod_name in MOCK_MODULES)
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CVXPY'
copyright = u'2020, The CVXPY authors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join(__version__.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import alabaster
table_styling_embed_css = False
html_theme_path = [alabaster.get_path(), "../themes"]
extensions += ['alabaster']
html_theme = 'cvxpy_alabaster'
html_sidebars = {
'**': [
'about.html', 'navigation.html', 'searchbox.html',
]
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'github_user': 'cvxpy',
'github_repo': 'cvxpy',
'github_banner': True,
'github_type': 'star',
'travis_button': False,
'analytics_id': 'UA-50248335-1',
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = ['../themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'cvxpydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'cvxpy.tex', u'CVXPY Documentation',
u'Steven Diamond, Eric Chu, Stephen Boyd', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cvxpy', u'CVXPY Documentation',
[u'Steven Diamond, Eric Chu, Stephen Boyd'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'cvxpy', u'CVXPY Documentation',
u'Steven Diamond, Eric Chu, Stephen Boyd', 'CVXPY', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| 32.19 | 124 | 0.708295 |
acfa53ee5044da9b7a0a464a64c3cb0df80f9592 | 2,073 | py | Python | tools/paconn-cli/paconn/apimanager/fileuploader.py | amoedo/PowerPlatformConnectors | dbf436b3a2cc02a4231b60b232a696e734d81c55 | [
"MIT"
] | 454 | 2019-05-07T17:58:24.000Z | 2022-03-31T23:20:04.000Z | tools/paconn-cli/paconn/apimanager/fileuploader.py | amoedo/PowerPlatformConnectors | dbf436b3a2cc02a4231b60b232a696e734d81c55 | [
"MIT"
] | 830 | 2019-05-11T10:32:44.000Z | 2022-03-31T18:23:42.000Z | tools/paconn-cli/paconn/apimanager/fileuploader.py | amoedo/PowerPlatformConnectors | dbf436b3a2cc02a4231b60b232a696e734d81c55 | [
"MIT"
] | 753 | 2019-05-11T09:49:56.000Z | 2022-03-31T15:53:52.000Z | # -----------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -----------------------------------------------------------------------------
"""
Uploads an file for the custom connector
"""
import os
import mimetypes
from urllib.parse import urlparse, urlunparse
from azure.storage.blob import ContentSettings, BlockBlobService
def upload_file(sas_url, file_path):
    """Upload file_path to the Azure blob container addressed by sas_url
    and return a SAS URL from which the uploaded blob can be downloaded."""
    # Break the SAS URL; the query string is the SAS token itself
    (scheme, netloc, path, params, query, fragment) = urlparse(sas_url)
    # Account is the first part of the netlocation upto the dot
    account_name = netloc[0:netloc.index('.')]
    # The assumption here is that the blob URL will be in the
    # form accountname.blob.core.windows.net or
    # accountname.blob.core.usgovcloudapi.net.
    # Chopping off accountname.blob. to obtain the
    # endpoint suffix.
    endpoint_suffix = netloc.replace(account_name+'.blob.', '')
    # Container name is the path
    container_name = path.strip('/')
    # Create a block blob service authenticated by the SAS token
    blockblob_service = BlockBlobService(
        account_name=account_name,
        sas_token=query,
        endpoint_suffix=endpoint_suffix)
    # Get the file name of the file; it becomes the blob name
    file_name = os.path.basename(file_path)
    # Determine the content type and encoding for the file
    # (both may be None when the extension is unknown)
    (content_type, content_encoding) = mimetypes.guess_type(file_name)
    content_settings = ContentSettings(
        content_type=content_type,
        content_encoding=content_encoding)
    # Upload the file
    blockblob_service.create_blob_from_path(
        container_name=container_name,
        blob_name=file_name,
        file_path=file_path,
        content_settings=content_settings)
    # Append the file name to the path to generate the download link,
    # reusing the original SAS token from the query string
    path = path + '/' + file_name
    urlparts = (scheme, netloc, path, params, query, fragment)
    sas_download_url = urlunparse(urlparts)
    return sas_download_url
| 35.135593 | 79 | 0.666184 |
acfa5475592e62a0e433bd18ff1fe44620105c62 | 2,278 | py | Python | photo/tests.py | SaintieMonte10/Gallery | 2520ea8f1e5c0ea3fcb5eba3f4f78bc41dbc1460 | [
"MIT"
] | null | null | null | photo/tests.py | SaintieMonte10/Gallery | 2520ea8f1e5c0ea3fcb5eba3f4f78bc41dbc1460 | [
"MIT"
] | 5 | 2020-05-31T22:28:25.000Z | 2020-05-31T22:28:26.000Z | photo/tests.py | SaintieMonte10/Gallery | 2520ea8f1e5c0ea3fcb5eba3f4f78bc41dbc1460 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
from .models import *
# Create your tests here.
class LocationTestClass(TestCase):
    """Tests for the Location model's save/delete/query helper methods."""
    def setUp(self):
        # Fresh, unsaved instance for every test.
        self.clare = Location(loc_name='nakuru', id=1)
    # Testing instance
    def test_instance(self):
        self.assertTrue(isinstance(self.clare, Location))
    def test_save_method(self):
        self.clare.save_loc()
        locations = Location.objects.all()
        self.assertTrue(len(locations) > 0)
    def test_delete_method(self):
        # Save first so delete_loc() actually removes a persisted row; the
        # original never saved, so the emptiness assertion passed vacuously.
        self.clare.save_loc()
        self.clare.delete_loc()
        locations = Location.objects.all()
        # `len(...) is 0` relied on CPython small-int identity; compare values.
        self.assertEqual(len(locations), 0)
    def test_display_all(self):
        jess = Location(loc_name='uganda')
        jess.save_loc()
        self.clare.save_loc()
        locations = Location.objects.all()
        # assertTrue(x, 2) treated 2 as a failure *message* and always passed
        # for a non-empty queryset; assert the expected count instead.
        self.assertEqual(len(locations), 2)
    def tearDown(self):
        Image.objects.all().delete()
        Location.objects.all().delete()
        Category.objects.all().delete()
class CategoryTestClass(TestCase):
    """Tests for the Category model's save/delete/query helper methods."""
    def setUp(self):
        self.category = Category(name='jess', id=1)
    # Testing instance
    def test_instance(self):
        self.assertTrue(isinstance(self.category, Category))
    def test_save_method(self):
        self.category.save_category()
        catego = Category.objects.all()
        self.assertTrue(len(catego) > 0)
    def test_delete_method(self):
        # Save first so there is a row to remove. The original referenced
        # `self.category.delete_category` without parentheses, so the delete
        # never actually ran.
        self.category.save_category()
        self.category.delete_category()
        cat = Category.objects.all()
        # `len(...) is 0` compared identity, not value; use equality.
        self.assertEqual(len(cat), 0)
    def test_display_all(self):
        jess = Category(name='general')
        jess.save_category()
        self.category.save_category()
        one = Category.objects.all()
        # assertTrue(x, 2) treated 2 as the failure message; assert the count.
        self.assertEqual(len(one), 2)
class ImageTestClass(TestCase):
def setUp(self):
self.nakuru = Location(loc_name='nakuru')
self.nature = Category(name='general')
self.image = Image(image='images/lagoon.jpeg',image_name='jess', image_descprition='she loves photos',location=self.nakuru,category=self.nature, id=1)
def test_instance(self):
self.assertTrue(isinstance(self.image,Image))
| 28.475 | 158 | 0.623793 |
acfa54d43f4a9b5fb0327b52626ab5e05cd349ab | 13,334 | py | Python | rrt_star.py | nakul3112/Motion_Planning_of_Autonomous_Robot_with_RRT-_and_InformedRRT- | 2534ee480ac007b699c76626ebc9b3b9e969fcb3 | [
"BSD-3-Clause"
] | 5 | 2020-08-11T08:19:36.000Z | 2022-03-14T21:05:52.000Z | rrt_star.py | nakul3112/Motion_Planning_of_Autonomous_Robot_with_RRT-_and_InformedRRT- | 2534ee480ac007b699c76626ebc9b3b9e969fcb3 | [
"BSD-3-Clause"
] | null | null | null | rrt_star.py | nakul3112/Motion_Planning_of_Autonomous_Robot_with_RRT-_and_InformedRRT- | 2534ee480ac007b699c76626ebc9b3b9e969fcb3 | [
"BSD-3-Clause"
] | 2 | 2021-04-14T14:15:18.000Z | 2021-05-25T13:16:06.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri May 10 18:13:13 2019
@author: nakul
"""
import random
import math
import copy
import numpy as np
import matplotlib.pyplot as plt
show_animation = True
class RRT():
"""
Class for RRT Planning
"""
    def __init__(self, start, goal, obstacleList, randArea,
                 expandDis= 10, goalSampleRate=20, maxIter=500):
        """
        Setting Parameter

        start: start position [x, y]
        goal: goal position [x, y]
        obstacleList: obstacle positions [[x, y, size], ...]
        randArea: random sampling area [min, max] (same range for x and y)
        expandDis: maximum edge length when steering towards a sample
        goalSampleRate: goal-bias threshold used in get_random_point
        maxIter: number of sampling iterations to run
        """
        self.start = Node(start[0], start[1])
        self.end = Node(goal[0], goal[1])
        self.minrand = randArea[0]
        self.maxrand = randArea[1]
        self.expandDis = expandDis
        self.goalSampleRate = goalSampleRate
        self.maxIter = maxIter
        self.obstacleList = obstacleList
    def Planning(self, animation=True):
        """
        Path planning main loop: sample, steer, collision-check, choose the
        cheapest parent, and rewire the neighbourhood (RRT*).

        animation: flag for animation on or off
        Returns the path goal->start as a list of [x, y] pairs, or None if
        no node ended up within expandDis of the goal.
        """
        self.nodeList = [self.start]
        for i in range(self.maxIter):
            rnd = self.get_random_point()
            nind = self.GetNearestListIndex(self.nodeList, rnd)
            newNode = self.steer(rnd, nind)
            # print(newNode.cost)
            if self.__CollisionCheck(newNode, self.obstacleList):
                nearinds = self.find_near_nodes(newNode)
                newNode = self.choose_parent(newNode, nearinds)
                self.nodeList.append(newNode)
                self.rewire(newNode, nearinds)
            # redraw every 5th iteration to keep the animation responsive
            if animation and i % 5 == 0:
                self.DrawGraph(self.obstacleList, rnd)  # Change
        # generate course
        lastIndex = self.get_best_last_index()
        if lastIndex is None:
            return None
        path = self.gen_final_course(lastIndex)
        return path
    def choose_parent(self, newNode, nearinds):
        """Among the near nodes, pick the collision-free parent that gives
        newNode the lowest cost; sets newNode.cost/parent and returns it.
        If nearinds is empty or every edge is blocked, newNode is returned
        unchanged (cost stays inf, parent stays None)."""
        if not nearinds:
            return newNode
        dlist = []
        for i in nearinds:
            dx = newNode.x - self.nodeList[i].x
            dy = newNode.y - self.nodeList[i].y
            d = math.sqrt(dx ** 2 + dy ** 2)
            theta = math.atan2(dy, dx)
            if self.check_collision_extend(self.nodeList[i], theta, d):
                dlist.append(self.nodeList[i].cost + d)
            else:
                # blocked edge: can never be selected as parent
                dlist.append(float("inf"))
        mincost = min(dlist)
        minind = nearinds[dlist.index(mincost)]
        if mincost == float("inf"):
            print("mincost is inf")
            return newNode
        newNode.cost = mincost
        newNode.parent = minind
        return newNode
    def steer(self, rnd, nind):
        """Create a new node towards the sampled point rnd: the sample
        itself if it lies within expandDis of node nind, otherwise a point
        expandDis away from nind along the direction to rnd. The returned
        node has cost=inf and parent=None (filled in by choose_parent)."""
        # expand tree
        nearestNode = self.nodeList[nind]
        theta = math.atan2(rnd[1] - nearestNode.y, rnd[0] - nearestNode.x)
        newNode = Node(rnd[0], rnd[1])
        currentDistance = math.sqrt(
            (rnd[1] - nearestNode.y) ** 2 + (rnd[0] - nearestNode.x) ** 2)
        # Find a point within expandDis of nind, and closest to rnd
        if currentDistance <= self.expandDis:
            pass
        else:
            newNode.x = nearestNode.x + self.expandDis * math.cos(theta)
            newNode.y = nearestNode.y + self.expandDis * math.sin(theta)
        newNode.cost = float("inf")
        newNode.parent = None
        return newNode
    def get_random_point(self):
        """Sample a random [x, y] in the configuration space, occasionally
        returning the goal to bias the search towards it.

        NOTE(review): the sample range is 0..1110 (see "# Change"), so with
        goalSampleRate=20 the goal is drawn roughly 1.8% of the time rather
        than 20% — confirm this rescaling is intentional.
        """
        if random.randint(0, 1110) > self.goalSampleRate:  # Change
            rnd = [random.uniform(self.minrand, self.maxrand),
                   random.uniform(self.minrand, self.maxrand)]
        else:  # goal point sampling
            rnd = [self.end.x, self.end.y]
        return rnd
def get_best_last_index(self):
    """Return the index of the cheapest tree node within ``expandDis`` of the goal.

    Returns:
        int index into ``self.nodeList``, or None when no node is close
        enough to the goal yet.
    """
    disglist = [self.calc_dist_to_goal(
        node.x, node.y) for node in self.nodeList]
    # BUGFIX: use enumerate rather than disglist.index(i). list.index()
    # returns the FIRST occurrence of a value, so two nodes at the same
    # distance produced a duplicated first index and the second node was
    # silently dropped from consideration.
    goalinds = [i for i, d in enumerate(disglist) if d <= self.expandDis]
    if not goalinds:
        return None
    mincost = min(self.nodeList[i].cost for i in goalinds)
    for i in goalinds:
        if self.nodeList[i].cost == mincost:
            return i
    return None
def gen_final_course(self, goalind):
    """Walk parent links from node ``goalind`` back to the tree root.

    Returns the path as ``[[x, y], ...]`` ordered goal -> start; the goal
    and start coordinates bracket the intermediate tree nodes.
    """
    path = [[self.end.x, self.end.y]]
    node = self.nodeList[goalind]
    while node.parent is not None:
        path.append([node.x, node.y])
        node = self.nodeList[node.parent]
    path.append([self.start.x, self.start.y])
    return path
def calc_dist_to_goal(self, x, y):
    """Euclidean distance from the point (x, y) to the goal node."""
    dx = x - self.end.x
    dy = y - self.end.y
    return np.linalg.norm([dx, dy])
def find_near_nodes(self, newNode):
    """Indices of tree nodes within the RRT* rewiring radius of ``newNode``.

    The radius shrinks as the tree grows (~ 50 * sqrt(log(n) / n)), the
    standard RRT* near-neighbour schedule.
    """
    nnode = len(self.nodeList)
    r = 50.0 * math.sqrt((math.log(nnode) / nnode))
    # r = self.expandDis * 5.0
    r_sq = r ** 2
    # BUGFIX: use enumerate rather than dlist.index(i). list.index()
    # returns the FIRST occurrence of a value, so nodes at identical
    # squared distances all mapped to the first node's index and the
    # later duplicates were never rewire candidates.
    return [i for i, node in enumerate(self.nodeList)
            if (node.x - newNode.x) ** 2 + (node.y - newNode.y) ** 2 <= r_sq]
def rewire(self, newNode, nearinds):
    """RRT* rewiring: re-parent nearby nodes through ``newNode`` when cheaper.

    ``newNode`` is assumed to be the most recently appended node, so nearby
    nodes that become cheaper via it get parent index len(nodeList) - 1.
    """
    new_index = len(self.nodeList) - 1
    for i in nearinds:
        near = self.nodeList[i]
        dx = newNode.x - near.x
        dy = newNode.y - near.y
        dist = math.sqrt(dx ** 2 + dy ** 2)
        cost_via_new = newNode.cost + dist
        if near.cost <= cost_via_new:
            continue
        # Cheaper through newNode -- adopt it if the edge is collision-free.
        if self.check_collision_extend(near, math.atan2(dy, dx), dist):
            near.parent = new_index
            near.cost = cost_via_new
def check_collision_extend(self, nearNode, theta, d):
    """Collision-check the straight edge leaving ``nearNode``.

    Walks distance ``d`` along heading ``theta`` in expandDis-sized steps,
    probing each intermediate point. Returns False on the first colliding
    probe, True when the whole edge is clear. ``nearNode`` itself is not
    mutated (a deep copy is advanced instead).
    """
    probe = copy.deepcopy(nearNode)
    for _ in range(int(d / self.expandDis)):
        probe.x += self.expandDis * math.cos(theta)
        probe.y += self.expandDis * math.sin(theta)
        if not self.__CollisionCheck(probe, self.obstacleList):
            return False
    return True
def DrawGraph(self, obstacleList, rnd=None):
    """
    Draw Graph

    Renders the current tree edges, the obstacle points, the start/goal
    markers and (optionally) the latest random sample ``rnd`` with
    matplotlib, then pauses briefly so the figure refreshes while the
    planner is still running.
    """
    plt.clf()
    o_x = [x[0] for x in obstacleList]  # Change
    o_y = [y[1] for y in obstacleList]  # Change
    if rnd is not None:
        # Latest random sample shown as a black triangle marker.
        plt.plot(rnd[0], rnd[1], "^k")
    for node in self.nodeList:
        if node.parent is not None:
            # Green edge from each node to its parent.
            plt.plot([node.x, self.nodeList[node.parent].x], [
                node.y, self.nodeList[node.parent].y], "-g")
    #==============================================================================
    #         for (ox, oy) in self.obstacleList:
    #             plt.plot(ox, oy, "ok", ms=30)
    #==============================================================================
    plt.plot(self.start.x, self.start.y, "xr")
    plt.plot(self.end.x, self.end.y, "xr")
    plt.plot(o_x, o_y, "ko")  # Change
    #plt.axis([-2, 15, -2, 15])
    # Axis limits match the 1110 x 1010 map used by the obstacle helpers.
    plt.axis([0, 1110, 0, 1010])  # Change
    plt.grid(True)
    plt.pause(0.01)
def GetNearestListIndex(self, nodeList, rnd):
    """Index of the node in ``nodeList`` closest to the point ``rnd``.

    Compares squared distances (no sqrt needed for an argmin); ties go to
    the earliest index.
    """
    sq_dists = ((node.x - rnd[0]) ** 2 + (node.y - rnd[1]) ** 2
                for node in nodeList)
    return min(enumerate(sq_dists), key=lambda pair: pair[1])[0]
def __CollisionCheck(self, node, obstacleList):
    """True when ``node`` is at a safe distance from every obstacle point.

    Obstacles are (x, y) points; a node whose squared distance to any of
    them is <= 2 (i.e. within ~1.41 units) counts as colliding.
    """
    for ox, oy in obstacleList:  # Change
        if (ox - node.x) ** 2 + (oy - node.y) ** 2 <= 2:
            return False  # collision
    return True  # safe
#====================================================================
def check_boundary(x, y, r):
    """True when (x, y) lies within ``r`` of the map border.

    The map is 1110 wide and 1010 tall; a robot of radius ``r`` centred in
    this border band would overlap the boundary wall.
    """
    near_vertical_walls = (0 <= x <= r) or (1110 - r <= x <= 1110)
    near_horizontal_walls = (0 <= y <= r) or (1010 - r <= y <= 1010)
    return near_vertical_walls or near_horizontal_walls
# For rectangles
def check_rect(x, y, r):
    """True when (x, y) is within ``r`` of any rectangular obstacle.

    Each obstacle is an axis-aligned rectangle (xmin, xmax, ymin, ymax).
    A point is "inside" when it falls in the rectangle inflated by the
    robot radius ``r`` on every side (Minkowski expansion) -- exactly the
    original per-rectangle half-plane checks f1..f4 <= 0, but data-driven
    instead of 13 copy-pasted blocks.
    """
    rects = (
        (832, 918, 827, 1070),         # rect1
        (983, 1026, 919, 1010),        # rect2
        (744, 1110, 621, 697),         # rect3
        (1052, 1110, 448.5, 565.5),    # rect4
        (1019, 1110, 362.5, 448.5),    # rect5
        (1052, 1110, 178.25, 295.25),  # rect6
        (927, 1110, 35, 111),          # rect7
        (685, 1110, 0, 35),            # rect8
        (779, 896, 35, 93),            # rect9
        (474, 748, 35, 187),           # rect10
        (529, 712, 265, 341),          # rect11
        (438, 529, 315, 498),          # rect12
        (784.5, 936.5, 267, 384),      # rect13
    )
    return any(
        xmin - r <= x <= xmax + r and ymin - r <= y <= ymax + r
        for (xmin, xmax, ymin, ymax) in rects
    )
# For circles
def check_circle(x, y, r):
    """True when (x, y) is within ``r`` of any circular obstacle.

    All four obstacle circles have radius 40.5; the robot radius ``r``
    inflates each reach to 40.5 + r (semi-algebraic inside test).
    """
    centers = ((390, 965), (438, 736), (390, 45), (438, 274))
    reach_sq = (40.5 + r) ** 2
    return any((x - cx) ** 2 + (y - cy) ** 2 <= reach_sq
               for cx, cy in centers)
# For ellipse
def check_ellipse(x, y, r):
    """True when (x, y) is within ``r`` of the stadium-shaped obstacle.

    The obstacle is a 160-unit-tall rectangle spanning x in [150, 310]
    capped by two radius-80 circles at (150, 830) and (310, 830). The
    rectangle's y-bounds take the robot radius directly; the x-expansion
    is covered by the inflated end caps.
    """
    in_rect = (150 <= x <= 310) and (750 - r <= y <= 910 + r)
    reach_sq = (80 + r) ** 2
    in_left_cap = (x - 150) ** 2 + (y - 830) ** 2 <= reach_sq
    in_right_cap = (x - 310) ** 2 + (y - 830) ** 2 <= reach_sq
    return in_rect or in_left_cap or in_right_cap
#====================================================================
class Node():
    """
    RRT Node

    A single vertex of the search tree: a 2-D position plus the accumulated
    path cost from the start and the index of the parent node in the tree's
    node list (None for the root).
    """

    def __init__(self, x, y):
        self.x = x           # x coordinate
        self.y = y           # y coordinate
        self.cost = 0.0      # path cost accumulated from the start node
        self.parent = None   # index of the parent node in nodeList, or None

    def __repr__(self):
        # Debug-friendly representation; nothing in the planner relies on it.
        return "Node(x={!r}, y={!r}, cost={!r}, parent={!r})".format(
            self.x, self.y, self.cost, self.parent)
def main():
    """Build the obstacle map, run RRT* planning and (optionally) plot the result."""
    print("Start " + __file__)

    # ====Search Path with RRT====
    ox=[]  # Change
    oy=[]
    obstacleList=[]
    #robot radius(in cm)
    r = 30
    # Rasterise the obstacle space: every integer grid cell within the robot
    # radius of the boundary, a rectangle, a circle or the stadium obstacle
    # becomes a point obstacle (configuration-space inflation).
    # NOTE(review): this scans all ~1.12M cells up front -- slow, but done
    # only once before planning starts.
    for i in range(0,1111):
        for j in range(0,1011):
            req0 = check_boundary(i, j, r)
            req1 = check_rect(i, j, r)
            req2 = check_circle(i, j, r)
            req3 = check_ellipse(i, j, r)
            if (req0 or req1 or req2 or req3):
                ox.append(i)
                oy.append(j)
                obstacleList.append((i,j))  # Change
    #print("Obstacle: ", obstacleList)
    #==============================================================================
    #     obstacleList = [
    #         (5, 5, 1),
    #         (3, 6, 2),
    #         (3, 8, 2),
    #         (3, 10, 2),
    #         (7, 5, 2),
    #         (9, 5, 2)
    #     ]  # [x,y,size(radius)]
    #==============================================================================
    # Set Initial parameters
    rrt = RRT(start=[400,400], goal=[600, 400],
              randArea=[0, 1110], obstacleList=obstacleList)
    path = rrt.Planning(animation=show_animation)

    if path is None:
        print("\n Cannot find path.")
    else:
        print("\n Path found!!")

        # Draw final path
        if show_animation:
            rrt.DrawGraph(obstacleList)  # Change
            plt.plot([x for (x, y) in path], [y for (x, y) in path], '-r',linewidth=2)
            plt.grid(True)
            plt.pause(0.01)  # Need for Mac
            plt.show()


if __name__ == '__main__':
    main()
| 30.031532 | 90 | 0.482901 |
acfa562a6fb34d8a5f251c24e9e5bcd3cb5121ab | 9,503 | py | Python | docs/conf.py | theendsofinvention/pytest-picked | d0ca73199a01250d264196b8936840eb6b1cd693 | [
"MIT"
] | null | null | null | docs/conf.py | theendsofinvention/pytest-picked | d0ca73199a01250d264196b8936840eb6b1cd693 | [
"MIT"
] | null | null | null | docs/conf.py | theendsofinvention/pytest-picked | d0ca73199a01250d264196b8936840eb6b1cd693 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# picked documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 1 00:43:18 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# import sys
# import os
# import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.ifconfig"]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = "index"

# General information about the project.
project = u"pytest-picked"
copyright = u"2018, Ana Paula Gomes"
author = u"Ana Paula Gomes"

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.1.0"
# The full version, including alpha/beta/rc tags.
release = "0.1.0"

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]

# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False


# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = "alabaster"

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
# html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}

# If false, no module index is generated.
# html_domain_indices = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None

# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'

# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}

# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'

# Output file base name for HTML help builder.
# NOTE: was "pytest-pytest-pickeddoc" -- a leftover of the un-rendered
# "pytest-{{cookiecutter.plugin_name}}" template name; use the project name.
htmlhelp_basename = "pytest-pickeddoc"
# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
    # Latex figure (float) alignment
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
# NOTE: the title and author previously contained escaped, un-rendered
# cookiecutter placeholders ("\{\{cookiecutter.plugin_name\}\}" etc.);
# use the real project name and author instead.
latex_documents = [
    (
        master_doc,
        "pytest-picked.tex",
        u"pytest-picked Documentation",
        u"Ana Paula Gomes",
        "manual",
    )
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False

# If true, show page references after internal links.
# latex_show_pagerefs = False

# If true, show URL addresses after external links.
# latex_show_urls = False

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_domain_indices = True


# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# NOTE: the page name previously read "pytest-pytest-picked" -- a leftover
# of the un-rendered cookiecutter template name; use the project name.
man_pages = [
    (master_doc, "pytest-picked", u"pytest-picked Documentation", [author], 1)
]

# If true, show URL addresses after external links.
# man_show_urls = False


# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# NOTE: names and description previously held cookiecutter placeholders
# ("pytest-pytest-picked", "One line description of project.").
texinfo_documents = [
    (
        master_doc,
        "pytest-picked",
        u"pytest-picked Documentation",
        author,
        "pytest-picked",
        "Run the tests related to the changed files with pytest.",
        "Miscellaneous",
    )
]

# Documents to append as an appendix to all manuals.
# texinfo_appendices = []

# If false, no module index is generated.
# texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
acfa58adf77e82ca7c7fc251e865f34177a5e4f8 | 3,141 | py | Python | huaweicloud-sdk-nat/huaweicloudsdknat/v2/model/create_nat_gateway_snat_rule_request_option.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 64 | 2020-06-12T07:05:07.000Z | 2022-03-30T03:32:50.000Z | huaweicloud-sdk-nat/huaweicloudsdknat/v2/model/create_nat_gateway_snat_rule_request_option.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 11 | 2020-07-06T07:56:54.000Z | 2022-01-11T11:14:40.000Z | huaweicloud-sdk-nat/huaweicloudsdknat/v2/model/create_nat_gateway_snat_rule_request_option.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 24 | 2020-06-08T11:42:13.000Z | 2022-03-04T06:44:08.000Z | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreateNatGatewaySnatRuleRequestOption:
    """Request-body model wrapping a single SNAT rule creation payload.

    Auto-generated huaweicloud SDK model -- edits here are normally
    overwritten by the generator.

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.

        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    # Attribute names whose values must be masked ("****") in to_dict().
    sensitive_list = []

    openapi_types = {
        'snat_rule': 'CreateNatGatewaySnatRuleOption'
    }

    attribute_map = {
        'snat_rule': 'snat_rule'
    }

    def __init__(self, snat_rule=None):
        """CreateNatGatewaySnatRuleRequestOption - a model defined in huaweicloud sdk"""
        
        

        self._snat_rule = None
        # No polymorphic subtypes for this model.
        self.discriminator = None

        self.snat_rule = snat_rule

    @property
    def snat_rule(self):
        """Gets the snat_rule of this CreateNatGatewaySnatRuleRequestOption.

        :return: The snat_rule of this CreateNatGatewaySnatRuleRequestOption.
        :rtype: CreateNatGatewaySnatRuleOption
        """
        return self._snat_rule

    @snat_rule.setter
    def snat_rule(self, snat_rule):
        """Sets the snat_rule of this CreateNatGatewaySnatRuleRequestOption.

        :param snat_rule: The snat_rule of this CreateNatGatewaySnatRuleRequestOption.
        :type: CreateNatGatewaySnatRuleOption
        """
        self._snat_rule = snat_rule

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models (anything with to_dict), lists
        # and dicts; scalar values pass through, except sensitive ones.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    # Mask sensitive attributes instead of serializing them.
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            # Python 2 only: force utf-8 default encoding before dumping.
            reload(sys)
            sys.setdefaultencoding("utf-8")

        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CreateNatGatewaySnatRuleRequestOption):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 28.044643 | 88 | 0.578797 |
acfa5943f7c4016a4a8f9f7bb77b68ffc63238a2 | 4,479 | py | Python | tests/test_bindings.py | evocount/drf-channels-oneway-ws | d3e6598c4b5f7f0c94bdc522c077aef400584d75 | [
"MIT"
] | 2 | 2019-05-27T20:50:22.000Z | 2021-05-14T11:21:23.000Z | tests/test_bindings.py | evocount/drf-channels-oneway-ws | d3e6598c4b5f7f0c94bdc522c077aef400584d75 | [
"MIT"
] | null | null | null | tests/test_bindings.py | evocount/drf-channels-oneway-ws | d3e6598c4b5f7f0c94bdc522c077aef400584d75 | [
"MIT"
] | 1 | 2020-09-24T05:49:56.000Z | 2020-09-24T05:49:56.000Z | from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.conf.urls import url
from channels.generic.websocket import AsyncJsonWebsocketConsumer
from channels.testing import WebsocketCommunicator
from channels.routing import URLRouter
from rest_framework import serializers
from asgiref.sync import sync_to_async
from channels_oneway.bindings import Binding
from channels_oneway.mixins import DRFJsonConsumerMixinAsync
import pytest
User = get_user_model()
@pytest.mark.asyncio
@pytest.mark.django_db
async def test_binding():
    """End-to-end check that model changes are pushed over a websocket.

    Creates, updates, m2m-updates and deletes a ``User`` and asserts that a
    consumer subscribed to the ``test`` group receives one message per
    change with the expected action/payload.

    NOTE(review): the expected ``pk``/``id`` of 1 assumes a fresh test
    database sequence -- confirm the test DB is reset per run.
    """
    class TestBinding(Binding):
        # Bind the auth User model to the "users" stream; changes on the
        # User<->Group m2m table also trigger an update message.
        model = User
        stream = 'users'
        m2m_senders = [User.groups.through]

        @classmethod
        def group_names(cls, instance):
            return ['test']

        def serialize_data(self, instance):
            return {'id': instance.id, 'username': instance.username}

    class TestConsumer(AsyncJsonWebsocketConsumer, DRFJsonConsumerMixinAsync):
        # Minimal consumer that joins the "test" group for the connection's
        # lifetime so it receives the binding's broadcasts.
        async def connect(self):
            await self.channel_layer.group_add('test', self.channel_name)
            await self.accept()

        async def disconnect(self, close_code):
            await self.channel_layer.group_discard('test', self.channel_name)

    application = URLRouter([
        url(r"^testws/$", TestConsumer),
    ])

    communicator = WebsocketCommunicator(application, "/testws/")
    connected, subprotocol = await communicator.connect()
    assert connected

    # Create -> expect an "action: create" message.
    user = await sync_to_async(User.objects.create)(username='root')

    response = await communicator.receive_json_from()
    assert response == {
        'stream': 'users',
        'payload': {
            'action': 'create',
            'data': {'id': 1, 'username': 'root'},
            'model': 'auth.user',
            'pk': 1
        }
    }

    # Field update -> expect an "action: update" message.
    user.username = 'SuperUser'
    await sync_to_async(user.save)()

    response = await communicator.receive_json_from()
    assert response == {
        'stream': 'users',
        'payload': {
            'action': 'update',
            'data': {'id': 1, 'username': 'SuperUser'},
            'model': 'auth.user',
            'pk': 1
        }
    }

    # m2m change (groups) -> also surfaces as an "update" message because
    # the through model is listed in m2m_senders.
    group = await sync_to_async(Group.objects.create)(name='group')
    await sync_to_async(user.groups.set)([group])

    response = await communicator.receive_json_from()
    assert response == {
        'stream': 'users',
        'payload': {
            'action': 'update',
            'data': {'id': 1, 'username': 'SuperUser'},
            'model': 'auth.user',
            'pk': 1
        }
    }

    # Delete -> expect an "action: delete" message with the last-known data.
    await sync_to_async(user.delete)()

    response = await communicator.receive_json_from()
    assert response == {
        'stream': 'users',
        'payload': {
            'action': 'delete',
            'data': {'id': 1, 'username': 'SuperUser'},
            'model': 'auth.user',
            'pk': 1
        }
    }

    await communicator.disconnect()
@pytest.mark.asyncio
@pytest.mark.django_db
async def test_serializer_binding():
    """Check that a Binding can serialize via a DRF ModelSerializer.

    Unlike test_binding, the payload shape here comes from ``UserSerializer``
    rather than a hand-written ``serialize_data``.

    NOTE(review): the expected ``pk``/``id`` of 2 presumably depends on the
    primary-key sequence continuing from the previous test -- verify the
    tests are not order-independent before reordering them.
    """
    class UserSerializer(serializers.ModelSerializer):
        class Meta:
            model = User
            fields = ('id', 'username', 'first_name', 'last_name')

    class TestBinding(Binding):
        # The serializer attribute replaces serialize_data().
        model = User
        stream = 'users'
        serializer = UserSerializer

        @classmethod
        def group_names(cls, instance):
            return ['users']

    class TestConsumer(AsyncJsonWebsocketConsumer, DRFJsonConsumerMixinAsync):
        # Minimal consumer subscribed to the "users" group.
        async def connect(self):
            await self.channel_layer.group_add('users', self.channel_name)
            await self.accept()

        async def disconnect(self, close_code):
            await self.channel_layer.group_discard('users', self.channel_name)

    application = URLRouter([
        url(r"^testws/$", TestConsumer),
    ])

    communicator = WebsocketCommunicator(application, "/testws/")
    connected, subprotocol = await communicator.connect()
    assert connected

    await sync_to_async(User.objects.create)(username='root')

    # The serializer contributes first_name/last_name (defaults to '') in
    # addition to id/username.
    response = await communicator.receive_json_from()
    assert response == {
        'stream': 'users',
        'payload': {
            'action': 'create',
            'data': {
                'id': 2,
                'username': 'root',
                'first_name': '',
                'last_name': ''
            },
            'model': 'auth.user',
            'pk': 2
        }
    }

    await communicator.disconnect()
| 27.478528 | 78 | 0.602813 |
acfa59f345f61c52f40eba99f0a9c82ae13e247a | 2,005 | py | Python | nesi/keymile/keymile_resources/keymile_mgmt_card.py | Tubbz-alt/NESi | 0db169dd6378fbd097380280cc41440e652de19e | [
"BSD-2-Clause"
] | 1 | 2021-04-26T23:15:33.000Z | 2021-04-26T23:15:33.000Z | nesi/keymile/keymile_resources/keymile_mgmt_card.py | Tubbz-alt/NESi | 0db169dd6378fbd097380280cc41440e652de19e | [
"BSD-2-Clause"
] | null | null | null | nesi/keymile/keymile_resources/keymile_mgmt_card.py | Tubbz-alt/NESi | 0db169dd6378fbd097380280cc41440e652de19e | [
"BSD-2-Clause"
] | null | null | null | # This file is part of the NESi software.
#
# Copyright (c) 2020
# Original Software Design by Ilya Etingof <https://github.com/etingof>.
#
# Software adapted by inexio <https://github.com/inexio>.
# - Janis Groß <https://github.com/unkn0wn-user>
# - Philip Konrath <https://github.com/Connyko65>
# - Alexander Dincher <https://github.com/Dinker1996>
#
# License: https://github.com/inexio/NESi/LICENSE.rst
from nesi.softbox.base_resources.mgmt_card import MgmtCard, MgmtCardCollection, logging
from nesi.softbox.base_resources import base
LOG = logging.getLogger(__name__)
class KeyMileMgmtCard(MgmtCard):
    """Represent a KeyMile management-card resource.

    (The previous docstring, "Represent physical shelf resource.", looked
    like a copy-paste from the shelf model.)

    All fields below are inventory/identity attributes persisted on the
    simulated card and exposed through the softbox base.Field descriptor.
    """

    board_name = base.Field('board_name')
    supplier_build_state = base.Field('supplier_build_state')
    board_id = base.Field('board_id')
    hardware_key = base.Field('hardware_key')
    software = base.Field('software')
    software_name = base.Field('software_name')
    software_revision = base.Field('software_revision')
    state = base.Field('state')
    serial_number = base.Field('serial_number')
    manufacturer_name = base.Field('manufacturer_name')
    model_name = base.Field('model_name')
    short_text = base.Field('short_text')
    manufacturer_id = base.Field('manufacturer_id')
    manufacturer_part_number = base.Field('manufacturer_part_number')
    manufacturer_build_state = base.Field('manufacturer_build_state')
    customer_id = base.Field('customer_id')
    customer_product_id = base.Field('customer_product_id')
    boot_loader = base.Field('boot_loader')
    processor = base.Field('processor')
    # User-settable labels and description (see set_label).
    label1 = base.Field('label1')
    label2 = base.Field('label2')
    product = base.Field('product')

    def set_label(self, l1, l2, desc):
        """Set both label fields and the description on the card.

        Each value is persisted via a separate update() call.
        """
        self.update(label1=l1)
        self.update(label2=l2)
        self.update(description=desc)
class KeyMileMgntCardCollection(MgmtCardCollection):
    """Represent a collection of KeyMile management cards.

    NOTE(review): the class name spells "Mgnt" rather than "Mgmt"; it is
    kept as-is because external code may import it under this name.
    """

    @property
    def _resource_type(self):
        # Member resources of this collection are KeyMileMgmtCard instances.
        return KeyMileMgmtCard
| 35.175439 | 87 | 0.725187 |
acfa5a99012b62512133471c3f313d6f05f511de | 12,873 | py | Python | t5/models/mesh_transformer.py | thomasw21/text-to-text-transfer-transformer | 6b7bcc8fe39ae772682bc7c72c93dec1e9120a80 | [
"Apache-2.0"
] | 4,282 | 2019-10-17T21:45:46.000Z | 2022-03-31T19:45:53.000Z | t5/models/mesh_transformer.py | thomasw21/text-to-text-transfer-transformer | 6b7bcc8fe39ae772682bc7c72c93dec1e9120a80 | [
"Apache-2.0"
] | 480 | 2019-10-25T07:04:11.000Z | 2022-03-30T18:38:42.000Z | t5/models/mesh_transformer.py | thomasw21/text-to-text-transfer-transformer | 6b7bcc8fe39ae772682bc7c72c93dec1e9120a80 | [
"Apache-2.0"
] | 628 | 2019-10-24T03:56:59.000Z | 2022-03-31T03:30:44.000Z | # Copyright 2021 The T5 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for providing data to Mesh TF transformer."""
import functools
from absl import logging
import gin
import mesh_tensorflow.transformer.dataset as transformer_dataset
import t5.data
from t5.models import utils as model_utils
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
# Gin configurable names that no longer exist in this codebase. Presumably
# consumed by config-loading code so that stale references in old gin files
# can be detected/ignored -- TODO confirm where these are checked.
DEPRECATED_GIN_REFERENCES = (
    "configurable_vocabulary",
    "get_sentencepiece_model_path",
    "maybe_print_dataset",
    "num_parallel_calls",
    "SentencePieceVocabulary",
    "t5.data.sentencepiece_vocabulary.SentencePieceVocabulary",
    "t5.models.mesh_transformer.get_sentencepiece_model_path",
    "train_model",
    "vocabularies.Vocabulary",
    "Vocabulary",
)
@gin.configurable()
def mesh_train_dataset_fn(
    mixture_or_task_name,
    sequence_length,
    vocabulary=None,
    dataset_split=tfds.Split.TRAIN,
    shuffle=True,
    seed=None,
    use_cached=False,
    pack=True):
  """Returns the tf.data.Dataset for training on a given mixture.

  This uses the format required for utils.run's `train_dataset_fn` argument in
  the Mesh TF transformer standalone.

  Args:
    mixture_or_task_name: string, an identifier for a Mixture or Task in the
      appropriate registry. Must be specified via gin.
    sequence_length: dict mapping feature key to the int length for that feature
      the max sequence length.
    vocabulary: unused argument, maintains compatibility with other dataset_fns.
    dataset_split: string, which split of the dataset to load. In most cases
      this should be "train".
    shuffle: Whether or not to shuffle dataset.
    seed: tf.int64 scalar tf.Tensor (or None). Used for both the global seed and
      shuffle seed for tf.data
    use_cached: bool, whether to load the cached version of this dataset.
    pack: bool, whether to pack the dataset.

  Returns:
    A tf.data.Dataset of preprocessed, tokenized, and batched examples.
  """
  del vocabulary  # unused; kept for dataset_fn signature compatibility

  mixture_or_task = t5.data.get_mixture_or_task(mixture_or_task_name)
  ds = mixture_or_task.get_dataset(
      sequence_length, split=dataset_split, use_cached=use_cached,
      shuffle=shuffle, num_epochs=None, seed=seed)

  # Keep only the output features that actually appear in the dataset.
  present_features = tuple(
      key for key in mixture_or_task.output_features
      if key in tf.data.get_output_shapes(ds))

  # pack_or_pad only filters features when packing is on; filter explicitly
  # here so that unpacked datasets do not leak e.g. string features into
  # training examples.
  ds = ds.map(
      lambda ex: {key: ex[key] for key in present_features},
      num_parallel_calls=tf.data.experimental.AUTOTUNE)

  eos_features = set(
      key for key, feat in mixture_or_task.output_features.items()
      if feat.add_eos)
  return transformer_dataset.pack_or_pad(
      ds, sequence_length, pack=pack,
      feature_keys=present_features, ensure_eos=eos_features)
@gin.configurable()
def mesh_inference_dataset_fn(
    mixture_or_task_name,
    sequence_length,
    dataset_split,
    shuffle=False,
    seed=None,
    vocabulary=None,
    num_inference_examples=-1,
    use_cached=False,
    priming_sequence_length=None):
  """Returns all tf.data.Datasets for LM inference on a given mixture.
  For Tasks without inputs (such as language modeling), the first
  `priming_sequence_length` tokens in the target are used as the "inputs" for
  inference.
  Args:
    mixture_or_task_name: string, an identifier for a Mixture or Task in the
      appropriate registry. Must be specified via gin.
    sequence_length: dict mapping feature key to the int length for that feature
      the max sequence length. If set to None, packing and padding will be
      disabled.
    dataset_split: string, which split of the dataset to load. NOTE, this
      function does NOT receive the split specified in utils.run. It needs to be
      specified separately.
    shuffle: Whether or not to shuffle dataset.
    seed: tf.int64 scalar tf.Tensor (or None). Used as shuffle seed for tf.data.
    vocabulary: unused argument, maintains compatibility with other dataset_fns
    num_inference_examples: maximum number of examples per task to do inference
      on. If None or less than 0, use all examples.
    use_cached: bool, whether to load the cached version of this dataset.
    priming_sequence_length: If the Task only has "targets", select the first
      this many tokens from each target sequence to use as "inputs". This is
      useful for decoder-only language models where you would like to use a
      portion of the targets as a priming sequence for generation.
  Returns:
    A list of mesh_tensorflow.transformer.dataset.EvalDataset tuples.
  """
  # `vocabulary` exists only so this function matches the signature of the
  # other dataset_fns; it is intentionally unused.
  del vocabulary
  mixture_or_task = t5.data.get_mixture_or_task(mixture_or_task_name)
  def _split_targets_for_primed_inference(ex):
    # Move the first `priming_sequence_length` target tokens into "inputs"
    # and keep only the remainder as "targets".
    ex["inputs"] = ex["targets"][:priming_sequence_length]
    ex["targets"] = ex["targets"][priming_sequence_length:]
    # Right-pad the priming sequence with zeros so every example has exactly
    # `priming_sequence_length` input tokens, then pin the static shape.
    ex["inputs"] = tf.pad(
        ex["inputs"],
        [[0, priming_sequence_length - tf.shape(ex["inputs"])[0]]], "CONSTANT")
    ex["inputs"] = tf.reshape(ex["inputs"], shape=(priming_sequence_length,))
    return ex
  def _prepare_for_unprimed_inference(ex):
    # No priming requested: use an empty token sequence as the "inputs".
    ex["inputs"] = tf.constant([], dtype=tf.int64)
    return ex
  def _get_dataset_for_single_task(task, sequence_length):
    """Get a tensorflow.data.Dataset for the provided task."""
    ds = task.get_dataset(
        sequence_length, split=dataset_split, use_cached=use_cached,
        shuffle=shuffle, seed=seed)
    if "inputs" not in ds.element_spec:
      # Decoder-only Task (targets only): synthesize "inputs" from the
      # targets, or prime with the empty sequence.
      if not priming_sequence_length or priming_sequence_length <= 0:
        logging.warning("Priming sequence length not specified so priming "
                        "with the empty string.")
        ds = ds.map(_prepare_for_unprimed_inference)
      else:
        logging.info("Using the first %d tokens of each target as input.",
                     priming_sequence_length)
        ds = ds.map(_split_targets_for_primed_inference)
    elif priming_sequence_length is not None:
      raise ValueError(
          "Setting a priming sequence length only makes sense for decoder-only "
          "Tasks, which have `targets` but no `inputs`.")
    eos_keys = set(
        k for k, f in mixture_or_task.output_features.items() if f.add_eos)
    logging.info(
        "Padding '%s' with sequence lengths: %s", task.name, sequence_length)
    # pack=False: inference requires each example to stay separate.
    ds = transformer_dataset.pack_or_pad(
        ds,
        sequence_length,
        pack=False,
        feature_keys=tuple(task.output_features),
        ensure_eos=eos_keys)
    if num_inference_examples is not None and num_inference_examples >= 0:
      ds = ds.take(num_inference_examples)
    return ds
  outputs = []
  for task in t5.data.get_subtasks(mixture_or_task):
    if dataset_split not in task.splits:
      logging.info("Task %s has no '%s' split, skipping inference.",
                   task.name, dataset_split)
      continue
    outputs.append(
        transformer_dataset.EvalDataset(
            task.name,
            # Bind the dataset builder lazily; the caller invokes the
            # partial to materialize the tf.data.Dataset for this task.
            functools.partial(
                _get_dataset_for_single_task,
                task=task,
                sequence_length=sequence_length),
            task.postprocess_fn,
            task.metric_fns,
        )
    )
  if not outputs:
    logging.warning("No %s data found for %s.",
                    dataset_split, mixture_or_task_name)
  return outputs
@gin.configurable()
def mesh_eval_dataset_fn(
    mixture_or_task_name,
    sequence_length,
    dataset_split,
    vocabulary=None,
    num_eval_examples=-1,
    use_cached=False,
    pack=False,
    shuffle_eval_examples=False,
    seed=None):
  """Returns all tf.data.Datasets for evaluation on a given mixture.
  This uses the format required for utils.run's `eval_dataset_fn` argument in
  the Mesh TF transformer standalone.
  Args:
    mixture_or_task_name: string, an identifier for a Mixture or Task in the
      appropriate registry. Must be specified via gin.
    sequence_length: dict mapping feature key to the int length for that feature
      the max sequence length. If set to None, packing and padding will be
      disabled.
    dataset_split: string, which split of the dataset to load.
    vocabulary: unused argument, maintains compatibility with other dataset_fns
    num_eval_examples: maximum number of examples per task to use for continuous
      eval. If None or less than 0, use all examples.
    use_cached: bool, whether to load the cached version of this dataset.
    pack: a boolean, whether to pack examples. This is useful for perplexity
      evals but should not be used for iterative decoding.
    shuffle_eval_examples: boolean, whether to shuffle eval examples, applied
      only when num_eval_examples is not None. Intended to be able to eval on a
      different eval slice at every iteration.
    seed: tf.int64 scalar tf.Tensor (or None). Used for both the global seed and
      shuffle seed for tf.data
  Returns:
    A list of mesh_tensorflow.transformer.dataset.EvalDataset tuples.
  """
  # `vocabulary` exists only so this function matches the signature of the
  # other dataset_fns; it is intentionally unused.
  del vocabulary
  mixture_or_task = t5.data.get_mixture_or_task(mixture_or_task_name)
  def _get_dataset_for_single_task(task, sequence_length):
    """Get a tensorflow.data.Dataset for the provided task."""
    if shuffle_eval_examples and seed is None:
      # Fixed: previously this passed a tuple of two strings to
      # logging.warning (logging the tuple's repr) and misnamed the
      # parameter as "shuffle_seed_examples".
      logging.warning("shuffle_eval_examples is True but no seed was "
                      "provided. Using a random seed.")
    ds = task.get_dataset(
        sequence_length, split=dataset_split,
        use_cached=use_cached, shuffle=shuffle_eval_examples, seed=seed,
    )
    eos_keys = set(
        k for k, f in mixture_or_task.output_features.items() if f.add_eos)
    if sequence_length is None:
      logging.info(
          "Skipping packing/padding for '%s' since sequence length is None.",
          task.name)
    else:
      # "Pack" + "ing" / "Padd" + "ing" -> "Packing" / "Padding".
      logging.info(
          "%sing '%s' with sequence lengths: %s",
          "Pack" if pack else "Padd", task.name, sequence_length)
      ds = transformer_dataset.pack_or_pad(
          ds,
          sequence_length,
          pack=pack,
          feature_keys=tuple(task.output_features),
          ensure_eos=eos_keys)
    if num_eval_examples is not None and num_eval_examples >= 0:
      ds = ds.take(num_eval_examples)
    return ds
  outputs = []
  for task in t5.data.get_subtasks(mixture_or_task):
    if dataset_split not in task.splits:
      logging.info(
          "Task %s has no '%s' split, skipping eval.", task.name, dataset_split
      )
      continue
    outputs.append(
        transformer_dataset.EvalDataset(
            task.name,
            # Bind the dataset builder lazily; the caller invokes the
            # partial to materialize the tf.data.Dataset for this task.
            functools.partial(
                _get_dataset_for_single_task,
                task=task,
                sequence_length=sequence_length),
            task.postprocess_fn,
            task.metric_fns,
        )
    )
  if not outputs:
    logging.warning("No %s data found for %s.",
                    dataset_split, mixture_or_task_name)
  return outputs
@gin.configurable()
def tsv_dataset_fn(
    filename,
    sequence_length,
    dataset_split,
    vocabulary,
    shuffle_buffer_size=10000):
  r"""Builds a dataset from a TSV file of `<input>\t<target>` lines."""
  # Currently `tf.gfile.glob` is broken on GCS, so we only read a file or
  # list of files.
  line_ds = tf.data.TextLineDataset(filename)
  shuffled_lines = line_ds.shuffle(shuffle_buffer_size)
  return transformer_dataset.packed_parallel_tsv_dataset(
      dataset=shuffled_lines,
      sequence_length=sequence_length,
      vocabulary=vocabulary,
      dataset_split=dataset_split,
      append_eos=True,
      eos_id=1)
@gin.configurable()
def get_vocabulary(mixture_or_task_name=None):
  """Resolves the vocabulary to pass as utils.run's `vocabulary` argument.
  Delegates to model_utils.get_vocabulary.
  Args:
    mixture_or_task_name: string, an identifier for a Mixture or Task in the
      appropriate registry. Must be specified via gin.
  Returns:
    Either a single t5.data.vocabularies.Vocabulary or a tuple of
    t5.data.vocabularies.Vocabulary for inputs and targets.
  """
  return model_utils.get_vocabulary(mixture_or_task_name)
| 35.958101 | 80 | 0.707683 |
acfa5a9927126eddb007c770ea4a44dcbde56eb9 | 925 | py | Python | functional_test/test_layout_and_styling.py | fromdanut/superlists | c06ee0ac8dbe8ede1d67f97fc7d435608bc87581 | [
"MIT"
] | null | null | null | functional_test/test_layout_and_styling.py | fromdanut/superlists | c06ee0ac8dbe8ede1d67f97fc7d435608bc87581 | [
"MIT"
] | null | null | null | functional_test/test_layout_and_styling.py | fromdanut/superlists | c06ee0ac8dbe8ede1d67f97fc7d435608bc87581 | [
"MIT"
] | null | null | null | from .base import FunctionalTest
from selenium.webdriver.common.keys import Keys
class LayoutAndStylingTest(FunctionalTest):
def test_layout_and_styling(self):
# Edith goes to the home page
self.browser.get(self.live_server_url)
self.browser.set_window_size(1024, 768)
# She notices the inputbox is centered
inputbox = self.get_item_input_box()
self.assertAlmostEqual(
inputbox.location['x'] + inputbox.size['width'] / 2,
512,
delta=10
)
# She starts a new list and sees the input is centered too
inputbox.send_keys('testing')
inputbox.send_keys(Keys.ENTER)
self.wait_for_row_in_list_table('1: testing')
inputbox = self.get_item_input_box()
self.assertAlmostEqual(
inputbox.location['x'] + inputbox.size['width'] / 2,
512,
delta=10
)
| 33.035714 | 66 | 0.629189 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.