hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f71e77b8f44d22273b61f23147bbe79a416e5b87 | 1,166 | py | Python | aoc_2021/day03.py | guido-weber/AoC_2021 | f5d41ea0600f702857b2c479a67f4f9578afb52b | [
"Unlicense"
] | null | null | null | aoc_2021/day03.py | guido-weber/AoC_2021 | f5d41ea0600f702857b2c479a67f4f9578afb52b | [
"Unlicense"
] | null | null | null | aoc_2021/day03.py | guido-weber/AoC_2021 | f5d41ea0600f702857b2c479a67f4f9578afb52b | [
"Unlicense"
] | null | null | null | import io
from collections import Counter
from typing import Iterable
def read_input():
with io.open("input/day03") as f:
return f.read()
def most_common(bits: Iterable[str]):
c = Counter(bits)
return "0" if c["0"] > c["1"] else "1"
def least_common(bits: Iterable[str]):
c = Counter(bits)
return "1" if c["1"] < c["0"] else "0"
def invert(bits: str):
return "".join("0" if b == "1" else "1" for b in bits)
def day03_1(input: str):
lines = input.splitlines()
gamma = "".join(most_common(bits) for bits in zip(*lines))
epsilon = invert(gamma)
return int(gamma, 2) * int(epsilon, 2)
def select_line(lines: list[str], crit):
prefix = ""
while len(lines) > 1:
pl = len(prefix)
prefix += crit(line[pl] for line in lines)
lines = [line for line in lines if line.startswith(prefix)]
return int(lines[0], 2)
def day03_2(input: str):
lines = input.splitlines()
oxy = select_line(lines, most_common)
co2 = select_line(lines, least_common)
return oxy * co2
if __name__ == "__main__":
input = read_input()
print(day03_1(input))
print(day03_2(input))
| 22.423077 | 67 | 0.621784 | import io
from collections import Counter
from typing import Iterable
def read_input():
with io.open("input/day03") as f:
return f.read()
def most_common(bits: Iterable[str]):
c = Counter(bits)
return "0" if c["0"] > c["1"] else "1"
def least_common(bits: Iterable[str]):
c = Counter(bits)
return "1" if c["1"] < c["0"] else "0"
def invert(bits: str):
return "".join("0" if b == "1" else "1" for b in bits)
def day03_1(input: str):
lines = input.splitlines()
gamma = "".join(most_common(bits) for bits in zip(*lines))
epsilon = invert(gamma)
return int(gamma, 2) * int(epsilon, 2)
def select_line(lines: list[str], crit):
prefix = ""
while len(lines) > 1:
pl = len(prefix)
prefix += crit(line[pl] for line in lines)
lines = [line for line in lines if line.startswith(prefix)]
return int(lines[0], 2)
def day03_2(input: str):
lines = input.splitlines()
oxy = select_line(lines, most_common)
co2 = select_line(lines, least_common)
return oxy * co2
if __name__ == "__main__":
input = read_input()
print(day03_1(input))
print(day03_2(input))
| true | true |
f71e77d479a5c19a10183f4785ab075fdd327612 | 380 | py | Python | vfio_isolate/action/action.py | spheenik/vfio-isolate | 6d6a1f0d5e5d84a5ad9911c635a81b86710d12d5 | [
"MIT"
] | 44 | 2020-05-03T15:03:32.000Z | 2022-03-23T19:03:23.000Z | vfio_isolate/action/action.py | darkguy2008/vfio-isolate | 6c16cf363a627f02202586a17df58522e097ef10 | [
"MIT"
] | 7 | 2020-08-18T10:17:14.000Z | 2022-01-14T14:18:47.000Z | vfio_isolate/action/action.py | darkguy2008/vfio-isolate | 6c16cf363a627f02202586a17df58522e097ef10 | [
"MIT"
] | 6 | 2020-06-02T05:29:34.000Z | 2022-02-04T17:12:40.000Z | from dataclasses import dataclass
from typing import Generator
@dataclass
class Execution:
action: type
params: object
class Action:
@classmethod
def can_execute(cls, p):
return True
@classmethod
def execute(cls, p):
pass
@classmethod
def record_undo(cls, p) -> Generator[Execution, None, None]:
return
yield
| 15.2 | 64 | 0.644737 | from dataclasses import dataclass
from typing import Generator
@dataclass
class Execution:
action: type
params: object
class Action:
@classmethod
def can_execute(cls, p):
return True
@classmethod
def execute(cls, p):
pass
@classmethod
def record_undo(cls, p) -> Generator[Execution, None, None]:
return
yield
| true | true |
f71e781a9cac3e602bc48bcfc5f1e148c85a0985 | 1,536 | py | Python | upy/__init__.py | transforma-digital/upy | b70b65ea3f8b8c47a64d54567289280fd78877fe | [
"BSD-3-Clause"
] | null | null | null | upy/__init__.py | transforma-digital/upy | b70b65ea3f8b8c47a64d54567289280fd78877fe | [
"BSD-3-Clause"
] | 5 | 2021-08-04T01:30:48.000Z | 2021-08-06T17:42:08.000Z | upy/__init__.py | transforma-digital/upy | b70b65ea3f8b8c47a64d54567289280fd78877fe | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2021 sinek-dev
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| 52.965517 | 75 | 0.775391 |
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
| true | true |
f71e781ca6c1fe1d224c75e75b6f5fc9def42831 | 8,411 | py | Python | lib/win32popen.py | manuvaldi/viewvc-wiki | a8627d695cc80425ed201ded9ab7030438d67a03 | [
"BSD-2-Clause"
] | null | null | null | lib/win32popen.py | manuvaldi/viewvc-wiki | a8627d695cc80425ed201ded9ab7030438d67a03 | [
"BSD-2-Clause"
] | null | null | null | lib/win32popen.py | manuvaldi/viewvc-wiki | a8627d695cc80425ed201ded9ab7030438d67a03 | [
"BSD-2-Clause"
] | null | null | null | # -*-python-*-
#
# Copyright (C) 1999-2015 The ViewCVS Group. All Rights Reserved.
#
# By using this file, you agree to the terms and conditions set forth in
# the LICENSE.html file which can be found at the top level of the ViewVC
# distribution or at http://viewvc.org/license-1.html.
#
# For more information, visit http://viewvc.org/
#
# -----------------------------------------------------------------------
#
# Utilities for controlling processes and pipes on win32
#
# -----------------------------------------------------------------------
import os, sys, traceback, string, thread
try:
import win32api
except ImportError, e:
raise ImportError, str(e) + """
Did you install the Python for Windows Extensions?
http://sourceforge.net/projects/pywin32/
"""
import win32process, win32pipe, win32con
import win32event, win32file, winerror
import pywintypes, msvcrt
# Buffer size for spooling
SPOOL_BYTES = 4096
# File object to write error messages
SPOOL_ERROR = sys.stderr
#SPOOL_ERROR = open("m:/temp/error.txt", "wt")
def CommandLine(command, args):
"""Convert an executable path and a sequence of arguments into a command
line that can be passed to CreateProcess"""
cmd = "\"" + string.replace(command, "\"", "\"\"") + "\""
for arg in args:
cmd = cmd + " \"" + string.replace(arg, "\"", "\"\"") + "\""
return cmd
def CreateProcess(cmd, hStdInput, hStdOutput, hStdError):
"""Creates a new process which uses the specified handles for its standard
input, output, and error. The handles must be inheritable. 0 can be passed
as a special handle indicating that the process should inherit the current
process's input, output, or error streams, and None can be passed to discard
the child process's output or to prevent it from reading any input."""
# initialize new process's startup info
si = win32process.STARTUPINFO()
si.dwFlags = win32process.STARTF_USESTDHANDLES
if hStdInput == 0:
si.hStdInput = win32api.GetStdHandle(win32api.STD_INPUT_HANDLE)
else:
si.hStdInput = hStdInput
if hStdOutput == 0:
si.hStdOutput = win32api.GetStdHandle(win32api.STD_OUTPUT_HANDLE)
else:
si.hStdOutput = hStdOutput
if hStdError == 0:
si.hStdError = win32api.GetStdHandle(win32api.STD_ERROR_HANDLE)
else:
si.hStdError = hStdError
# create the process
phandle, pid, thandle, tid = win32process.CreateProcess \
( None, # appName
cmd, # commandLine
None, # processAttributes
None, # threadAttributes
1, # bInheritHandles
win32con.NORMAL_PRIORITY_CLASS, # dwCreationFlags
None, # newEnvironment
None, # currentDirectory
si # startupinfo
)
if hStdInput and hasattr(hStdInput, 'Close'):
hStdInput.Close()
if hStdOutput and hasattr(hStdOutput, 'Close'):
hStdOutput.Close()
if hStdError and hasattr(hStdError, 'Close'):
hStdError.Close()
return phandle, pid, thandle, tid
def CreatePipe(readInheritable, writeInheritable):
"""Create a new pipe specifying whether the read and write ends are
inheritable and whether they should be created for blocking or nonblocking
I/O."""
r, w = win32pipe.CreatePipe(None, SPOOL_BYTES)
if readInheritable:
r = MakeInheritedHandle(r)
if writeInheritable:
w = MakeInheritedHandle(w)
return r, w
def File2FileObject(pipe, mode):
"""Make a C stdio file object out of a win32 file handle"""
if string.find(mode, 'r') >= 0:
wmode = os.O_RDONLY
elif string.find(mode, 'w') >= 0:
wmode = os.O_WRONLY
if string.find(mode, 'b') >= 0:
wmode = wmode | os.O_BINARY
if string.find(mode, 't') >= 0:
wmode = wmode | os.O_TEXT
return os.fdopen(msvcrt.open_osfhandle(pipe.Detach(),wmode),mode)
def FileObject2File(fileObject):
"""Get the win32 file handle from a C stdio file object"""
return win32file._get_osfhandle(fileObject.fileno())
def DuplicateHandle(handle):
"""Duplicates a win32 handle."""
proc = win32api.GetCurrentProcess()
return win32api.DuplicateHandle(proc,handle,proc,0,0,win32con.DUPLICATE_SAME_ACCESS)
def MakePrivateHandle(handle, replace = 1):
"""Turn an inherited handle into a non inherited one. This avoids the
handle duplication that occurs on CreateProcess calls which can create
uncloseable pipes."""
### Could change implementation to use SetHandleInformation()...
flags = win32con.DUPLICATE_SAME_ACCESS
proc = win32api.GetCurrentProcess()
if replace: flags = flags | win32con.DUPLICATE_CLOSE_SOURCE
newhandle = win32api.DuplicateHandle(proc,handle,proc,0,0,flags)
if replace: handle.Detach() # handle was already deleted by the last call
return newhandle
def MakeInheritedHandle(handle, replace = 1):
"""Turn a private handle into an inherited one."""
### Could change implementation to use SetHandleInformation()...
flags = win32con.DUPLICATE_SAME_ACCESS
proc = win32api.GetCurrentProcess()
if replace: flags = flags | win32con.DUPLICATE_CLOSE_SOURCE
newhandle = win32api.DuplicateHandle(proc,handle,proc,0,1,flags)
if replace: handle.Detach() # handle was deleted by the last call
return newhandle
def MakeSpyPipe(readInheritable, writeInheritable, outFiles = None, doneEvent = None):
"""Return read and write handles to a pipe that asynchronously writes all of
its input to the files in the outFiles sequence. doneEvent can be None, or a
a win32 event handle that will be set when the write end of pipe is closed.
"""
if outFiles is None:
return CreatePipe(readInheritable, writeInheritable)
r, writeHandle = CreatePipe(0, writeInheritable)
if readInheritable is None:
readHandle, w = None, None
else:
readHandle, w = CreatePipe(readInheritable, 0)
thread.start_new_thread(SpoolWorker, (r, w, outFiles, doneEvent))
return readHandle, writeHandle
def SpoolWorker(srcHandle, destHandle, outFiles, doneEvent):
"""Thread entry point for implementation of MakeSpyPipe"""
try:
buffer = win32file.AllocateReadBuffer(SPOOL_BYTES)
while 1:
try:
#print >> SPOOL_ERROR, "Calling ReadFile..."; SPOOL_ERROR.flush()
hr, data = win32file.ReadFile(srcHandle, buffer)
#print >> SPOOL_ERROR, "ReadFile returned '%s', '%s'" % (str(hr), str(data)); SPOOL_ERROR.flush()
if hr != 0:
raise "win32file.ReadFile returned %i, '%s'" % (hr, data)
elif len(data) == 0:
break
except pywintypes.error, e:
#print >> SPOOL_ERROR, "ReadFile threw '%s'" % str(e); SPOOL_ERROR.flush()
if e.args[0] == winerror.ERROR_BROKEN_PIPE:
break
else:
raise e
#print >> SPOOL_ERROR, "Writing to %i file objects..." % len(outFiles); SPOOL_ERROR.flush()
for f in outFiles:
f.write(data)
#print >> SPOOL_ERROR, "Done writing to file objects."; SPOOL_ERROR.flush()
#print >> SPOOL_ERROR, "Writing to destination %s" % str(destHandle); SPOOL_ERROR.flush()
if destHandle:
#print >> SPOOL_ERROR, "Calling WriteFile..."; SPOOL_ERROR.flush()
hr, bytes = win32file.WriteFile(destHandle, data)
#print >> SPOOL_ERROR, "WriteFile() passed %i bytes and returned %i, %i" % (len(data), hr, bytes); SPOOL_ERROR.flush()
if hr != 0 or bytes != len(data):
raise "win32file.WriteFile() passed %i bytes and returned %i, %i" % (len(data), hr, bytes)
srcHandle.Close()
if doneEvent:
win32event.SetEvent(doneEvent)
if destHandle:
destHandle.Close()
except:
info = sys.exc_info()
SPOOL_ERROR.writelines(apply(traceback.format_exception, info), '')
SPOOL_ERROR.flush()
del info
def NullFile(inheritable):
"""Create a null file handle."""
if inheritable:
sa = pywintypes.SECURITY_ATTRIBUTES()
sa.bInheritHandle = 1
else:
sa = None
return win32file.CreateFile("nul",
win32file.GENERIC_READ | win32file.GENERIC_WRITE,
win32file.FILE_SHARE_READ | win32file.FILE_SHARE_WRITE,
sa, win32file.OPEN_EXISTING, 0, None)
| 35.639831 | 127 | 0.653311 |
import os, sys, traceback, string, thread
try:
import win32api
except ImportError, e:
raise ImportError, str(e) + """
Did you install the Python for Windows Extensions?
http://sourceforge.net/projects/pywin32/
"""
import win32process, win32pipe, win32con
import win32event, win32file, winerror
import pywintypes, msvcrt
SPOOL_BYTES = 4096
SPOOL_ERROR = sys.stderr
def CommandLine(command, args):
"""Convert an executable path and a sequence of arguments into a command
line that can be passed to CreateProcess"""
cmd = "\"" + string.replace(command, "\"", "\"\"") + "\""
for arg in args:
cmd = cmd + " \"" + string.replace(arg, "\"", "\"\"") + "\""
return cmd
def CreateProcess(cmd, hStdInput, hStdOutput, hStdError):
"""Creates a new process which uses the specified handles for its standard
input, output, and error. The handles must be inheritable. 0 can be passed
as a special handle indicating that the process should inherit the current
process's input, output, or error streams, and None can be passed to discard
the child process's output or to prevent it from reading any input."""
si = win32process.STARTUPINFO()
si.dwFlags = win32process.STARTF_USESTDHANDLES
if hStdInput == 0:
si.hStdInput = win32api.GetStdHandle(win32api.STD_INPUT_HANDLE)
else:
si.hStdInput = hStdInput
if hStdOutput == 0:
si.hStdOutput = win32api.GetStdHandle(win32api.STD_OUTPUT_HANDLE)
else:
si.hStdOutput = hStdOutput
if hStdError == 0:
si.hStdError = win32api.GetStdHandle(win32api.STD_ERROR_HANDLE)
else:
si.hStdError = hStdError
# create the process
phandle, pid, thandle, tid = win32process.CreateProcess \
( None, # appName
cmd, # commandLine
None, # processAttributes
None, # threadAttributes
1, # bInheritHandles
win32con.NORMAL_PRIORITY_CLASS, # dwCreationFlags
None, # newEnvironment
None, # currentDirectory
si # startupinfo
)
if hStdInput and hasattr(hStdInput, 'Close'):
hStdInput.Close()
if hStdOutput and hasattr(hStdOutput, 'Close'):
hStdOutput.Close()
if hStdError and hasattr(hStdError, 'Close'):
hStdError.Close()
return phandle, pid, thandle, tid
def CreatePipe(readInheritable, writeInheritable):
"""Create a new pipe specifying whether the read and write ends are
inheritable and whether they should be created for blocking or nonblocking
I/O."""
r, w = win32pipe.CreatePipe(None, SPOOL_BYTES)
if readInheritable:
r = MakeInheritedHandle(r)
if writeInheritable:
w = MakeInheritedHandle(w)
return r, w
def File2FileObject(pipe, mode):
"""Make a C stdio file object out of a win32 file handle"""
if string.find(mode, 'r') >= 0:
wmode = os.O_RDONLY
elif string.find(mode, 'w') >= 0:
wmode = os.O_WRONLY
if string.find(mode, 'b') >= 0:
wmode = wmode | os.O_BINARY
if string.find(mode, 't') >= 0:
wmode = wmode | os.O_TEXT
return os.fdopen(msvcrt.open_osfhandle(pipe.Detach(),wmode),mode)
def FileObject2File(fileObject):
"""Get the win32 file handle from a C stdio file object"""
return win32file._get_osfhandle(fileObject.fileno())
def DuplicateHandle(handle):
"""Duplicates a win32 handle."""
proc = win32api.GetCurrentProcess()
return win32api.DuplicateHandle(proc,handle,proc,0,0,win32con.DUPLICATE_SAME_ACCESS)
def MakePrivateHandle(handle, replace = 1):
"""Turn an inherited handle into a non inherited one. This avoids the
handle duplication that occurs on CreateProcess calls which can create
uncloseable pipes."""
### Could change implementation to use SetHandleInformation()...
flags = win32con.DUPLICATE_SAME_ACCESS
proc = win32api.GetCurrentProcess()
if replace: flags = flags | win32con.DUPLICATE_CLOSE_SOURCE
newhandle = win32api.DuplicateHandle(proc,handle,proc,0,0,flags)
if replace: handle.Detach() # handle was already deleted by the last call
return newhandle
def MakeInheritedHandle(handle, replace = 1):
"""Turn a private handle into an inherited one."""
### Could change implementation to use SetHandleInformation()...
flags = win32con.DUPLICATE_SAME_ACCESS
proc = win32api.GetCurrentProcess()
if replace: flags = flags | win32con.DUPLICATE_CLOSE_SOURCE
newhandle = win32api.DuplicateHandle(proc,handle,proc,0,1,flags)
if replace: handle.Detach() # handle was deleted by the last call
return newhandle
def MakeSpyPipe(readInheritable, writeInheritable, outFiles = None, doneEvent = None):
"""Return read and write handles to a pipe that asynchronously writes all of
its input to the files in the outFiles sequence. doneEvent can be None, or a
a win32 event handle that will be set when the write end of pipe is closed.
"""
if outFiles is None:
return CreatePipe(readInheritable, writeInheritable)
r, writeHandle = CreatePipe(0, writeInheritable)
if readInheritable is None:
readHandle, w = None, None
else:
readHandle, w = CreatePipe(readInheritable, 0)
thread.start_new_thread(SpoolWorker, (r, w, outFiles, doneEvent))
return readHandle, writeHandle
def SpoolWorker(srcHandle, destHandle, outFiles, doneEvent):
"""Thread entry point for implementation of MakeSpyPipe"""
try:
buffer = win32file.AllocateReadBuffer(SPOOL_BYTES)
while 1:
try:
#print >> SPOOL_ERROR, "Calling ReadFile..."; SPOOL_ERROR.flush()
hr, data = win32file.ReadFile(srcHandle, buffer)
#print >> SPOOL_ERROR, "ReadFile returned '%s', '%s'" % (str(hr), str(data)); SPOOL_ERROR.flush()
if hr != 0:
raise "win32file.ReadFile returned %i, '%s'" % (hr, data)
elif len(data) == 0:
break
except pywintypes.error, e:
#print >> SPOOL_ERROR, "ReadFile threw '%s'" % str(e); SPOOL_ERROR.flush()
if e.args[0] == winerror.ERROR_BROKEN_PIPE:
break
else:
raise e
#print >> SPOOL_ERROR, "Writing to %i file objects..." % len(outFiles); SPOOL_ERROR.flush()
for f in outFiles:
f.write(data)
#print >> SPOOL_ERROR, "Done writing to file objects."; SPOOL_ERROR.flush()
#print >> SPOOL_ERROR, "Writing to destination %s" % str(destHandle); SPOOL_ERROR.flush()
if destHandle:
#print >> SPOOL_ERROR, "Calling WriteFile..."; SPOOL_ERROR.flush()
hr, bytes = win32file.WriteFile(destHandle, data)
#print >> SPOOL_ERROR, "WriteFile() passed %i bytes and returned %i, %i" % (len(data), hr, bytes); SPOOL_ERROR.flush()
if hr != 0 or bytes != len(data):
raise "win32file.WriteFile() passed %i bytes and returned %i, %i" % (len(data), hr, bytes)
srcHandle.Close()
if doneEvent:
win32event.SetEvent(doneEvent)
if destHandle:
destHandle.Close()
except:
info = sys.exc_info()
SPOOL_ERROR.writelines(apply(traceback.format_exception, info), '')
SPOOL_ERROR.flush()
del info
def NullFile(inheritable):
"""Create a null file handle."""
if inheritable:
sa = pywintypes.SECURITY_ATTRIBUTES()
sa.bInheritHandle = 1
else:
sa = None
return win32file.CreateFile("nul",
win32file.GENERIC_READ | win32file.GENERIC_WRITE,
win32file.FILE_SHARE_READ | win32file.FILE_SHARE_WRITE,
sa, win32file.OPEN_EXISTING, 0, None)
| false | true |
f71e798522edd2d2ed86a48c227d9550e9392a77 | 19,740 | py | Python | tests/wallet/did_wallet/test_did.py | MintNetwork/mint-blockchain | 65ec05a015a07664ed25f83efa736065a17f7d7a | [
"Apache-2.0"
] | 12 | 2021-08-18T20:53:31.000Z | 2022-03-15T21:45:13.000Z | tests/wallet/did_wallet/test_did.py | MintNetwork/mint-blockchain | 65ec05a015a07664ed25f83efa736065a17f7d7a | [
"Apache-2.0"
] | 34 | 2021-08-18T19:12:11.000Z | 2022-01-06T17:15:34.000Z | tests/wallet/did_wallet/test_did.py | MintNetwork/mint-blockchain | 65ec05a015a07664ed25f83efa736065a17f7d7a | [
"Apache-2.0"
] | 7 | 2021-08-18T20:53:34.000Z | 2022-03-15T08:37:40.000Z | import asyncio
import pytest
from mint.simulator.simulator_protocol import FarmNewBlockProtocol
from mint.types.peer_info import PeerInfo
from mint.util.ints import uint16, uint32, uint64
from tests.setup_nodes import setup_simulators_and_wallets
from mint.wallet.did_wallet.did_wallet import DIDWallet
from mint.types.blockchain_format.program import Program
from blspy import AugSchemeMPL
from mint.types.spend_bundle import SpendBundle
from mint.consensus.block_rewards import calculate_pool_reward, calculate_base_farmer_reward
from tests.time_out_assert import time_out_assert
@pytest.fixture(scope="module")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
class TestDIDWallet:
@pytest.fixture(scope="function")
async def wallet_node(self):
async for _ in setup_simulators_and_wallets(1, 1, {}):
yield _
@pytest.fixture(scope="function")
async def two_wallet_nodes(self):
async for _ in setup_simulators_and_wallets(1, 2, {}):
yield _
@pytest.fixture(scope="function")
async def three_wallet_nodes(self):
async for _ in setup_simulators_and_wallets(1, 3, {}):
yield _
@pytest.fixture(scope="function")
async def two_wallet_nodes_five_freeze(self):
async for _ in setup_simulators_and_wallets(1, 2, {}):
yield _
@pytest.fixture(scope="function")
async def three_sim_two_wallets(self):
async for _ in setup_simulators_and_wallets(3, 2, {}):
yield _
@pytest.mark.asyncio
async def test_creation_from_backup_file(self, three_wallet_nodes):
num_blocks = 5
full_nodes, wallets = three_wallet_nodes
full_node_api = full_nodes[0]
full_node_server = full_node_api.server
wallet_node_0, server_0 = wallets[0]
wallet_node_1, server_1 = wallets[1]
wallet_node_2, server_2 = wallets[2]
wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
wallet_1 = wallet_node_1.wallet_state_manager.main_wallet
wallet_2 = wallet_node_2.wallet_state_manager.main_wallet
ph = await wallet_0.get_new_puzzlehash()
ph1 = await wallet_1.get_new_puzzlehash()
ph2 = await wallet_2.get_new_puzzlehash()
await server_0.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
await server_1.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
await server_2.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks - 1)
]
)
await time_out_assert(10, wallet_0.get_unconfirmed_balance, funds)
await time_out_assert(10, wallet_0.get_confirmed_balance, funds)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph1))
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
# Wallet1 sets up DIDWallet1 without any backup set
async with wallet_node_0.wallet_state_manager.lock:
did_wallet_0: DIDWallet = await DIDWallet.create_new_did_wallet(
wallet_node_0.wallet_state_manager, wallet_0, uint64(101)
)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await time_out_assert(15, did_wallet_0.get_confirmed_balance, 101)
await time_out_assert(15, did_wallet_0.get_unconfirmed_balance, 101)
await time_out_assert(15, did_wallet_0.get_pending_change_balance, 0)
# Wallet1 sets up DIDWallet_1 with DIDWallet_0 as backup
backup_ids = [bytes.fromhex(did_wallet_0.get_my_DID())]
async with wallet_node_1.wallet_state_manager.lock:
did_wallet_1: DIDWallet = await DIDWallet.create_new_did_wallet(
wallet_node_1.wallet_state_manager, wallet_1, uint64(201), backup_ids
)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await time_out_assert(15, did_wallet_1.get_confirmed_balance, 201)
await time_out_assert(15, did_wallet_1.get_unconfirmed_balance, 201)
await time_out_assert(15, did_wallet_1.get_pending_change_balance, 0)
filename = "test.backup"
did_wallet_1.create_backup(filename)
# Wallet2 recovers DIDWallet2 to a new set of keys
async with wallet_node_2.wallet_state_manager.lock:
did_wallet_2 = await DIDWallet.create_new_did_wallet_from_recovery(
wallet_node_2.wallet_state_manager, wallet_2, filename
)
coins = await did_wallet_1.select_coins(1)
coin = coins.copy().pop()
assert did_wallet_2.did_info.temp_coin == coin
newpuzhash = await did_wallet_2.get_new_inner_hash()
pubkey = bytes(
(await did_wallet_2.wallet_state_manager.get_unused_derivation_record(did_wallet_2.wallet_info.id)).pubkey
)
message_spend_bundle = await did_wallet_0.create_attestment(
did_wallet_2.did_info.temp_coin.name(), newpuzhash, pubkey, "test.attest"
)
print(f"pubkey: {pubkey}")
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
(
test_info_list,
test_message_spend_bundle,
) = await did_wallet_2.load_attest_files_for_recovery_spend(["test.attest"])
assert message_spend_bundle == test_message_spend_bundle
await did_wallet_2.recovery_spend(
did_wallet_2.did_info.temp_coin,
newpuzhash,
test_info_list,
pubkey,
test_message_spend_bundle,
)
print(f"pubkey: {did_wallet_2}")
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await time_out_assert(45, did_wallet_2.get_confirmed_balance, 201)
await time_out_assert(45, did_wallet_2.get_unconfirmed_balance, 201)
some_ph = 32 * b"\2"
await did_wallet_2.create_exit_spend(some_ph)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
async def get_coins_with_ph():
coins = await full_node_api.full_node.coin_store.get_coin_records_by_puzzle_hash(True, some_ph)
if len(coins) == 1:
return True
return False
await time_out_assert(15, get_coins_with_ph, True)
await time_out_assert(45, did_wallet_2.get_confirmed_balance, 0)
await time_out_assert(45, did_wallet_2.get_unconfirmed_balance, 0)
@pytest.mark.asyncio
async def test_did_recovery_with_multiple_backup_dids(self, two_wallet_nodes):
num_blocks = 5
full_nodes, wallets = two_wallet_nodes
full_node_1 = full_nodes[0]
server_1 = full_node_1.server
wallet_node, server_2 = wallets[0]
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
wallet2 = wallet_node_2.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo("localhost", uint16(server_1._port)), None)
await server_3.start_client(PeerInfo("localhost", uint16(server_1._port)), None)
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks - 1)
]
)
await time_out_assert(15, wallet.get_confirmed_balance, funds)
async with wallet_node.wallet_state_manager.lock:
did_wallet: DIDWallet = await DIDWallet.create_new_did_wallet(
wallet_node.wallet_state_manager, wallet, uint64(101)
)
ph = await wallet2.get_new_puzzlehash()
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await time_out_assert(15, did_wallet.get_confirmed_balance, 101)
await time_out_assert(15, did_wallet.get_unconfirmed_balance, 101)
recovery_list = [bytes.fromhex(did_wallet.get_my_DID())]
async with wallet_node_2.wallet_state_manager.lock:
did_wallet_2: DIDWallet = await DIDWallet.create_new_did_wallet(
wallet_node_2.wallet_state_manager, wallet2, uint64(101), recovery_list
)
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await time_out_assert(15, did_wallet_2.get_confirmed_balance, 101)
await time_out_assert(15, did_wallet_2.get_unconfirmed_balance, 101)
assert did_wallet_2.did_info.backup_ids == recovery_list
recovery_list.append(bytes.fromhex(did_wallet_2.get_my_DID()))
async with wallet_node_2.wallet_state_manager.lock:
did_wallet_3: DIDWallet = await DIDWallet.create_new_did_wallet(
wallet_node_2.wallet_state_manager, wallet2, uint64(201), recovery_list
)
ph2 = await wallet.get_new_puzzlehash()
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
assert did_wallet_3.did_info.backup_ids == recovery_list
await time_out_assert(15, did_wallet_3.get_confirmed_balance, 201)
await time_out_assert(15, did_wallet_3.get_unconfirmed_balance, 201)
coins = await did_wallet_3.select_coins(1)
coin = coins.pop()
filename = "test.backup"
did_wallet_3.create_backup(filename)
async with wallet_node.wallet_state_manager.lock:
did_wallet_4 = await DIDWallet.create_new_did_wallet_from_recovery(
wallet_node.wallet_state_manager,
wallet,
filename,
)
pubkey = (
await did_wallet_4.wallet_state_manager.get_unused_derivation_record(did_wallet_2.wallet_info.id)
).pubkey
new_ph = await did_wallet_4.get_new_inner_hash()
message_spend_bundle = await did_wallet.create_attestment(coin.name(), new_ph, pubkey, "test1.attest")
message_spend_bundle2 = await did_wallet_2.create_attestment(coin.name(), new_ph, pubkey, "test2.attest")
message_spend_bundle = message_spend_bundle.aggregate([message_spend_bundle, message_spend_bundle2])
(
test_info_list,
test_message_spend_bundle,
) = await did_wallet_4.load_attest_files_for_recovery_spend(["test1.attest", "test2.attest"])
assert message_spend_bundle == test_message_spend_bundle
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
await did_wallet_4.recovery_spend(coin, new_ph, test_info_list, pubkey, message_spend_bundle)
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
await time_out_assert(15, did_wallet_4.get_confirmed_balance, 201)
await time_out_assert(15, did_wallet_4.get_unconfirmed_balance, 201)
await time_out_assert(15, did_wallet_3.get_confirmed_balance, 0)
await time_out_assert(15, did_wallet_3.get_unconfirmed_balance, 0)
@pytest.mark.asyncio
async def test_did_recovery_with_empty_set(self, two_wallet_nodes):
    """A recovery spend of a DID coin with an empty info list and an empty
    aggregated signature must yield a spend bundle with no additions.
    """
    num_blocks = 5
    full_nodes, wallets = two_wallet_nodes
    full_node_1 = full_nodes[0]
    server_1 = full_node_1.server
    wallet_node, server_2 = wallets[0]
    wallet_node_2, server_3 = wallets[1]
    wallet = wallet_node.wallet_state_manager.main_wallet
    ph = await wallet.get_new_puzzlehash()

    # Connect both wallet nodes to the single simulator full node.
    await server_2.start_client(PeerInfo("localhost", uint16(server_1._port)), None)
    await server_3.start_client(PeerInfo("localhost", uint16(server_1._port)), None)

    # Farm blocks to the main wallet so it has funds to create a DID.
    for i in range(1, num_blocks):
        await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))

    # Expected farming rewards (pool + farmer) for the confirmed blocks.
    funds = sum(
        [
            calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
            for i in range(1, num_blocks - 1)
        ]
    )
    await time_out_assert(15, wallet.get_confirmed_balance, funds)

    # Create a DID wallet; note no recovery list is passed here.
    async with wallet_node.wallet_state_manager.lock:
        did_wallet: DIDWallet = await DIDWallet.create_new_did_wallet(
            wallet_node.wallet_state_manager, wallet, uint64(101)
        )
    for i in range(1, num_blocks):
        await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    await time_out_assert(15, did_wallet.get_confirmed_balance, 101)
    await time_out_assert(15, did_wallet.get_unconfirmed_balance, 101)

    # Attempt a recovery spend with no attestations at all.
    coins = await did_wallet.select_coins(1)
    coin = coins.pop()
    info = Program.to([])  # empty recovery info
    pubkey = (await did_wallet.wallet_state_manager.get_unused_derivation_record(did_wallet.wallet_info.id)).pubkey
    spend_bundle = await did_wallet.recovery_spend(
        coin, ph, info, pubkey, SpendBundle([], AugSchemeMPL.aggregate([]))
    )
    # Nothing was attested, so the spend must create no new coins.
    additions = spend_bundle.additions()
    assert additions == []
@pytest.mark.asyncio
async def test_did_attest_after_recovery(self, two_wallet_nodes):
    """A DID wallet created by recovery can itself attest for another recovery.

    Flow exercised: DID 1's recovery list is updated to point at DID 2;
    DID 2 is recovered into wallet 3 with an attestment from DID 1; then
    wallet 3 produces the attestment used to recover DID 1 into wallet 4.
    """
    num_blocks = 5
    full_nodes, wallets = two_wallet_nodes
    full_node_1 = full_nodes[0]
    server_1 = full_node_1.server
    wallet_node, server_2 = wallets[0]
    wallet_node_2, server_3 = wallets[1]
    wallet = wallet_node.wallet_state_manager.main_wallet
    wallet2 = wallet_node_2.wallet_state_manager.main_wallet
    ph = await wallet.get_new_puzzlehash()

    # Connect both wallet nodes to the simulator full node and fund wallet 1.
    await server_2.start_client(PeerInfo("localhost", uint16(server_1._port)), None)
    await server_3.start_client(PeerInfo("localhost", uint16(server_1._port)), None)
    for i in range(1, num_blocks):
        await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))

    # Expected farming rewards (pool + farmer) for the confirmed blocks.
    funds = sum(
        [
            calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
            for i in range(1, num_blocks - 1)
        ]
    )
    await time_out_assert(15, wallet.get_confirmed_balance, funds)

    # DID 1 on wallet node 1 (no recovery list yet).
    async with wallet_node.wallet_state_manager.lock:
        did_wallet: DIDWallet = await DIDWallet.create_new_did_wallet(
            wallet_node.wallet_state_manager, wallet, uint64(101)
        )
    ph2 = await wallet2.get_new_puzzlehash()
    for i in range(1, num_blocks):
        await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
    await time_out_assert(15, did_wallet.get_confirmed_balance, 101)
    await time_out_assert(15, did_wallet.get_unconfirmed_balance, 101)

    # DID 2 on wallet node 2, with DID 1 as its recovery DID.
    recovery_list = [bytes.fromhex(did_wallet.get_my_DID())]
    async with wallet_node_2.wallet_state_manager.lock:
        did_wallet_2: DIDWallet = await DIDWallet.create_new_did_wallet(
            wallet_node_2.wallet_state_manager, wallet2, uint64(101), recovery_list
        )
    ph = await wallet.get_new_puzzlehash()
    for i in range(1, num_blocks):
        await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    await time_out_assert(15, did_wallet_2.get_confirmed_balance, 101)
    await time_out_assert(15, did_wallet_2.get_unconfirmed_balance, 101)
    assert did_wallet_2.did_info.backup_ids == recovery_list

    # Update coin with new ID info: DID 1's recovery list now points at DID 2.
    recovery_list = [bytes.fromhex(did_wallet_2.get_my_DID())]
    await did_wallet.update_recovery_list(recovery_list, uint64(1))
    assert did_wallet.did_info.backup_ids == recovery_list
    await did_wallet.create_update_spend()
    for i in range(1, num_blocks):
        await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
    await time_out_assert(15, did_wallet.get_confirmed_balance, 101)
    await time_out_assert(15, did_wallet.get_unconfirmed_balance, 101)

    # DID Wallet 2 recovers into DID Wallet 3 with new innerpuz,
    # using an attestment produced by DID 1.
    filename = "test.backup"
    did_wallet_2.create_backup(filename)
    async with wallet_node.wallet_state_manager.lock:
        did_wallet_3 = await DIDWallet.create_new_did_wallet_from_recovery(
            wallet_node.wallet_state_manager,
            wallet,
            filename,
        )
    new_ph = await did_wallet_3.get_new_inner_hash()
    coins = await did_wallet_2.select_coins(1)
    coin = coins.pop()
    pubkey = (
        await did_wallet_3.wallet_state_manager.get_unused_derivation_record(did_wallet_3.wallet_info.id)
    ).pubkey
    message_spend_bundle = await did_wallet.create_attestment(coin.name(), new_ph, pubkey, "test.attest")
    for i in range(1, num_blocks):
        await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
    (
        info,
        message_spend_bundle,
    ) = await did_wallet_3.load_attest_files_for_recovery_spend(["test.attest"])
    await did_wallet_3.recovery_spend(coin, new_ph, info, pubkey, message_spend_bundle)
    for i in range(1, num_blocks):
        await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    await time_out_assert(15, did_wallet_3.get_confirmed_balance, 101)
    await time_out_assert(15, did_wallet_3.get_unconfirmed_balance, 101)

    # DID Wallet 1 recovery spends into DID Wallet 4 -- this time the
    # attestment comes from wallet 3, which was itself created by recovery.
    filename = "test.backup"
    did_wallet.create_backup(filename)
    async with wallet_node_2.wallet_state_manager.lock:
        did_wallet_4 = await DIDWallet.create_new_did_wallet_from_recovery(
            wallet_node_2.wallet_state_manager,
            wallet2,
            filename,
        )
    coins = await did_wallet.select_coins(1)
    coin = coins.pop()
    new_ph = await did_wallet_4.get_new_inner_hash()
    pubkey = (
        await did_wallet_4.wallet_state_manager.get_unused_derivation_record(did_wallet_4.wallet_info.id)
    ).pubkey
    await did_wallet_3.create_attestment(coin.name(), new_ph, pubkey, "test.attest")
    for i in range(1, num_blocks):
        await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
    (
        test_info_list,
        test_message_spend_bundle,
    ) = await did_wallet_4.load_attest_files_for_recovery_spend(["test.attest"])
    await did_wallet_4.recovery_spend(coin, new_ph, test_info_list, pubkey, test_message_spend_bundle)
    for i in range(1, num_blocks):
        await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    # Funds end up in wallet 4; the original DID 1 wallet is emptied.
    await time_out_assert(15, did_wallet_4.get_confirmed_balance, 101)
    await time_out_assert(15, did_wallet_4.get_unconfirmed_balance, 101)
    await time_out_assert(15, did_wallet.get_confirmed_balance, 0)
    await time_out_assert(15, did_wallet.get_unconfirmed_balance, 0)
| 43.480176 | 119 | 0.693262 | import asyncio
import pytest
from mint.simulator.simulator_protocol import FarmNewBlockProtocol
from mint.types.peer_info import PeerInfo
from mint.util.ints import uint16, uint32, uint64
from tests.setup_nodes import setup_simulators_and_wallets
from mint.wallet.did_wallet.did_wallet import DIDWallet
from mint.types.blockchain_format.program import Program
from blspy import AugSchemeMPL
from mint.types.spend_bundle import SpendBundle
from mint.consensus.block_rewards import calculate_pool_reward, calculate_base_farmer_reward
from tests.time_out_assert import time_out_assert
@pytest.fixture(scope="module")
def event_loop():
    """Yield one module-scoped asyncio event loop shared by the async tests."""
    yield asyncio.get_event_loop()
class TestDIDWallet:
@pytest.fixture(scope="function")
async def wallet_node(self):
    # One simulator full node and one wallet node, no extra config overrides.
    async for _ in setup_simulators_and_wallets(1, 1, {}):
        yield _
@pytest.fixture(scope="function")
async def two_wallet_nodes(self):
    # One simulator full node and two wallet nodes.
    async for _ in setup_simulators_and_wallets(1, 2, {}):
        yield _
@pytest.fixture(scope="function")
async def three_wallet_nodes(self):
    # One simulator full node and three wallet nodes.
    async for _ in setup_simulators_and_wallets(1, 3, {}):
        yield _
@pytest.fixture(scope="function")
async def two_wallet_nodes_five_freeze(self):
    # NOTE(review): despite the "five_freeze" name this is configured
    # identically to two_wallet_nodes (1 simulator, 2 wallets, no
    # overrides) -- confirm whether a freeze config was intended.
    async for _ in setup_simulators_and_wallets(1, 2, {}):
        yield _
@pytest.fixture(scope="function")
async def three_sim_two_wallets(self):
    # Three simulator full nodes and two wallet nodes.
    async for _ in setup_simulators_and_wallets(3, 2, {}):
        yield _
@pytest.mark.asyncio
async def test_creation_from_backup_file(self, three_wallet_nodes):
    """Create a DID wallet from a backup file and recover it.

    DID 1 (on node 1) lists DID 0 as its backup.  Node 2 re-creates the
    wallet from DID 1's backup file, DID 0 attests, the recovery spend is
    made, and finally the recovered funds are exit-spent to a plain
    puzzle hash.
    """
    num_blocks = 5
    full_nodes, wallets = three_wallet_nodes
    full_node_api = full_nodes[0]
    full_node_server = full_node_api.server
    wallet_node_0, server_0 = wallets[0]
    wallet_node_1, server_1 = wallets[1]
    wallet_node_2, server_2 = wallets[2]
    wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
    wallet_1 = wallet_node_1.wallet_state_manager.main_wallet
    wallet_2 = wallet_node_2.wallet_state_manager.main_wallet
    ph = await wallet_0.get_new_puzzlehash()
    ph1 = await wallet_1.get_new_puzzlehash()
    ph2 = await wallet_2.get_new_puzzlehash()

    # Connect all three wallet nodes to the simulator full node.
    await server_0.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
    await server_1.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
    await server_2.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)

    # Fund wallet 0 and verify the expected farming rewards arrived.
    for i in range(1, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    funds = sum(
        [
            calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
            for i in range(1, num_blocks - 1)
        ]
    )
    await time_out_assert(10, wallet_0.get_unconfirmed_balance, funds)
    await time_out_assert(10, wallet_0.get_confirmed_balance, funds)

    # Fund wallets 1 and 2 as well.
    for i in range(1, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph1))
    for i in range(1, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph2))

    # DID 0: the wallet that will later attest for the recovery.
    async with wallet_node_0.wallet_state_manager.lock:
        did_wallet_0: DIDWallet = await DIDWallet.create_new_did_wallet(
            wallet_node_0.wallet_state_manager, wallet_0, uint64(101)
        )
    for i in range(1, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    await time_out_assert(15, did_wallet_0.get_confirmed_balance, 101)
    await time_out_assert(15, did_wallet_0.get_unconfirmed_balance, 101)
    await time_out_assert(15, did_wallet_0.get_pending_change_balance, 0)

    # DID 1: created with DID 0 as its backup/recovery DID.
    backup_ids = [bytes.fromhex(did_wallet_0.get_my_DID())]
    async with wallet_node_1.wallet_state_manager.lock:
        did_wallet_1: DIDWallet = await DIDWallet.create_new_did_wallet(
            wallet_node_1.wallet_state_manager, wallet_1, uint64(201), backup_ids
        )
    for i in range(1, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    await time_out_assert(15, did_wallet_1.get_confirmed_balance, 201)
    await time_out_assert(15, did_wallet_1.get_unconfirmed_balance, 201)
    await time_out_assert(15, did_wallet_1.get_pending_change_balance, 0)

    # Write DID 1's backup file and rebuild the wallet from it on node 2.
    filename = "test.backup"
    did_wallet_1.create_backup(filename)
    async with wallet_node_2.wallet_state_manager.lock:
        did_wallet_2 = await DIDWallet.create_new_did_wallet_from_recovery(
            wallet_node_2.wallet_state_manager, wallet_2, filename
        )
    # The recovery wallet must have picked up DID 1's coin from the backup.
    coins = await did_wallet_1.select_coins(1)
    coin = coins.copy().pop()
    assert did_wallet_2.did_info.temp_coin == coin

    # DID 0 attests to the recovery of that coin to wallet 2's new innerpuz.
    newpuzhash = await did_wallet_2.get_new_inner_hash()
    pubkey = bytes(
        (await did_wallet_2.wallet_state_manager.get_unused_derivation_record(did_wallet_2.wallet_info.id)).pubkey
    )
    message_spend_bundle = await did_wallet_0.create_attestment(
        did_wallet_2.did_info.temp_coin.name(), newpuzhash, pubkey, "test.attest"
    )
    print(f"pubkey: {pubkey}")
    for i in range(1, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))

    # The attest file on disk must round-trip to the same spend bundle.
    (
        test_info_list,
        test_message_spend_bundle,
    ) = await did_wallet_2.load_attest_files_for_recovery_spend(["test.attest"])
    assert message_spend_bundle == test_message_spend_bundle

    # Perform the recovery spend; wallet 2 should end up with DID 1's funds.
    await did_wallet_2.recovery_spend(
        did_wallet_2.did_info.temp_coin,
        newpuzhash,
        test_info_list,
        pubkey,
        test_message_spend_bundle,
    )
    # NOTE(review): the label says "pubkey" but this prints the wallet object.
    print(f"pubkey: {did_wallet_2}")
    for i in range(1, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    await time_out_assert(45, did_wallet_2.get_confirmed_balance, 201)
    await time_out_assert(45, did_wallet_2.get_unconfirmed_balance, 201)

    # Finally, exit-spend everything to a fixed plain puzzle hash.
    some_ph = 32 * b"\2"
    await did_wallet_2.create_exit_spend(some_ph)
    for i in range(1, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))

    async def get_coins_with_ph():
        # True once exactly one coin record exists for the exit puzzle hash.
        coins = await full_node_api.full_node.coin_store.get_coin_records_by_puzzle_hash(True, some_ph)
        if len(coins) == 1:
            return True
        return False

    await time_out_assert(15, get_coins_with_ph, True)
    await time_out_assert(45, did_wallet_2.get_confirmed_balance, 0)
    await time_out_assert(45, did_wallet_2.get_unconfirmed_balance, 0)
@pytest.mark.asyncio
async def test_did_recovery_with_multiple_backup_dids(self, two_wallet_nodes):
    """Recover a DID whose recovery list contains two backup DIDs.

    DID 3 lists both DID 1 and DID 2 as backups; wallet 4 is rebuilt from
    DID 3's backup file and its recovery spend uses the aggregate of both
    wallets' attestments.
    """
    num_blocks = 5
    full_nodes, wallets = two_wallet_nodes
    full_node_1 = full_nodes[0]
    server_1 = full_node_1.server
    wallet_node, server_2 = wallets[0]
    wallet_node_2, server_3 = wallets[1]
    wallet = wallet_node.wallet_state_manager.main_wallet
    wallet2 = wallet_node_2.wallet_state_manager.main_wallet
    ph = await wallet.get_new_puzzlehash()

    # Connect both wallet nodes to the simulator full node and fund wallet 1.
    await server_2.start_client(PeerInfo("localhost", uint16(server_1._port)), None)
    await server_3.start_client(PeerInfo("localhost", uint16(server_1._port)), None)
    for i in range(1, num_blocks):
        await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    funds = sum(
        [
            calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
            for i in range(1, num_blocks - 1)
        ]
    )
    await time_out_assert(15, wallet.get_confirmed_balance, funds)

    # DID 1 on wallet node 1.
    async with wallet_node.wallet_state_manager.lock:
        did_wallet: DIDWallet = await DIDWallet.create_new_did_wallet(
            wallet_node.wallet_state_manager, wallet, uint64(101)
        )
    ph = await wallet2.get_new_puzzlehash()
    for i in range(1, num_blocks):
        await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    await time_out_assert(15, did_wallet.get_confirmed_balance, 101)
    await time_out_assert(15, did_wallet.get_unconfirmed_balance, 101)

    # DID 2 on wallet node 2, with DID 1 as its only backup.
    recovery_list = [bytes.fromhex(did_wallet.get_my_DID())]
    async with wallet_node_2.wallet_state_manager.lock:
        did_wallet_2: DIDWallet = await DIDWallet.create_new_did_wallet(
            wallet_node_2.wallet_state_manager, wallet2, uint64(101), recovery_list
        )
    for i in range(1, num_blocks):
        await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    await time_out_assert(15, did_wallet_2.get_confirmed_balance, 101)
    await time_out_assert(15, did_wallet_2.get_unconfirmed_balance, 101)
    assert did_wallet_2.did_info.backup_ids == recovery_list

    # DID 3, whose backup list now holds both DID 1 and DID 2.
    recovery_list.append(bytes.fromhex(did_wallet_2.get_my_DID()))
    async with wallet_node_2.wallet_state_manager.lock:
        did_wallet_3: DIDWallet = await DIDWallet.create_new_did_wallet(
            wallet_node_2.wallet_state_manager, wallet2, uint64(201), recovery_list
        )
    ph2 = await wallet.get_new_puzzlehash()
    for i in range(1, num_blocks):
        await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
    assert did_wallet_3.did_info.backup_ids == recovery_list
    await time_out_assert(15, did_wallet_3.get_confirmed_balance, 201)
    await time_out_assert(15, did_wallet_3.get_unconfirmed_balance, 201)
    coins = await did_wallet_3.select_coins(1)
    coin = coins.pop()

    # Rebuild DID 3 as wallet 4 on node 1 from its backup file.
    filename = "test.backup"
    did_wallet_3.create_backup(filename)
    async with wallet_node.wallet_state_manager.lock:
        did_wallet_4 = await DIDWallet.create_new_did_wallet_from_recovery(
            wallet_node.wallet_state_manager,
            wallet,
            filename,
        )
    # NOTE(review): the derivation record is looked up with
    # did_wallet_2.wallet_info.id rather than did_wallet_4's own id --
    # confirm whether that is intentional.
    pubkey = (
        await did_wallet_4.wallet_state_manager.get_unused_derivation_record(did_wallet_2.wallet_info.id)
    ).pubkey
    new_ph = await did_wallet_4.get_new_inner_hash()

    # Both backup DIDs attest; their bundles are aggregated into one.
    message_spend_bundle = await did_wallet.create_attestment(coin.name(), new_ph, pubkey, "test1.attest")
    message_spend_bundle2 = await did_wallet_2.create_attestment(coin.name(), new_ph, pubkey, "test2.attest")
    message_spend_bundle = message_spend_bundle.aggregate([message_spend_bundle, message_spend_bundle2])
    (
        test_info_list,
        test_message_spend_bundle,
    ) = await did_wallet_4.load_attest_files_for_recovery_spend(["test1.attest", "test2.attest"])
    assert message_spend_bundle == test_message_spend_bundle

    # Recovery spend moves DID 3's funds into wallet 4.
    for i in range(1, num_blocks):
        await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
    await did_wallet_4.recovery_spend(coin, new_ph, test_info_list, pubkey, message_spend_bundle)
    for i in range(1, num_blocks):
        await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
    await time_out_assert(15, did_wallet_4.get_confirmed_balance, 201)
    await time_out_assert(15, did_wallet_4.get_unconfirmed_balance, 201)
    await time_out_assert(15, did_wallet_3.get_confirmed_balance, 0)
    await time_out_assert(15, did_wallet_3.get_unconfirmed_balance, 0)
@pytest.mark.asyncio
async def test_did_recovery_with_empty_set(self, two_wallet_nodes):
num_blocks = 5
full_nodes, wallets = two_wallet_nodes
full_node_1 = full_nodes[0]
server_1 = full_node_1.server
wallet_node, server_2 = wallets[0]
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo("localhost", uint16(server_1._port)), None)
await server_3.start_client(PeerInfo("localhost", uint16(server_1._port)), None)
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks - 1)
]
)
await time_out_assert(15, wallet.get_confirmed_balance, funds)
async with wallet_node.wallet_state_manager.lock:
did_wallet: DIDWallet = await DIDWallet.create_new_did_wallet(
wallet_node.wallet_state_manager, wallet, uint64(101)
)
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await time_out_assert(15, did_wallet.get_confirmed_balance, 101)
await time_out_assert(15, did_wallet.get_unconfirmed_balance, 101)
coins = await did_wallet.select_coins(1)
coin = coins.pop()
info = Program.to([])
pubkey = (await did_wallet.wallet_state_manager.get_unused_derivation_record(did_wallet.wallet_info.id)).pubkey
spend_bundle = await did_wallet.recovery_spend(
coin, ph, info, pubkey, SpendBundle([], AugSchemeMPL.aggregate([]))
)
additions = spend_bundle.additions()
assert additions == []
@pytest.mark.asyncio
async def test_did_attest_after_recovery(self, two_wallet_nodes):
num_blocks = 5
full_nodes, wallets = two_wallet_nodes
full_node_1 = full_nodes[0]
server_1 = full_node_1.server
wallet_node, server_2 = wallets[0]
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
wallet2 = wallet_node_2.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo("localhost", uint16(server_1._port)), None)
await server_3.start_client(PeerInfo("localhost", uint16(server_1._port)), None)
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks - 1)
]
)
await time_out_assert(15, wallet.get_confirmed_balance, funds)
async with wallet_node.wallet_state_manager.lock:
did_wallet: DIDWallet = await DIDWallet.create_new_did_wallet(
wallet_node.wallet_state_manager, wallet, uint64(101)
)
ph2 = await wallet2.get_new_puzzlehash()
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
await time_out_assert(15, did_wallet.get_confirmed_balance, 101)
await time_out_assert(15, did_wallet.get_unconfirmed_balance, 101)
recovery_list = [bytes.fromhex(did_wallet.get_my_DID())]
async with wallet_node_2.wallet_state_manager.lock:
did_wallet_2: DIDWallet = await DIDWallet.create_new_did_wallet(
wallet_node_2.wallet_state_manager, wallet2, uint64(101), recovery_list
)
ph = await wallet.get_new_puzzlehash()
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await time_out_assert(15, did_wallet_2.get_confirmed_balance, 101)
await time_out_assert(15, did_wallet_2.get_unconfirmed_balance, 101)
assert did_wallet_2.did_info.backup_ids == recovery_list
recovery_list = [bytes.fromhex(did_wallet_2.get_my_DID())]
await did_wallet.update_recovery_list(recovery_list, uint64(1))
assert did_wallet.did_info.backup_ids == recovery_list
await did_wallet.create_update_spend()
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
await time_out_assert(15, did_wallet.get_confirmed_balance, 101)
await time_out_assert(15, did_wallet.get_unconfirmed_balance, 101)
filename = "test.backup"
did_wallet_2.create_backup(filename)
async with wallet_node.wallet_state_manager.lock:
did_wallet_3 = await DIDWallet.create_new_did_wallet_from_recovery(
wallet_node.wallet_state_manager,
wallet,
filename,
)
new_ph = await did_wallet_3.get_new_inner_hash()
coins = await did_wallet_2.select_coins(1)
coin = coins.pop()
pubkey = (
await did_wallet_3.wallet_state_manager.get_unused_derivation_record(did_wallet_3.wallet_info.id)
).pubkey
message_spend_bundle = await did_wallet.create_attestment(coin.name(), new_ph, pubkey, "test.attest")
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
(
info,
message_spend_bundle,
) = await did_wallet_3.load_attest_files_for_recovery_spend(["test.attest"])
await did_wallet_3.recovery_spend(coin, new_ph, info, pubkey, message_spend_bundle)
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await time_out_assert(15, did_wallet_3.get_confirmed_balance, 101)
await time_out_assert(15, did_wallet_3.get_unconfirmed_balance, 101)
filename = "test.backup"
did_wallet.create_backup(filename)
async with wallet_node_2.wallet_state_manager.lock:
did_wallet_4 = await DIDWallet.create_new_did_wallet_from_recovery(
wallet_node_2.wallet_state_manager,
wallet2,
filename,
)
coins = await did_wallet.select_coins(1)
coin = coins.pop()
new_ph = await did_wallet_4.get_new_inner_hash()
pubkey = (
await did_wallet_4.wallet_state_manager.get_unused_derivation_record(did_wallet_4.wallet_info.id)
).pubkey
await did_wallet_3.create_attestment(coin.name(), new_ph, pubkey, "test.attest")
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
(
test_info_list,
test_message_spend_bundle,
) = await did_wallet_4.load_attest_files_for_recovery_spend(["test.attest"])
await did_wallet_4.recovery_spend(coin, new_ph, test_info_list, pubkey, test_message_spend_bundle)
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await time_out_assert(15, did_wallet_4.get_confirmed_balance, 101)
await time_out_assert(15, did_wallet_4.get_unconfirmed_balance, 101)
await time_out_assert(15, did_wallet.get_confirmed_balance, 0)
await time_out_assert(15, did_wallet.get_unconfirmed_balance, 0)
| true | true |
f71e799f520b151af3e8ef5669e04ee4d51fa841 | 1,587 | py | Python | amulog/external/tpl_match.py | cpflat/amulog | b7a8c7478d2e5253158f0bce3a7f7109d23e40cb | [
"BSD-3-Clause"
] | 5 | 2019-07-03T09:57:30.000Z | 2021-02-13T13:15:47.000Z | amulog/external/tpl_match.py | cpflat/amulog | b7a8c7478d2e5253158f0bce3a7f7109d23e40cb | [
"BSD-3-Clause"
] | null | null | null | amulog/external/tpl_match.py | cpflat/amulog | b7a8c7478d2e5253158f0bce3a7f7109d23e40cb | [
"BSD-3-Clause"
] | 1 | 2021-09-09T02:21:42.000Z | 2021-09-09T02:21:42.000Z | #!/usr/bin/env python
# coding: utf-8
"""Matching raw log messages and its templates
that is generated by external tools."""
import re
from collections import defaultdict
# shortest match
REPLACER_REGEX_ESCAPED = re.compile(r"\\\*[A-Z]*?\\\*")
def add_esc_external(tpl):
    """Add escape sequence for imported external templates.

    Literal segments of *tpl* are escaped while the variable replacers
    matched by ``lt_common.REPLACER_REGEX`` are kept intact.  It fails if
    the template contains text that cannot be distinguished from a
    replacer (e.g., *****); in that case, use option
    log_template_import.import_format_ext_esc and add escape sequences
    manually (or with another way).
    """
    from amulog import strutil
    from amulog import lt_common

    wildcards = lt_common.REPLACER_REGEX.findall(tpl)
    literals = lt_common.REPLACER_REGEX.split(tpl)

    # re.split() always yields one more literal than there are matches,
    # so interleave: escaped literal, wildcard, ..., final escaped literal.
    parts = []
    for idx, literal in enumerate(literals):
        parts.append(strutil.add_esc(literal))
        if idx < len(wildcards):
            parts.append(wildcards[idx])
    return "".join(parts)
def generate_regex(tpl):
    """Compile template *tpl* into a regex with one named group per wildcard.

    The whole template is ``re.escape()``d and anchored, then every escaped
    wildcard token matched by REPLACER_REGEX_ESCAPED (e.g. ``\\*HOST\\*``)
    is replaced with a named capture group.  Groups sharing a wildcard name
    are numbered in order of appearance: vHOST0, vHOST1, ...
    """
    # Tracks, per wildcard name, the group names generated so far;
    # its length drives the numbering suffix.
    d_name = defaultdict(list)

    def _replace_wildcard(matchobj):
        name = matchobj.group(0).strip("\\*")
        v = len(d_name[name])
        vname = "v" + name + str(v)
        d_name[name].append(vname)
        # shortest match
        regexstr = r"(?P<" + vname + r">[^*]*)"
        return regexstr

    regex_base = r"^" + re.escape(tpl) + r"$"
    # count=0 means "replace all occurrences".
    tmp = REPLACER_REGEX_ESCAPED.sub(_replace_wildcard, regex_base, count=0)
    return re.compile(tmp)
def match_line(parsed_line, l_regex):
    """Find the first template regex matching the parsed line's message.

    Returns ``(template_index, match_object)`` for the first pattern in
    *l_regex* that matches at the start of ``parsed_line["message"]``, or
    ``None`` when no pattern matches (including an empty *l_regex*).
    """
    for index, pattern in enumerate(l_regex):
        matched = pattern.match(parsed_line["message"])
        if matched:
            return index, matched
    return None
| 29.388889 | 82 | 0.653434 |
import re
from collections import defaultdict
REPLACER_REGEX_ESCAPED = re.compile(r"\\\*[A-Z]*?\\\*")
def add_esc_external(tpl):
from amulog import strutil
from amulog import lt_common
l_wild = lt_common.REPLACER_REGEX.findall(tpl)
l_others = [strutil.add_esc(tmp)
for tmp in lt_common.REPLACER_REGEX.split(tpl)]
return "".join([other + wild for other, wild in zip(l_others, l_wild + [""])])
def generate_regex(tpl):
d_name = defaultdict(list)
def _replace_wildcard(matchobj):
name = matchobj.group(0).strip("\\*")
v = len(d_name[name])
vname = "v" + name + str(v)
d_name[name].append(vname)
regexstr = r"(?P<" + vname + r">[^*]*)"
return regexstr
regex_base = r"^" + re.escape(tpl) + r"$"
tmp = REPLACER_REGEX_ESCAPED.sub(_replace_wildcard, regex_base, count=0)
return re.compile(tmp)
def match_line(parsed_line, l_regex):
for rid, regex in enumerate(l_regex):
m = regex.match(parsed_line["message"])
if m:
return rid, m
else:
return None
| true | true |
f71e79a12d7b5249926962e2bc9b26fef30bcffc | 4,889 | py | Python | PiSnapND/s2a_fm/Snap!Files/Snap!Mobile/arduino/scratch_http_server.py | rasplay/PiSnap- | 657b97d2349604ee5d67dd8f055a1070ba57a676 | [
"MIT"
] | null | null | null | PiSnapND/s2a_fm/Snap!Files/Snap!Mobile/arduino/scratch_http_server.py | rasplay/PiSnap- | 657b97d2349604ee5d67dd8f055a1070ba57a676 | [
"MIT"
] | null | null | null | PiSnapND/s2a_fm/Snap!Files/Snap!Mobile/arduino/scratch_http_server.py | rasplay/PiSnap- | 657b97d2349604ee5d67dd8f055a1070ba57a676 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 25 14:45:49 2013
@author: Alan Yorinks
Copyright (c) 2013-14 Alan Yorinks All right reserved.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import logging
from BaseHTTPServer import BaseHTTPRequestHandler
from BaseHTTPServer import HTTPServer
from string import split
class GetHandler(BaseHTTPRequestHandler):
    """
    This class contains the HTTP server that Scratch2 communicates with.

    Scratch sends HTTP GET requests; do_GET() splits the request path into
    a command list, dispatches it to the scratch command handler, and sends
    the handler's reply back with a hand-crafted HTTP response.
    """

    firmata = None

    # tcp server port - must match that in the .s2e descriptor file
    port = 50209

    # instance handle for the scratch command handler
    scratch_command_handler = None

    # indicator so that we can tell user Scratch is ready to go
    waiting_for_first_scratch_poll = True

    # This is a classmethod because the data must be set before the HTTP
    # server (and therefore any handler instance) is created.
    @classmethod
    def set_items(self, firmata, scratch_command_handler):
        """
        Store the PyMata instance and the scratch command handler on the
        class for later use by request instances.
        """
        self.firmata = firmata
        self.command_handler = scratch_command_handler

    def do_GET(self):
        """
        Process a Scratch HTTP GET command.  The path has the form
        "/command/arg1/arg2/..."; it is split into a list and passed to
        the command handler, whose reply is sent back to Scratch.
        """
        # skip over the leading / in the command
        cmd = self.path[1:]
        # create a list containing the command and all of its parameters
        cmd_list = split(cmd, '/')
        # look up and invoke the command handler, then reply
        s = self.command_handler.do_command(cmd_list)
        self.send_resp(s)

    # we can't use the standard send_response since we don't conform to its
    # standards, so we craft our own response handler here
    def send_resp(self, response):
        """
        Send Scratch an HTTP response to an HTTP GET command.
        """
        crlf = "\r\n"
        http_response = "HTTP/1.1 200 OK" + crlf
        http_response += "Content-Type: text/html; charset=ISO-8859-1" + crlf
        # Bug fix: the separator ": " was missing, so the header was emitted
        # as e.g. "Content-Length4" -- a malformed HTTP header field.
        http_response += "Content-Length: " + str(len(response)) + crlf
        # NOTE(review): the declared length is len(response), but the body
        # actually sent is response + CRLF (or nothing for 'okay');
        # presumably the Scratch client tolerates this -- confirm before
        # tightening further.
        http_response += "Access-Control-Allow-Origin: *" + crlf
        http_response += crlf
        # append the body; the bare acknowledgement 'okay' sends none
        if response != 'okay':
            http_response += str(response + crlf)
        # send it out the door to Scratch
        self.wfile.write(http_response)
def start_server(firmata, command_handler):
    """
    This function populates class variables with essential data and
    instantiates the HTTP Server
    """
    GetHandler.set_items(firmata, command_handler)
    try:
        # NOTE(review): the bind address is hard-coded to a LAN IP, and the
        # port 50209 duplicates GetHandler.port -- both must match the .s2e
        # descriptor file; confirm before changing either.
        server = HTTPServer(('192.168.2.189', 50209), GetHandler)
        print 'Starting HTTP Server!'
        print 'Use <Ctrl-C> to exit the extension\n'
        print 'Please start Scratch or Snap!'
    except Exception:
        # Most likely cause: the socket is already bound by another instance.
        logging.debug('Exception in scratch_http_server.py: HTTP Socket may already be in use - restart Scratch')
        print 'HTTP Socket may already be in use - restart Scratch'
        raise
    try:
        # start the server; serve_forever() blocks until interrupted
        server.serve_forever()
    except KeyboardInterrupt:
        logging.info('scratch_http_server.py: keyboard interrupt exception')
        print "Goodbye !"
        raise KeyboardInterrupt
    except Exception:
        logging.debug('scratch_http_server.py: Exception %s' % str(Exception))
        raise
"""
Created on Mon Nov 25 14:45:49 2013
@author: Alan Yorinks
Copyright (c) 2013-14 Alan Yorinks All right reserved.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import logging
from BaseHTTPServer import BaseHTTPRequestHandler
from BaseHTTPServer import HTTPServer
from string import split
class GetHandler(BaseHTTPRequestHandler):
"""
This class contains the HTTP server that Scratch2 communicates with
Scratch sends HTTP GET requests and this class processes the requests.
HTTP GET requests are accepted and the appropriate command handler is
called to process the command.
"""
firmata = None
port = 50209
scratch_command_handler = None
waiting_for_first_scratch_poll = True
@classmethod
def set_items(self, firmata, scratch_command_handler):
"""
This method stores the input parameters for later use.
It is a class method, because these values need to established
prior to instantiating the class
"""
self.firmata = firmata
self.command_handler = scratch_command_handler
def do_GET(self):
"""
Scratch2 only sends HTTP GET commands. This method processes them.
It differentiates between a "normal" command request and a request
to send policy information to keep Flash happy on Scratch.
(This may change when Scratch is converted to HTML 5
"""
cmd = self.path[1:]
cmd_list = split(cmd, '/')
s = self.command_handler.do_command(cmd_list)
self.send_resp(s)
def send_resp(self, response):
"""
This method sends Scratch an HTTP response to an HTTP GET command.
"""
crlf = "\r\n"
http_response = "HTTP/1.1 200 OK" + crlf
http_response += "Content-Type: text/html; charset=ISO-8859-1" + crlf
http_response += "Content-Length" + str(len(response)) + crlf
http_response += "Access-Control-Allow-Origin: *" + crlf
http_response += crlf
if response != 'okay':
http_response += str(response + crlf)
self.wfile.write(http_response)
def start_server(firmata, command_handler):
"""
This function populates class variables with essential data and
instantiates the HTTP Server
"""
GetHandler.set_items(firmata, command_handler)
try:
server = HTTPServer(('192.168.2.189', 50209), GetHandler)
print 'Starting HTTP Server!'
print 'Use <Ctrl-C> to exit the extension\n'
print 'Please start Scratch or Snap!'
except Exception:
logging.debug('Exception in scratch_http_server.py: HTTP Socket may already be in use - restart Scratch')
print 'HTTP Socket may already be in use - restart Scratch'
raise
try:
server.serve_forever()
except KeyboardInterrupt:
logging.info('scratch_http_server.py: keyboard interrupt exception')
print "Goodbye !"
raise KeyboardInterrupt
except Exception:
logging.debug('scratch_http_server.py: Exception %s' % str(Exception))
raise | false | true |
f71e79a6e2049d202d18bd4eb9b2e1332868a805 | 15,551 | py | Python | tests/test_autointerface.py | sphinx-contrib/zopeext | b749d0023f4fb8b8eea3a8f3216f63397c6272de | [
"BSD-2-Clause"
] | 1 | 2020-03-16T07:20:58.000Z | 2020-03-16T07:20:58.000Z | tests/test_autointerface.py | sphinx-contrib/zopeext | b749d0023f4fb8b8eea3a8f3216f63397c6272de | [
"BSD-2-Clause"
] | 3 | 2021-12-19T09:39:45.000Z | 2022-01-06T05:05:03.000Z | tests/test_autointerface.py | sphinx-contrib/zopeext | b749d0023f4fb8b8eea3a8f3216f63397c6272de | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import sys
import pytest
# Add current directory to path so we can import the example.py file.
sys.path.insert(0, os.path.abspath(__file__))
pytest_plugins = "sphinx.testing.fixtures"
@pytest.mark.sphinx(
"html",
srcdir=os.path.join(os.path.dirname(__file__), "examples"),
)
@pytest.mark.skip(reason="Test needs updating.")
def test_sphinx_build(app, status, warning):
app.build()
html = (app.outdir / "index.html").read_text()
for _n, _E in enumerate(_EXPECTED):
assert _E.strip() in html
_EXPECTED = [
"""
<script>
$(document).ready(function() {
$('.interface').addClass('class');
});
</script>
""",
"""
<div class="section" id="the-example-module">
<h1>The <a class="reference internal" href="#module-example" title="example"><code class="xref py py-mod docutils literal notranslate"><span class="pre">example</span></code></a> Module<a class="headerlink" href="#the-example-module" title="Permalink to this headline">¶</a></h1>
<p>Here is a reference to the Interface: <a class="reference internal" href="#example.IMyInterface" title="example.IMyInterface"><code class="xref py py-interface docutils literal notranslate"><span class="pre">example.IMyInterface</span></code></a>, and to the
implementation: <a class="reference internal" href="#example.MyImplementation" title="example.MyImplementation"><code class="xref py py-class docutils literal notranslate"><span class="pre">example.MyImplementation</span></code></a>.</p>
<table class="longtable docutils align-default">
<colgroup>
<col style="width: 10%" />
<col style="width: 90%" />
</colgroup>
<tbody>
<tr class="row-odd"><td><p><a class="reference internal" href="#module-example" title="example"><code class="xref py py-obj docutils literal notranslate"><span class="pre">example</span></code></a></p></td>
<td><p>Example module using <code class="xref py py-mod docutils literal notranslate"><span class="pre">zope.interface</span></code>.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#example.IMyInterface" title="example.IMyInterface"><code class="xref py py-obj docutils literal notranslate"><span class="pre">example.IMyInterface</span></code></a>(x)</p></td>
<td><p>This is an example of an interface.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#example.MyImplementation" title="example.MyImplementation"><code class="xref py py-obj docutils literal notranslate"><span class="pre">example.MyImplementation</span></code></a>(x[, y])</p></td>
<td><p>Example</p></td>
</tr>
</tbody>
</table>
<span class="target" id="module-example"></span><p>Example module using <code class="xref py py-mod docutils literal notranslate"><span class="pre">zope.interface</span></code>.</p>
<p>Here we define an interface <a class="reference internal" href="#example.IMyInterface" title="example.IMyInterface"><code class="xref py py-interface docutils literal notranslate"><span class="pre">IMyInterface</span></code></a> and an
implementation <a class="reference internal" href="#example.MyImplementation" title="example.MyImplementation"><code class="xref py py-class docutils literal notranslate"><span class="pre">MyImplementation</span></code></a>.</p>
""",
"""
<dl class="py interface">
<dt class="sig sig-object py" id="example.IMyInterface">
<em class="property"><span class="pre">interface</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">example.</span></span><span class="sig-name descname"><span class="pre">IMyInterface</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#example.IMyInterface" title="Permalink to this definition">¶</a></dt>
<dd><p>This is an example of an interface.</p>
<dl class="py attribute">
<dt class="sig sig-object py" id="example.IMyInterface.x">
<span class="sig-name descname"><span class="pre">x</span></span><a class="headerlink" href="#example.IMyInterface.x" title="Permalink to this definition">¶</a></dt>
<dd><p>A required attribute of the interface</p>
</dd></dl>
<dl class="py method">
<dt class="sig sig-object py" id="example.IMyInterface.equals">
<span class="sig-name descname"><span class="pre">equals</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#example.IMyInterface.equals" title="Permalink to this definition">¶</a></dt>
<dd><p>A required method of the interface.</p>
<dl class="simple">
<dt>x<span class="classifier">float</span></dt><dd><p>The parameter <cite>x</cite>.</p>
</dd>
</dl>
<p>The argument <cite>self</cite> is not specified as part of the interface and
should be omitted, even though it is required in the implementation.</p>
</dd></dl>
</dd></dl>""",
"""
<dl class="py interface">
<dt class="sig sig-object py" id="example.IMySecondInterface">
<em class="property"><span class="pre">interface</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">example.</span></span><span class="sig-name descname"><span class="pre">IMySecondInterface</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#example.IMySecondInterface" title="Permalink to this definition">¶</a></dt>
<dd><p>A refinement of the previous interface.</p>
<dl class="py attribute">
<dt class="sig sig-object py" id="example.IMySecondInterface.y">
<span class="sig-name descname"><span class="pre">y</span></span><a class="headerlink" href="#example.IMySecondInterface.y" title="Permalink to this definition">¶</a></dt>
<dd><p>A new required attribute</p>
</dd></dl>
</dd></dl>""",
"""
<dl class="py class">
<dt class="sig sig-object py" id="example.MyImplementation">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">example.</span></span><span class="sig-name descname"><span class="pre">MyImplementation</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">y</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">3.0</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#example.MyImplementation" title="Permalink to this definition">¶</a></dt>
<dd><p>Example</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="n">a</span> <span class="o">=</span> <span class="n">MyImplementation</span><span class="p">(</span><span class="n">x</span><span class="o">=</span><span class="mf">2.0</span><span class="p">)</span>
<span class="gp">>>> </span><span class="n">a</span><span class="o">.</span><span class="n">equals</span><span class="p">(</span><span class="mf">2.0</span><span class="p">)</span>
<span class="go">True</span>
</pre></div>
</div>
<dl class="py method">
<dt class="sig sig-object py" id="example.MyImplementation.equals">
<span class="sig-name descname"><span class="pre">equals</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#example.MyImplementation.equals" title="Permalink to this definition">¶</a></dt>
<dd><p>A required method of the interface.</p>
<dl class="simple">
<dt>x<span class="classifier">float</span></dt><dd><p>The parameter <cite>x</cite>.</p>
</dd>
</dl>
</dd></dl>
</dd></dl>""",
"""
<li><p>Here is an explicit example of <cite>autointerface</cite></p>
<dl class="py interface">
<dt class="sig sig-object py">
<em class="property"><span class="pre">interface</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">example.</span></span><span class="sig-name descname"><span class="pre">IMyInterface</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em><span class="sig-paren">)</span></dt>
<dd><p>This is an example of an interface.</p>
<dl class="py attribute">
<dt class="sig sig-object py">
<span class="sig-name descname"><span class="pre">x</span></span></dt>
<dd><p>A required attribute of the interface</p>
</dd></dl>
<dl class="py method">
<dt class="sig sig-object py">
<span class="sig-name descname"><span class="pre">equals</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em><span class="sig-paren">)</span></dt>
<dd><p>A required method of the interface.</p>
<dl class="simple">
<dt>x<span class="classifier">float</span></dt><dd><p>The parameter <cite>x</cite>.</p>
</dd>
</dl>
<p>The argument <cite>self</cite> is not specified as part of the interface and
should be omitted, even though it is required in the implementation.</p>
</dd></dl>
</dd></dl>""",
"""
<dl class="py interface">
<dt class="sig sig-object py">
<em class="property"><span class="pre">interface</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">example.</span></span><span class="sig-name descname"><span class="pre">IMySecondInterface</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em><span class="sig-paren">)</span></dt>
<dd><p>Bases: <a class="reference internal" href="#example.IMyInterface" title="example.IMyInterface"><code class="xref py py-class docutils literal notranslate"><span class="pre">example.IMyInterface</span></code></a></p>
<p>A refinement of the previous interface.</p>
<dl class="py attribute">
<dt class="sig sig-object py">
<span class="sig-name descname"><span class="pre">y</span></span></dt>
<dd><p>A new required attribute</p>
</dd></dl>
</dd></dl>""",
"""
<li><p>Now the interface with explicit members.</p>
<dl class="py interface">
<dt class="sig sig-object py">
<em class="property"><span class="pre">interface</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">example.</span></span><span class="sig-name descname"><span class="pre">IMyInterface</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em><span class="sig-paren">)</span></dt>
<dd><p>This is an example of an interface.</p>
<dl class="py method">
<dt class="sig sig-object py" id="example.IMyInterface.__init__">
<span class="sig-name descname"><span class="pre">__init__</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="o"><span class="pre">**</span></span><span class="n"><span class="pre">kw</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#example.IMyInterface.__init__" title="Permalink to this definition">¶</a></dt>
<dd><p>The constructor should set the attribute <cite>x</cite>.</p>
<dl class="simple">
<dt>x<span class="classifier">float</span></dt><dd><p>The parameter <cite>x</cite>.</p>
</dd>
</dl>
</dd></dl>
<dl class="py attribute">
<dt class="sig sig-object py">
<span class="sig-name descname"><span class="pre">_a</span></span></dt>
<dd><p>A private required attribute of the interface</p>
</dd></dl>
<dl class="py method">
<dt class="sig sig-object py">
<span class="sig-name descname"><span class="pre">equals</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em><span class="sig-paren">)</span></dt>
<dd><p>A required method of the interface.</p>
<dl class="simple">
<dt>x<span class="classifier">float</span></dt><dd><p>The parameter <cite>x</cite>.</p>
</dd>
</dl>
<p>The argument <cite>self</cite> is not specified as part of the interface and
should be omitted, even though it is required in the implementation.</p>
</dd></dl>
</dd></dl>
</li>""",
"""
<li><p>Now the class with explicit members.</p>
<dl class="py class">
<dt class="sig sig-object py">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">example.</span></span><span class="sig-name descname"><span class="pre">MyImplementation</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">y</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">3.0</span></span></em><span class="sig-paren">)</span></dt>
<dd><p>Example</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="n">a</span> <span class="o">=</span> <span class="n">MyImplementation</span><span class="p">(</span><span class="n">x</span><span class="o">=</span><span class="mf">2.0</span><span class="p">)</span>
<span class="gp">>>> </span><span class="n">a</span><span class="o">.</span><span class="n">equals</span><span class="p">(</span><span class="mf">2.0</span><span class="p">)</span>
<span class="go">True</span>
</pre></div>
</div>
<dl class="py method">
<dt class="sig sig-object py" id="example.MyImplementation.__init__">
<span class="sig-name descname"><span class="pre">__init__</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">y</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">3.0</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#example.MyImplementation.__init__" title="Permalink to this definition">¶</a></dt>
<dd><p>Constructor.</p>
<dl class="simple">
<dt>x<span class="classifier">float</span></dt><dd><p>The parameter <cite>x</cite>.</p>
</dd>
<dt>y<span class="classifier">float, optional</span></dt><dd><p>An additional parameter <cite>y</cite> that is not part of the interface, but which
has a default value (3.0) and so does not violate the interface definition.</p>
</dd>
</dl>
</dd></dl>
<dl class="py method">
<dt class="sig sig-object py">
<span class="sig-name descname"><span class="pre">equals</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em><span class="sig-paren">)</span></dt>
<dd><p>A required method of the interface.</p>
<dl class="simple">
<dt>x<span class="classifier">float</span></dt><dd><p>The parameter <cite>x</cite>.</p>
</dd>
</dl>
</dd></dl>
<dl class="py attribute">
<dt class="sig sig-object py">
<span class="sig-name descname"><span class="pre">x</span></span><em class="property"><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="pre">None</span></em></dt>
<dd></dd></dl>
<dl class="py attribute">
<dt class="sig sig-object py">
<span class="sig-name descname"><span class="pre">y</span></span><em class="property"><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="pre">None</span></em></dt>
<dd></dd></dl>
</dd></dl>
</li>""",
]
| 64.795833 | 672 | 0.67957 |
import os
import sys
import pytest
sys.path.insert(0, os.path.abspath(__file__))
pytest_plugins = "sphinx.testing.fixtures"
@pytest.mark.sphinx(
"html",
srcdir=os.path.join(os.path.dirname(__file__), "examples"),
)
@pytest.mark.skip(reason="Test needs updating.")
def test_sphinx_build(app, status, warning):
app.build()
html = (app.outdir / "index.html").read_text()
for _n, _E in enumerate(_EXPECTED):
assert _E.strip() in html
_EXPECTED = [
"""
<script>
$(document).ready(function() {
$('.interface').addClass('class');
});
</script>
""",
"""
<div class="section" id="the-example-module">
<h1>The <a class="reference internal" href="#module-example" title="example"><code class="xref py py-mod docutils literal notranslate"><span class="pre">example</span></code></a> Module<a class="headerlink" href="#the-example-module" title="Permalink to this headline">¶</a></h1>
<p>Here is a reference to the Interface: <a class="reference internal" href="#example.IMyInterface" title="example.IMyInterface"><code class="xref py py-interface docutils literal notranslate"><span class="pre">example.IMyInterface</span></code></a>, and to the
implementation: <a class="reference internal" href="#example.MyImplementation" title="example.MyImplementation"><code class="xref py py-class docutils literal notranslate"><span class="pre">example.MyImplementation</span></code></a>.</p>
<table class="longtable docutils align-default">
<colgroup>
<col style="width: 10%" />
<col style="width: 90%" />
</colgroup>
<tbody>
<tr class="row-odd"><td><p><a class="reference internal" href="#module-example" title="example"><code class="xref py py-obj docutils literal notranslate"><span class="pre">example</span></code></a></p></td>
<td><p>Example module using <code class="xref py py-mod docutils literal notranslate"><span class="pre">zope.interface</span></code>.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#example.IMyInterface" title="example.IMyInterface"><code class="xref py py-obj docutils literal notranslate"><span class="pre">example.IMyInterface</span></code></a>(x)</p></td>
<td><p>This is an example of an interface.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#example.MyImplementation" title="example.MyImplementation"><code class="xref py py-obj docutils literal notranslate"><span class="pre">example.MyImplementation</span></code></a>(x[, y])</p></td>
<td><p>Example</p></td>
</tr>
</tbody>
</table>
<span class="target" id="module-example"></span><p>Example module using <code class="xref py py-mod docutils literal notranslate"><span class="pre">zope.interface</span></code>.</p>
<p>Here we define an interface <a class="reference internal" href="#example.IMyInterface" title="example.IMyInterface"><code class="xref py py-interface docutils literal notranslate"><span class="pre">IMyInterface</span></code></a> and an
implementation <a class="reference internal" href="#example.MyImplementation" title="example.MyImplementation"><code class="xref py py-class docutils literal notranslate"><span class="pre">MyImplementation</span></code></a>.</p>
""",
"""
<dl class="py interface">
<dt class="sig sig-object py" id="example.IMyInterface">
<em class="property"><span class="pre">interface</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">example.</span></span><span class="sig-name descname"><span class="pre">IMyInterface</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#example.IMyInterface" title="Permalink to this definition">¶</a></dt>
<dd><p>This is an example of an interface.</p>
<dl class="py attribute">
<dt class="sig sig-object py" id="example.IMyInterface.x">
<span class="sig-name descname"><span class="pre">x</span></span><a class="headerlink" href="#example.IMyInterface.x" title="Permalink to this definition">¶</a></dt>
<dd><p>A required attribute of the interface</p>
</dd></dl>
<dl class="py method">
<dt class="sig sig-object py" id="example.IMyInterface.equals">
<span class="sig-name descname"><span class="pre">equals</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#example.IMyInterface.equals" title="Permalink to this definition">¶</a></dt>
<dd><p>A required method of the interface.</p>
<dl class="simple">
<dt>x<span class="classifier">float</span></dt><dd><p>The parameter <cite>x</cite>.</p>
</dd>
</dl>
<p>The argument <cite>self</cite> is not specified as part of the interface and
should be omitted, even though it is required in the implementation.</p>
</dd></dl>
</dd></dl>""",
"""
<dl class="py interface">
<dt class="sig sig-object py" id="example.IMySecondInterface">
<em class="property"><span class="pre">interface</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">example.</span></span><span class="sig-name descname"><span class="pre">IMySecondInterface</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#example.IMySecondInterface" title="Permalink to this definition">¶</a></dt>
<dd><p>A refinement of the previous interface.</p>
<dl class="py attribute">
<dt class="sig sig-object py" id="example.IMySecondInterface.y">
<span class="sig-name descname"><span class="pre">y</span></span><a class="headerlink" href="#example.IMySecondInterface.y" title="Permalink to this definition">¶</a></dt>
<dd><p>A new required attribute</p>
</dd></dl>
</dd></dl>""",
"""
<dl class="py class">
<dt class="sig sig-object py" id="example.MyImplementation">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">example.</span></span><span class="sig-name descname"><span class="pre">MyImplementation</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">y</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">3.0</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#example.MyImplementation" title="Permalink to this definition">¶</a></dt>
<dd><p>Example</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="n">a</span> <span class="o">=</span> <span class="n">MyImplementation</span><span class="p">(</span><span class="n">x</span><span class="o">=</span><span class="mf">2.0</span><span class="p">)</span>
<span class="gp">>>> </span><span class="n">a</span><span class="o">.</span><span class="n">equals</span><span class="p">(</span><span class="mf">2.0</span><span class="p">)</span>
<span class="go">True</span>
</pre></div>
</div>
<dl class="py method">
<dt class="sig sig-object py" id="example.MyImplementation.equals">
<span class="sig-name descname"><span class="pre">equals</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#example.MyImplementation.equals" title="Permalink to this definition">¶</a></dt>
<dd><p>A required method of the interface.</p>
<dl class="simple">
<dt>x<span class="classifier">float</span></dt><dd><p>The parameter <cite>x</cite>.</p>
</dd>
</dl>
</dd></dl>
</dd></dl>""",
"""
<li><p>Here is an explicit example of <cite>autointerface</cite></p>
<dl class="py interface">
<dt class="sig sig-object py">
<em class="property"><span class="pre">interface</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">example.</span></span><span class="sig-name descname"><span class="pre">IMyInterface</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em><span class="sig-paren">)</span></dt>
<dd><p>This is an example of an interface.</p>
<dl class="py attribute">
<dt class="sig sig-object py">
<span class="sig-name descname"><span class="pre">x</span></span></dt>
<dd><p>A required attribute of the interface</p>
</dd></dl>
<dl class="py method">
<dt class="sig sig-object py">
<span class="sig-name descname"><span class="pre">equals</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em><span class="sig-paren">)</span></dt>
<dd><p>A required method of the interface.</p>
<dl class="simple">
<dt>x<span class="classifier">float</span></dt><dd><p>The parameter <cite>x</cite>.</p>
</dd>
</dl>
<p>The argument <cite>self</cite> is not specified as part of the interface and
should be omitted, even though it is required in the implementation.</p>
</dd></dl>
</dd></dl>""",
"""
<dl class="py interface">
<dt class="sig sig-object py">
<em class="property"><span class="pre">interface</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">example.</span></span><span class="sig-name descname"><span class="pre">IMySecondInterface</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em><span class="sig-paren">)</span></dt>
<dd><p>Bases: <a class="reference internal" href="#example.IMyInterface" title="example.IMyInterface"><code class="xref py py-class docutils literal notranslate"><span class="pre">example.IMyInterface</span></code></a></p>
<p>A refinement of the previous interface.</p>
<dl class="py attribute">
<dt class="sig sig-object py">
<span class="sig-name descname"><span class="pre">y</span></span></dt>
<dd><p>A new required attribute</p>
</dd></dl>
</dd></dl>""",
"""
<li><p>Now the interface with explicit members.</p>
<dl class="py interface">
<dt class="sig sig-object py">
<em class="property"><span class="pre">interface</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">example.</span></span><span class="sig-name descname"><span class="pre">IMyInterface</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em><span class="sig-paren">)</span></dt>
<dd><p>This is an example of an interface.</p>
<dl class="py method">
<dt class="sig sig-object py" id="example.IMyInterface.__init__">
<span class="sig-name descname"><span class="pre">__init__</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="o"><span class="pre">**</span></span><span class="n"><span class="pre">kw</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#example.IMyInterface.__init__" title="Permalink to this definition">¶</a></dt>
<dd><p>The constructor should set the attribute <cite>x</cite>.</p>
<dl class="simple">
<dt>x<span class="classifier">float</span></dt><dd><p>The parameter <cite>x</cite>.</p>
</dd>
</dl>
</dd></dl>
<dl class="py attribute">
<dt class="sig sig-object py">
<span class="sig-name descname"><span class="pre">_a</span></span></dt>
<dd><p>A private required attribute of the interface</p>
</dd></dl>
<dl class="py method">
<dt class="sig sig-object py">
<span class="sig-name descname"><span class="pre">equals</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em><span class="sig-paren">)</span></dt>
<dd><p>A required method of the interface.</p>
<dl class="simple">
<dt>x<span class="classifier">float</span></dt><dd><p>The parameter <cite>x</cite>.</p>
</dd>
</dl>
<p>The argument <cite>self</cite> is not specified as part of the interface and
should be omitted, even though it is required in the implementation.</p>
</dd></dl>
</dd></dl>
</li>""",
"""
<li><p>Now the class with explicit members.</p>
<dl class="py class">
<dt class="sig sig-object py">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">example.</span></span><span class="sig-name descname"><span class="pre">MyImplementation</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">y</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">3.0</span></span></em><span class="sig-paren">)</span></dt>
<dd><p>Example</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="n">a</span> <span class="o">=</span> <span class="n">MyImplementation</span><span class="p">(</span><span class="n">x</span><span class="o">=</span><span class="mf">2.0</span><span class="p">)</span>
<span class="gp">>>> </span><span class="n">a</span><span class="o">.</span><span class="n">equals</span><span class="p">(</span><span class="mf">2.0</span><span class="p">)</span>
<span class="go">True</span>
</pre></div>
</div>
<dl class="py method">
<dt class="sig sig-object py" id="example.MyImplementation.__init__">
<span class="sig-name descname"><span class="pre">__init__</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">y</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">3.0</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#example.MyImplementation.__init__" title="Permalink to this definition">¶</a></dt>
<dd><p>Constructor.</p>
<dl class="simple">
<dt>x<span class="classifier">float</span></dt><dd><p>The parameter <cite>x</cite>.</p>
</dd>
<dt>y<span class="classifier">float, optional</span></dt><dd><p>An additional parameter <cite>y</cite> that is not part of the interface, but which
has a default value (3.0) and so does not violate the interface definition.</p>
</dd>
</dl>
</dd></dl>
<dl class="py method">
<dt class="sig sig-object py">
<span class="sig-name descname"><span class="pre">equals</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em><span class="sig-paren">)</span></dt>
<dd><p>A required method of the interface.</p>
<dl class="simple">
<dt>x<span class="classifier">float</span></dt><dd><p>The parameter <cite>x</cite>.</p>
</dd>
</dl>
</dd></dl>
<dl class="py attribute">
<dt class="sig sig-object py">
<span class="sig-name descname"><span class="pre">x</span></span><em class="property"><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="pre">None</span></em></dt>
<dd></dd></dl>
<dl class="py attribute">
<dt class="sig sig-object py">
<span class="sig-name descname"><span class="pre">y</span></span><em class="property"><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="pre">None</span></em></dt>
<dd></dd></dl>
</dd></dl>
</li>""",
]
| true | true |
f71e7ab3cb5bcd17f3735b3ac4b491ebc205bce5 | 1,134 | py | Python | Modules/Filtering/Smoothing/wrapping/test/MedianImageFilterFunctionalDocumentationTest.py | HongdaZ/ITK | f5d004fa3607b8e11edc30f1ba299df35af8aff8 | [
"Apache-2.0"
] | 1 | 2021-01-10T14:19:08.000Z | 2021-01-10T14:19:08.000Z | Modules/Filtering/Smoothing/wrapping/test/MedianImageFilterFunctionalDocumentationTest.py | HongdaZ/ITK | f5d004fa3607b8e11edc30f1ba299df35af8aff8 | [
"Apache-2.0"
] | 1 | 2017-03-19T12:56:50.000Z | 2018-10-24T10:40:21.000Z | Modules/Filtering/Smoothing/wrapping/test/MedianImageFilterFunctionalDocumentationTest.py | HongdaZ/ITK | f5d004fa3607b8e11edc30f1ba299df35af8aff8 | [
"Apache-2.0"
] | 1 | 2020-07-24T22:58:19.000Z | 2020-07-24T22:58:19.000Z | #==========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
#
# Example on the use of the MedianImageFilter
#
import itk
# Test that docstring in snake_case function is replaced by
# docstring from corresponding object.
# Not the default docstring.
assert "Procedural interface for" not in itk.median_image_filter.__doc__
# But the actual filter docstring.
assert "Applies a median filter to an image" in itk.median_image_filter.__doc__
| 35.4375 | 79 | 0.652557 |
import itk
assert "Procedural interface for" not in itk.median_image_filter.__doc__
assert "Applies a median filter to an image" in itk.median_image_filter.__doc__
| true | true |
f71e7e5849fed1929fa7be9aa2c73ed76e795347 | 3,026 | py | Python | statsmodels/graphics/tests/test_factorplots.py | aliavni/statsmodels | ef5d57a8d45de76a895e9401705280d558d688ad | [
"BSD-3-Clause"
] | 1 | 2022-01-24T15:17:37.000Z | 2022-01-24T15:17:37.000Z | statsmodels/graphics/tests/test_factorplots.py | aliavni/statsmodels | ef5d57a8d45de76a895e9401705280d558d688ad | [
"BSD-3-Clause"
] | null | null | null | statsmodels/graphics/tests/test_factorplots.py | aliavni/statsmodels | ef5d57a8d45de76a895e9401705280d558d688ad | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from numpy.testing import assert_equal, assert_raises
from pandas import Series
import pytest
from statsmodels.graphics.factorplots import _recode, interaction_plot
try:
import matplotlib.pyplot as plt
except ImportError:
pass
class TestInteractionPlot:
@classmethod
def setup_class(cls):
np.random.seed(12345)
cls.weight = np.random.randint(1,4,size=60)
cls.duration = np.random.randint(1,3,size=60)
cls.days = np.log(np.random.randint(1,30, size=60))
@pytest.mark.matplotlib
def test_plot_both(self, close_figures):
fig = interaction_plot(self.weight, self.duration, self.days,
colors=['red','blue'], markers=['D','^'], ms=10)
@pytest.mark.matplotlib
def test_plot_rainbow(self, close_figures):
fig = interaction_plot(self.weight, self.duration, self.days,
markers=['D','^'], ms=10)
@pytest.mark.matplotlib
@pytest.mark.parametrize('astype', ['str', 'int'])
def test_plot_pandas(self, astype, close_figures):
weight = Series(self.weight, name='Weight').astype(astype)
duration = Series(self.duration, name='Duration')
days = Series(self.days, name='Days')
fig = interaction_plot(weight, duration, days,
markers=['D', '^'], ms=10)
ax = fig.axes[0]
trace = ax.get_legend().get_title().get_text()
assert_equal(trace, 'Duration')
assert_equal(ax.get_ylabel(), 'mean of Days')
assert_equal(ax.get_xlabel(), 'Weight')
@pytest.mark.matplotlib
def test_formatting(self, close_figures):
fig = interaction_plot(self.weight, self.duration, self.days, colors=['r','g'], linestyles=['--','-.'])
assert_equal(isinstance(fig, plt.Figure), True)
@pytest.mark.matplotlib
def test_formatting_errors(self, close_figures):
assert_raises(ValueError, interaction_plot, self.weight, self.duration, self.days, markers=['D'])
assert_raises(ValueError, interaction_plot, self.weight, self.duration, self.days, colors=['b','r','g'])
assert_raises(ValueError, interaction_plot, self.weight, self.duration, self.days, linestyles=['--','-.',':'])
@pytest.mark.matplotlib
def test_plottype(self, close_figures):
fig = interaction_plot(self.weight, self.duration, self.days, plottype='line')
assert_equal(isinstance(fig, plt.Figure), True)
fig = interaction_plot(self.weight, self.duration, self.days, plottype='scatter')
assert_equal(isinstance(fig, plt.Figure), True)
assert_raises(ValueError, interaction_plot, self.weight, self.duration, self.days, plottype='unknown')
def test_recode_series(self):
series = Series(['a', 'b'] * 10, index=np.arange(0, 40, 2),
name='index_test')
series_ = _recode(series, {'a': 0, 'b': 1})
assert_equal(series_.index.values, series.index.values,
err_msg='_recode changed the index')
| 42.027778 | 118 | 0.653668 | import numpy as np
from numpy.testing import assert_equal, assert_raises
from pandas import Series
import pytest
from statsmodels.graphics.factorplots import _recode, interaction_plot
try:
import matplotlib.pyplot as plt
except ImportError:
pass
class TestInteractionPlot:
@classmethod
def setup_class(cls):
np.random.seed(12345)
cls.weight = np.random.randint(1,4,size=60)
cls.duration = np.random.randint(1,3,size=60)
cls.days = np.log(np.random.randint(1,30, size=60))
@pytest.mark.matplotlib
def test_plot_both(self, close_figures):
fig = interaction_plot(self.weight, self.duration, self.days,
colors=['red','blue'], markers=['D','^'], ms=10)
@pytest.mark.matplotlib
def test_plot_rainbow(self, close_figures):
fig = interaction_plot(self.weight, self.duration, self.days,
markers=['D','^'], ms=10)
@pytest.mark.matplotlib
@pytest.mark.parametrize('astype', ['str', 'int'])
def test_plot_pandas(self, astype, close_figures):
weight = Series(self.weight, name='Weight').astype(astype)
duration = Series(self.duration, name='Duration')
days = Series(self.days, name='Days')
fig = interaction_plot(weight, duration, days,
markers=['D', '^'], ms=10)
ax = fig.axes[0]
trace = ax.get_legend().get_title().get_text()
assert_equal(trace, 'Duration')
assert_equal(ax.get_ylabel(), 'mean of Days')
assert_equal(ax.get_xlabel(), 'Weight')
@pytest.mark.matplotlib
def test_formatting(self, close_figures):
fig = interaction_plot(self.weight, self.duration, self.days, colors=['r','g'], linestyles=['--','-.'])
assert_equal(isinstance(fig, plt.Figure), True)
@pytest.mark.matplotlib
def test_formatting_errors(self, close_figures):
assert_raises(ValueError, interaction_plot, self.weight, self.duration, self.days, markers=['D'])
assert_raises(ValueError, interaction_plot, self.weight, self.duration, self.days, colors=['b','r','g'])
assert_raises(ValueError, interaction_plot, self.weight, self.duration, self.days, linestyles=['--','-.',':'])
@pytest.mark.matplotlib
def test_plottype(self, close_figures):
fig = interaction_plot(self.weight, self.duration, self.days, plottype='line')
assert_equal(isinstance(fig, plt.Figure), True)
fig = interaction_plot(self.weight, self.duration, self.days, plottype='scatter')
assert_equal(isinstance(fig, plt.Figure), True)
assert_raises(ValueError, interaction_plot, self.weight, self.duration, self.days, plottype='unknown')
def test_recode_series(self):
series = Series(['a', 'b'] * 10, index=np.arange(0, 40, 2),
name='index_test')
series_ = _recode(series, {'a': 0, 'b': 1})
assert_equal(series_.index.values, series.index.values,
err_msg='_recode changed the index')
| true | true |
f71e7e75013346712238ce0f9ab6dfad2b41203f | 2,066 | py | Python | src/draw_pictures.py | mpeychev/disentangled-autoencoders | 2d1f18fe198486f29c74ba5606ffcadaff7055cf | [
"MIT"
] | 8 | 2017-11-24T22:26:50.000Z | 2018-10-15T07:12:51.000Z | src/draw_pictures.py | mpeychev/disentangled-autoencoders | 2d1f18fe198486f29c74ba5606ffcadaff7055cf | [
"MIT"
] | 1 | 2018-01-10T03:44:37.000Z | 2018-01-10T19:59:39.000Z | src/draw_pictures.py | mpeychev/disentangled-autoencoders | 2d1f18fe198486f29c74ba5606ffcadaff7055cf | [
"MIT"
] | 3 | 2017-12-22T01:07:14.000Z | 2019-08-08T09:45:30.000Z | from PIL import Image
import os
import util
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
def show_images(images, save_name, hard=False):
print images.shape
dim = images.shape[0]
if hard:
images = np.array(
map(lambda image: map(lambda pixel: 0.0 if pixel < 0.5 else 1.0, image), images))
n_image_rows = images.shape[0] / 10
n_image_cols = 10
gs = gridspec.GridSpec(n_image_rows, n_image_cols, hspace=0., wspace=0.)
for i in range(n_image_rows):
for j in range(n_image_cols):
ax = plt.subplot(gs[i * n_image_cols + j])
ax.imshow(images[i * n_image_cols + j].reshape((64, 64)))
ax.set_xticks([])
ax.set_yticks([])
if i == 0 and j == 0:
ax.set_title('scale')
if i == 0 and j == 3:
ax.set_title('y')
if i == 0 and j == 4:
ax.set_title('x')
if i == 0 and j == 7:
ax.set_title('rotation')
if j == 0 and i == 0:
ax.set_ylabel('+1.0')
if j == 0 and i == 1:
ax.set_ylabel('+0.5')
if j == 0 and i == 2:
ax.set_ylabel('base')
if j == 0 and i == 3:
ax.set_ylabel('-0.5')
if j == 0 and i == 4:
ax.set_ylabel('-1.0')
if i == n_image_rows - 1:
ax.set_xlabel('z{0}'.format(j))
ax.set_aspect('equal')
plt.subplots_adjust(wspace=None, hspace=None)
plt.tight_layout()
plt.subplots_adjust(top=0.94)
plt.savefig(save_name + '_vis.png')
results_dir = util.get_results_dir()
images = np.load(os.path.join(results_dir, 'pictures_4.npy'))
indexes = []
SHIFT_RANGE = len(images) / 10
for shift in range(SHIFT_RANGE):
for i in range(10):
indexes.append(i * SHIFT_RANGE + shift)
new_images = []
for index in indexes:
new_images.append(images[index])
show_images(np.array(new_images), 'all')
| 32.793651 | 93 | 0.548403 | from PIL import Image
import os
import util
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
def show_images(images, save_name, hard=False):
print images.shape
dim = images.shape[0]
if hard:
images = np.array(
map(lambda image: map(lambda pixel: 0.0 if pixel < 0.5 else 1.0, image), images))
n_image_rows = images.shape[0] / 10
n_image_cols = 10
gs = gridspec.GridSpec(n_image_rows, n_image_cols, hspace=0., wspace=0.)
for i in range(n_image_rows):
for j in range(n_image_cols):
ax = plt.subplot(gs[i * n_image_cols + j])
ax.imshow(images[i * n_image_cols + j].reshape((64, 64)))
ax.set_xticks([])
ax.set_yticks([])
if i == 0 and j == 0:
ax.set_title('scale')
if i == 0 and j == 3:
ax.set_title('y')
if i == 0 and j == 4:
ax.set_title('x')
if i == 0 and j == 7:
ax.set_title('rotation')
if j == 0 and i == 0:
ax.set_ylabel('+1.0')
if j == 0 and i == 1:
ax.set_ylabel('+0.5')
if j == 0 and i == 2:
ax.set_ylabel('base')
if j == 0 and i == 3:
ax.set_ylabel('-0.5')
if j == 0 and i == 4:
ax.set_ylabel('-1.0')
if i == n_image_rows - 1:
ax.set_xlabel('z{0}'.format(j))
ax.set_aspect('equal')
plt.subplots_adjust(wspace=None, hspace=None)
plt.tight_layout()
plt.subplots_adjust(top=0.94)
plt.savefig(save_name + '_vis.png')
results_dir = util.get_results_dir()
images = np.load(os.path.join(results_dir, 'pictures_4.npy'))
indexes = []
SHIFT_RANGE = len(images) / 10
for shift in range(SHIFT_RANGE):
for i in range(10):
indexes.append(i * SHIFT_RANGE + shift)
new_images = []
for index in indexes:
new_images.append(images[index])
show_images(np.array(new_images), 'all')
| false | true |
f71e7e82c3f619f7d9bc39e1ced2dcf72b788c44 | 1,977 | py | Python | erpnext_furniture_to_go/erpnext_furniture_to_go/doctype/furniture_to_go_settings/furniture_to_go_settings.py | artykbasar/erpnext_furniture_to_go | c93894b2cc23bf64ff49ffb4485a30b5be38bfc1 | [
"MIT"
] | null | null | null | erpnext_furniture_to_go/erpnext_furniture_to_go/doctype/furniture_to_go_settings/furniture_to_go_settings.py | artykbasar/erpnext_furniture_to_go | c93894b2cc23bf64ff49ffb4485a30b5be38bfc1 | [
"MIT"
] | null | null | null | erpnext_furniture_to_go/erpnext_furniture_to_go/doctype/furniture_to_go_settings/furniture_to_go_settings.py | artykbasar/erpnext_furniture_to_go | c93894b2cc23bf64ff49ffb4485a30b5be38bfc1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2021, Artyk Basarov and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
# import erpnext_furniture_to_go.erpnext_furniture_to_go.doctype.furniture_to_go_settings.furniture_to_go_methods as f2g
from frappe.model.document import Document
class FurnitureToGoSettings(Document):
	"""Settings DocType driving synchronisation with the Furniture To Go supplier.

	Every whitelisted method is a no-op unless the ``enable`` flag is set; the
	long-running scraping/sync work is pushed onto the Frappe background queue.
	"""

	@frappe.whitelist()
	def find_new_products(self):
		"""Queue a background job that searches the supplier for new products."""
		if self.enable == 1:
			frappe.enqueue('erpnext_furniture_to_go.erpnext_furniture_to_go.doctype.furniture_to_go_settings.furniture_to_go_methods.find_new_products', timeout=3000)

	@frappe.whitelist()
	def find_product_group(self):
		"""Queue a background job that runs the product-group finder."""
		if self.enable == 1:
			frappe.enqueue('erpnext_furniture_to_go.erpnext_furniture_to_go.doctype.furniture_to_go_settings.furniture_to_go_methods.product_group_finder', timeout=3000)

	@frappe.whitelist()
	def find_product_range(self):
		"""Queue a background job that runs the product-range finder."""
		if self.enable == 1:
			frappe.enqueue('erpnext_furniture_to_go.erpnext_furniture_to_go.doctype.furniture_to_go_settings.furniture_to_go_methods.product_range_finder', timeout=3000)

	@frappe.whitelist()
	def sync_products_to_items(self):
		"""Queue the long-running job converting supplier products to ERPNext items."""
		if self.enable == 1:
			# Longer timeout: this job processes the whole catalogue.
			frappe.enqueue('erpnext_furniture_to_go.erpnext_furniture_to_go.doctype.furniture_to_go_settings.furniture_to_go_methods.f2g_to_item', timeout=30000)

	@frappe.whitelist()
	def auto_fill_defaults(self):
		"""Populate the settings document with default values, then reload it."""
		if self.enable == 1:
			# Imported lazily so the helper module is only loaded when needed.
			from erpnext_furniture_to_go.erpnext_furniture_to_go.doctype.furniture_to_go_settings.furniture_to_go_methods import default_f2g_values
			default_f2g_values()
			self.reload()

	@frappe.whitelist()
	def tester(self):
		"""Development hook; currently identical in effect to auto_fill_defaults.

		Kept as a separate endpoint so the client-side button name stays stable.
		(Previously contained dead commented-out enqueue code; removed.)
		"""
		if self.enable == 1:
			from erpnext_furniture_to_go.erpnext_furniture_to_go.doctype.furniture_to_go_settings.furniture_to_go_methods import default_f2g_values
			default_f2g_values()
			self.reload()
| 42.978261 | 160 | 0.828022 |
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class FurnitureToGoSettings(Document):
@frappe.whitelist()
def find_new_products(self):
if self.enable == 1:
frappe.enqueue('erpnext_furniture_to_go.erpnext_furniture_to_go.doctype.furniture_to_go_settings.furniture_to_go_methods.find_new_products', timeout=3000)
@frappe.whitelist()
def find_product_group(self):
if self.enable == 1:
frappe.enqueue('erpnext_furniture_to_go.erpnext_furniture_to_go.doctype.furniture_to_go_settings.furniture_to_go_methods.product_group_finder', timeout=3000)
@frappe.whitelist()
def find_product_range(self):
if self.enable == 1:
frappe.enqueue('erpnext_furniture_to_go.erpnext_furniture_to_go.doctype.furniture_to_go_settings.furniture_to_go_methods.product_range_finder', timeout=3000)
@frappe.whitelist()
def sync_products_to_items(self):
if self.enable == 1:
frappe.enqueue('erpnext_furniture_to_go.erpnext_furniture_to_go.doctype.furniture_to_go_settings.furniture_to_go_methods.f2g_to_item', timeout=30000)
@frappe.whitelist()
def auto_fill_defaults(self):
if self.enable == 1:
from erpnext_furniture_to_go.erpnext_furniture_to_go.doctype.furniture_to_go_settings.furniture_to_go_methods import default_f2g_values
default_f2g_values()
self.reload()
@frappe.whitelist()
def tester(self):
if self.enable == 1:
from erpnext_furniture_to_go.erpnext_furniture_to_go.doctype.furniture_to_go_settings.furniture_to_go_methods import default_f2g_values
default_f2g_values()
self.reload()
| true | true |
f71e7ee02f11010b14d10aee34612e2db23ec030 | 83,913 | py | Python | pycoalescence/tests/test_coalescence_tree.py | thompsonsed/pycoalescence | eddce52ad7b3584e1fb208532d6851751b27dd4a | [
"MIT"
] | null | null | null | pycoalescence/tests/test_coalescence_tree.py | thompsonsed/pycoalescence | eddce52ad7b3584e1fb208532d6851751b27dd4a | [
"MIT"
] | null | null | null | pycoalescence/tests/test_coalescence_tree.py | thompsonsed/pycoalescence | eddce52ad7b3584e1fb208532d6851751b27dd4a | [
"MIT"
] | null | null | null | """
Tests the coalescence tree object.
"""
import os
import random
import shutil
import sqlite3
import sys
import unittest
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from setup_tests import setUpAll, tearDownAll, skipLongTest
from pycoalescence import Simulation
from pycoalescence.coalescence_tree import CoalescenceTree, get_parameter_description
from pycoalescence.sqlite_connection import check_sql_table_exist
def setUpModule():
    """Prepare module-level fixtures: create the output directory and reset any
    stored calculations in the sample database."""
    setUpAll()
    sample_tree = CoalescenceTree("sample/sample.db")
    sample_tree.clear_calculations()
def tearDownModule():
    """Tear down the module-level fixtures by removing the output directory."""
    tearDownAll()
class TestNullSimulationErrors(unittest.TestCase):
    """Checks that an unlinked CoalescenceTree refuses every operation."""

    def testRaisesError(self):
        """Every calculation or getter on a tree with no database must raise
        a RuntimeError. The calls are table-driven so each one is checked
        under the same assertion."""
        tree = CoalescenceTree()
        # Deferred calls, in the same order as the original per-call checks.
        operations = [
            lambda: tree.get_species_richness(),
            lambda: tree.calculate_fragment_richness(),
            lambda: tree.calculate_alpha_diversity(),
            lambda: tree.calculate_beta_diversity(),
            lambda: tree.calculate_fragment_abundances(),
            lambda: tree.calculate_fragment_octaves(),
            lambda: tree.calculate_octaves(),
            lambda: tree.get_fragment_list(),
            lambda: tree.get_alpha_diversity(),
            lambda: tree.get_beta_diversity(),
            lambda: tree.get_community_references(),
            lambda: tree.get_metacommunity_references(),
            lambda: tree.get_species_locations(),
            lambda: tree.get_species_abundances(),
            lambda: tree.get_species_list(),
            lambda: tree.get_simulation_parameters(),
            lambda: tree.get_fragment_abundances("null", 1),
            lambda: tree.get_species_richness(),
            lambda: tree.get_octaves(1),
        ]
        for operation in operations:
            with self.assertRaises(RuntimeError):
                operation()
class TestParameterDescriptions(unittest.TestCase):
    """
    Tests that program correctly reads from the parameter_descriptions.json dictionary.
    """

    def testReadsCorrectly(self):
        """
        Tests that the dictionary is read correctly.
        """
        # Expected descriptions: must mirror parameter_descriptions.json exactly.
        # Note several values rely on implicit adjacent-string concatenation.
        tmp_dict = {
            "habitat_change_rate": "the rate of change from present density maps to historic density maps",
            "sample_file": "the sample area map for spatially selective sampling. Can be null to sample all " "cells",
            "sample_x": "the sample map x dimension",
            "sample_y": "the sample map y dimension",
            "sample_x_offset": "the sample x map offset from the grid",
            "sample_y_offset": "the sample y map offset from the grid",
            "output_dir": "the output directory for the simulation database",
            "seed": "the random seed to start the simulation, for repeatability",
            "coarse_map_x": "the coarse density map x dimension",
            "fine_map_file": "the density map file location at the finer resolution, covering a smaller area",
            "tau": "the tau dispersal value for fat-tailed dispersal",
            "grid_y": "the simulated grid y dimension",
            "dispersal_relative_cost": "the relative rate of moving through non-habitat compared to habitat",
            "fine_map_y_offset": "the number of cells the fine map is offset from the sample map in the y "
            "dimension, at the fine resolution",
            "gen_since_historical": "the number of generations that occur before the historical, or historic,"
            " state is reached",
            "dispersal_method": "the dispersal method used. Can be one of 'normal', 'norm-uniform' or " "'fat-tail'.",
            "historical_fine_map": "the historical, or historic, coarse density map file location",
            "coarse_map_scale": "the scale of the coarse density map compared to the fine density map. 1 "
            "means equal density",
            "grid_x": "the simulated grid x dimension",
            "coarse_map_file": "the density map file location at the coarser resolution, covering a larger " "area",
            "min_num_species": "the minimum number of species known to exist (currently has no effect)",
            "historical_coarse_map": "the historical, or historic, coarse density map file location",
            "m_probability": "the probability of choosing from the uniform dispersal kernel in normal-uniform"
            " dispersal",
            "sigma": "the sigma dispersal value for normal, fat-tailed and normal-uniform dispersals",
            "deme": "the number of individuals inhabiting a cell at a map density of 1",
            "time_config_file": "will be 'set' if temporal sampling is used, 'null' otherwise",
            "coarse_map_y": "the coarse density map y dimension",
            "fine_map_x": "the fine density map x dimension",
            "coarse_map_y_offset": "the number of cells the coarse map is offset from the fine map in the y "
            "dimension, at the fine resolution",
            "cutoff": "the maximal dispersal distance possible, for normal-uniform dispersal",
            "fine_map_y": "the fine density map y dimension",
            "sample_size": "the proportion of individuals to sample from each cell (0-1)",
            "fine_map_x_offset": "the number of cells the fine map is offset from the sample map in the x "
            "dimension, at the fine resolution",
            "speciation_rate": "the minimum speciation rate the simulation was run with",
            "task": "the job or task reference number given to this simulation",
            "coarse_map_x_offset": "the number of cells the coarse map is offset from the fine map in the x "
            "dimension, at the fine resolution",
            "landscape_type": "if false, landscapes have hard boundaries. Otherwise, can be infinite, "
            "with 1s everywhere, or tiled_coarse or tiled_fine for repeated units of tiled "
            "maps",
            "max_time": "the maximum simulation time to run for (in seconds)",
            "sim_complete": "set to true upon simulation completion, false for incomplete simulations",
            "protracted": "if true, the simulation was run with protracted speciation.",
            "min_speciation_gen": "the minimum number of generations required before speciation can occur",
            "max_speciation_gen": "the maximum number of generations a lineage can exist before it is " "speciated",
            "dispersal_map": "a tif file where rows represent cumulative dispersal probability to every other "
            "cell, using the row number = x + (y * x_max)",
        }
        t = CoalescenceTree("sample/sample.db")
        sim_output = t.get_simulation_parameters()
        # Descriptions and stored simulation parameters must cover each other
        # exactly (no extra or missing keys in either direction).
        for key in sim_output.keys():
            self.assertIn(key, get_parameter_description().keys())
            self.assertEqual(get_parameter_description(key), t.get_parameter_description(key))
        for key in get_parameter_description().keys():
            self.assertIn(key, sim_output.keys())
        for key in tmp_dict.keys():
            self.assertEqual(tmp_dict[key], get_parameter_description(key))
        self.assertDictEqual(tmp_dict, get_parameter_description())
        # Unknown parameter names should raise a KeyError.
        with self.assertRaises(KeyError):
            get_parameter_description(key="notakey")
        # Dispersal parameters are a described subset of the full parameter set.
        dispersal_parameters = t.dispersal_parameters()
        expected_disp_dict = {
            "dispersal_method": "normal",
            "sigma": 3.55,
            "tau": 0.470149,
            "m_probability": 0,
            "cutoff": 0,
        }
        for key in dispersal_parameters.keys():
            self.assertIn(key, tmp_dict.keys())
            self.assertIn(key, expected_disp_dict.keys())
        for key, val in expected_disp_dict.items():
            self.assertIn(key, dispersal_parameters.keys())
            # Floats are compared approximately to tolerate rounding.
            if isinstance(val, float):
                self.assertAlmostEqual(val, dispersal_parameters[key])
            else:
                self.assertEqual(val, dispersal_parameters[key])
class TestCoalescenceTreeSettingSpeciationParameters(unittest.TestCase):
    """Tests that the correct errors are raised when speciation parameters are supplied incorrectly."""

    @classmethod
    def setUpClass(cls):
        """Generates the temporary databases to attempt analysis on."""
        # Copy the sample databases into output/ so the tests may modify them freely.
        src = [os.path.join("sample", "sample{}.db".format(x)) for x in [2, 3]]
        cls.dst = [os.path.join("output", "sample{}.db".format(x)) for x in [2, 3]]
        for tmp_src, tmp_dst in zip(src, cls.dst):
            if os.path.exists(tmp_dst):
                os.remove(tmp_dst)
            shutil.copy(tmp_src, tmp_dst)

    def testSetSpeciationRates(self):
        """Tests setting speciation rates works as intended and raises appropriate errors."""
        ct = CoalescenceTree(self.dst[0])
        # Non-numeric inputs raise a TypeError.
        for attempt in ["a string", ["a", "string"], [["list", "list2"], 0.2, 0.1], [None]]:
            with self.assertRaises(TypeError):
                ct._set_speciation_rates(attempt)
        # A missing speciation rate is a RuntimeError, not a TypeError.
        with self.assertRaises(RuntimeError):
            ct._set_speciation_rates(None)
        # Rates outside the valid probability range raise a ValueError.
        for attempt in [-10, -2.0, 1.1, 100, [-1, 0.1, 0.2], [0.2, 0.8, 1.1]]:
            with self.assertRaises(ValueError):
                ct._set_speciation_rates(attempt)
        expected_list = [0.1, 0.2, 0.3]
        ct._set_speciation_rates(expected_list)
        self.assertEqual(expected_list, ct.applied_speciation_rates_list)
        # A single rate is wrapped into a one-element list.
        ct._set_speciation_rates(0.2)
        self.assertEqual([0.2], ct.applied_speciation_rates_list)

    def testSetRecordFragments(self):
        """Tests that setting the record_fragments flag works as expected."""
        ct = CoalescenceTree(self.dst[0])
        ct._set_record_fragments(True)
        self.assertEqual("null", ct.record_fragments)
        ct._set_record_fragments(False)
        self.assertEqual("F", ct.record_fragments)
        # Non-csv or missing fragment config files raise an IOError.
        for each in ["PlotBiodiversityMetrics.db", "doesntexist.csv"]:
            config_path = os.path.join("sample", each)
            with self.assertRaises(IOError):
                ct._set_record_fragments(config_path)
        expected = os.path.join("sample", "FragmentsTest.csv")
        ct._set_record_fragments(expected)
        self.assertEqual(expected, ct.record_fragments)

    def testSetRecordSpatial(self):
        """Tests that the setting the record_spatial flag works as expected."""
        ct = CoalescenceTree(self.dst[0])
        ct._set_record_spatial("T")
        self.assertTrue(ct.record_spatial)
        ct._set_record_spatial("F")
        self.assertFalse(ct.record_spatial)
        with self.assertRaises(TypeError):
            ct._set_record_spatial("nota bool")
        ct._set_record_spatial(True)
        self.assertTrue(ct.record_spatial)

    def testSetMetacommunityParameters(self):
        """Tests that setting the metacommunity parameters works as expected."""
        ct = CoalescenceTree(self.dst[0])
        for size, spec in [[-10, 0.1], [10, -0.1], [10, 1.1]]:
            # Configure a valid community first; only the metacommunity call
            # below is expected to raise.
            ct.fragments = "F"
            ct._set_record_fragments(False)
            ct._set_record_spatial(False)
            ct.times = [0.0]
            # Bug fix: the raising call must sit inside the assertRaises
            # context (previously only a plain attribute assignment - which
            # cannot raise ValueError - was wrapped, so the invalid
            # parameters escaped the assertion).
            with self.assertRaises(ValueError):
                ct._set_metacommunity_parameters(size, spec)
        # No arguments disables the metacommunity entirely.
        ct._set_metacommunity_parameters()
        self.assertEqual(0.0, ct.metacommunity_size)
        self.assertEqual(0.0, ct.metacommunity_speciation_rate)
        ct._set_metacommunity_parameters(10, 0.1, "simulated")
        self.assertEqual(10, ct.metacommunity_size)
        self.assertEqual(0.1, ct.metacommunity_speciation_rate)

    def testSetProtractedParameters(self):
        """Tests that setting the protracted parameters works as expected."""
        # sample2.db was not generated by a protracted simulation, so setting
        # protracted parameters on it must fail.
        ct = CoalescenceTree(self.dst[0])
        with self.assertRaises(ValueError):
            ct._set_protracted_parameters(0.1, 100)
        ct = CoalescenceTree(self.dst[1])
        ct._set_protracted_parameters(10, 100)
        self.assertEqual((10.0, 100.0), ct.protracted_parameters[0])
        ct.protracted_parameters = []
        # Parameters outside the simulated protracted range are rejected by
        # every entry point.
        for min_proc, max_proc in [[200, 5000], [80, 50], [200, 11000]]:
            with self.assertRaises(ValueError):
                ct._check_protracted_parameters(min_proc, max_proc)
            with self.assertRaises(ValueError):
                ct._set_protracted_parameters(min_proc, max_proc)
            with self.assertRaises(ValueError):
                ct.add_protracted_parameters(min_proc, max_proc)
        ct._set_protracted_parameters(50, 5000)
        self.assertEqual((50.0, 5000.0), ct.protracted_parameters[0])
        ct.protracted_parameters = []
        # With no arguments the parameters default to (0.0, 0.0).
        ct._set_protracted_parameters()
        self.assertEqual((0.0, 0.0), ct.protracted_parameters[0])

    def testSetSampleFile(self):
        """Tests that the sample file is correctly set."""
        ct = CoalescenceTree(self.dst[0])
        # Missing files and non-map files raise an IOError.
        for file in ["notafile.tif", os.path.join("sample", "sample.db")]:
            with self.assertRaises(IOError):
                ct._set_sample_file(file)
        # No argument defaults to "null" (sample everywhere).
        ct._set_sample_file()
        self.assertEqual("null", ct.sample_file)
        expected_file = os.path.join("sample", "SA_sample_coarse.tif")
        ct._set_sample_file(expected_file)
        self.assertEqual(expected_file, ct.sample_file)

    def testSetTimes(self):
        """Tests that times are correctly set."""
        ct = CoalescenceTree(self.dst[0])
        # None defaults to a single time of 0.0.
        ct._set_times(None)
        self.assertEqual(0.0, ct.times[0])
        # add_times requires a list of numeric values.
        with self.assertRaises(TypeError):
            ct.add_times(0.5)
        with self.assertRaises(TypeError):
            ct.add_times([0.2, 0.5, "string"])
        ct.times = None
        ct.add_times([0.2, 0.5, 10])
        self.assertEqual([0.0, 0.2, 0.5, 10.0], ct.times)
        ct.times = None
        ct._set_times(0.2)
        self.assertEqual([0.0, 0.2], ct.times)
        ct.times = None
        ct._set_times([0.1, 0.5, 10.0])
        self.assertEqual([0.0, 0.1, 0.5, 10.0], ct.times)
class TestCoalescenceTreeParameters(unittest.TestCase):
    """Tests that parameters are correctly obtained from the databases and the relevant errors are raised."""

    def testCommunityParameters1(self):
        """Tests the community parameters make sense in a very simple community."""
        # Work on a throwaway copy so wipe_data() below does not damage the sample.
        shutil.copyfile(os.path.join("sample", "sample3.db"), os.path.join("output", "temp_sample3.db"))
        t = CoalescenceTree(os.path.join("output", "temp_sample3.db"), logging_level=50)
        # sample3.db holds a single (protracted) community and no metacommunity.
        self.assertEqual([], t.get_metacommunity_references())
        self.assertEqual([1], t.get_community_references())
        params = t.get_community_parameters(1)
        expected_dict = {
            "speciation_rate": 0.001,
            "time": 0.0,
            "fragments": 0,
            "metacommunity_reference": 0,
            "min_speciation_gen": 100.0,
            "max_speciation_gen": 10000.0,
        }
        self.assertEqual(expected_dict, params)
        # No metacommunity table exists, so lookups raise a sqlite3.Error.
        with self.assertRaises(sqlite3.Error):
            t.get_metacommunity_parameters(1)
        # Unknown community references raise a KeyError.
        with self.assertRaises(KeyError):
            t.get_community_parameters(2)
        with self.assertRaises(KeyError):
            t.get_community_reference(0.1, 0.0, 0, 0, 0.0, min_speciation_gen=100.0, max_speciation_gen=10000.0)
        # Omitting the protracted parameters does not match the stored community.
        with self.assertRaises(KeyError):
            _ = t.get_community_reference(speciation_rate=0.001, time=0.0, fragments=False)
        ref = t.get_community_reference(
            speciation_rate=0.001, time=0.0, fragments=False, min_speciation_gen=100.0, max_speciation_gen=10000.0
        )
        self.assertEqual(1, ref)
        self.assertEqual(expected_dict, t.get_community_parameters(ref))
        # After wiping the data, parameter retrieval should fail cleanly.
        t.wipe_data()
        with self.assertRaises(IOError):
            t.get_community_parameters_pd()

    def testCommunityParameters2(self):
        """Tests the community parameters make sense in a very simple community."""
        t = CoalescenceTree(os.path.join("sample", "sample4.db"))
        self.assertEqual([1, 2, 3, 4, 5], t.get_community_references())
        # References 1-5 pair each speciation rate with one of the
        # metacommunity configurations (0 = none, 1 = simulated, 2 = analytical).
        expected_params1 = {"speciation_rate": 0.1, "time": 0.0, "fragments": 0, "metacommunity_reference": 0}
        expected_params2 = {"speciation_rate": 0.1, "time": 0.0, "fragments": 0, "metacommunity_reference": 1}
        expected_params3 = {"speciation_rate": 0.2, "time": 0.0, "fragments": 0, "metacommunity_reference": 1}
        expected_params4 = {"speciation_rate": 0.1, "time": 0.0, "fragments": 0, "metacommunity_reference": 2}
        expected_params5 = {"speciation_rate": 0.2, "time": 0.0, "fragments": 0, "metacommunity_reference": 2}
        expected_meta_params1 = {
            "speciation_rate": 0.001,
            "metacommunity_size": 10000.0,
            "option": "simulated",
            "external_reference": 0,
        }
        expected_meta_params2 = {
            "speciation_rate": 0.001,
            "metacommunity_size": 10000.0,
            "option": "analytical",
            "external_reference": 0,
        }
        params1 = t.get_community_parameters(1)
        params2 = t.get_community_parameters(2)
        params3 = t.get_community_parameters(3)
        params4 = t.get_community_parameters(4)
        params5 = t.get_community_parameters(5)
        params6 = t.get_metacommunity_parameters(1)
        params7 = t.get_metacommunity_parameters(2)
        self.assertEqual([1, 2], t.get_metacommunity_references())
        self.assertEqual(expected_params1, params1)
        self.assertEqual(expected_params2, params2)
        self.assertEqual(expected_params3, params3)
        self.assertEqual(expected_params4, params4)
        self.assertEqual(expected_params5, params5)
        self.assertEqual(expected_meta_params1, params6)
        self.assertEqual(expected_meta_params2, params7)
        # Out-of-range references raise a KeyError.
        with self.assertRaises(KeyError):
            t.get_community_parameters(6)
        with self.assertRaises(KeyError):
            t.get_metacommunity_parameters(3)
        # Reverse lookups: parameters must match a stored reference exactly.
        ref1 = t.get_community_reference(speciation_rate=0.1, time=0.0, fragments=False)
        with self.assertRaises(KeyError):
            t.get_community_reference(
                speciation_rate=0.1, time=0.0, fragments=False, min_speciation_gen=0.1, max_speciation_gen=10000.0
            )
        ref2 = t.get_community_reference(
            speciation_rate=0.1,
            time=0.0,
            fragments=False,
            metacommunity_size=10000.0,
            metacommunity_speciation_rate=0.001,
            metacommunity_option="simulated",
        )
        # A mismatched metacommunity speciation rate should not resolve.
        with self.assertRaises(KeyError):
            t.get_community_reference(
                speciation_rate=0.1,
                time=0.0,
                fragments=False,
                metacommunity_size=10000.0,
                metacommunity_speciation_rate=0.01,
                metacommunity_option="simulated",
            )
        ref3 = t.get_community_reference(
            speciation_rate=0.2,
            time=0.0,
            fragments=False,
            metacommunity_size=10000.0,
            metacommunity_speciation_rate=0.001,
            metacommunity_option="simulated",
        )
        ref4 = t.get_community_reference(
            speciation_rate=0.1,
            time=0.0,
            fragments=False,
            metacommunity_size=10000.0,
            metacommunity_speciation_rate=0.001,
            metacommunity_option="analytical",
        )
        ref5 = t.get_community_reference(
            speciation_rate=0.2,
            time=0.0,
            fragments=False,
            metacommunity_size=10000.0,
            metacommunity_speciation_rate=0.001,
            metacommunity_option="analytical",
        )
        self.assertEqual(1, ref1)
        self.assertEqual(2, ref2)
        self.assertEqual(3, ref3)
        self.assertEqual(4, ref4)
        self.assertEqual(5, ref5)
        # The pandas view must agree with the per-reference dictionaries.
        expected_community_params_list = []
        for reference in t.get_community_references():
            params = t.get_community_parameters(reference)
            params["reference"] = reference
            expected_community_params_list.append(params)
        expected_community_params = pd.DataFrame(expected_community_params_list)
        actual_output = t.get_community_parameters_pd()
        assert_frame_equal(expected_community_params, actual_output, check_like=True)

    def testIsComplete(self):
        """Tests sims are correctly identified as complete."""
        t = CoalescenceTree(os.path.join("sample", "sample4.db"))
        self.assertTrue(t.is_complete)
class TestCoalescenceTreeAnalysis(unittest.TestCase):
    """Tests analysis is performed correctly"""

    @classmethod
    def setUpClass(cls):
        """Sets up the Coalescence object test case."""
        dst1 = os.path.join("output", "sampledb0.db")
        # Copy a pristine sample database for each numbered target so tests
        # that mutate their own database cannot interfere with each other.
        for i in range(0, 11):
            dst = os.path.join("output", "sampledb{}.db".format(i))
            if os.path.exists(dst):
                os.remove(dst)
            shutil.copyfile(os.path.join("sample", "sample.db"), dst)
        shutil.copyfile(os.path.join("sample", "nse_reference.db"), os.path.join("output", "nse_reference1.db"))
        random.seed(2)
        cls.test = CoalescenceTree(dst1, logging_level=50)
        cls.test.clear_calculations()
        cls.test.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
        cls.test.calculate_fragment_richness()
        cls.test.calculate_fragment_octaves()
        cls.test.calculate_octaves_error()
        cls.test.calculate_alpha_diversity()
        cls.test.calculate_beta_diversity()
        # Secondary tree without fragment data, used by the error-raising tests.
        cls.test2 = CoalescenceTree()
        cls.test2.set_database(os.path.join("sample", "sample_nofrag.db"))
        dstx = os.path.join("output", "sampledbx.db")
        shutil.copyfile(dst1, dstx)
        c = CoalescenceTree(dstx)
        c.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
        c.calculate_goodness_of_fit()

    @classmethod
    def tearDownClass(cls):
        """Clears the calculated metrics from the shared test database."""
        cls.test.clear_calculations()

    def testComparisonDataNoExistError(self):
        """Tests that an IOError is raised when the comparison data file does not exist."""
        c = CoalescenceTree(os.path.join("sample", "sample.db"))
        with self.assertRaises(IOError):
            c.import_comparison_data(os.path.join("sample", "doesnotexist.db"))

    def testFragmentOctaves(self):
        """Tests that fragment octave richness values match expectations for a selection of fragments."""
        num = self.test.cursor.execute(
            "SELECT richness FROM FRAGMENT_OCTAVES WHERE fragment == 'P09' AND octave == 0"
            " AND community_reference == 1"
        ).fetchall()[0][0]
        self.assertEqual(num, 7, msg="Fragment octaves not correctly calculated.")
        num = self.test.cursor.execute(
            "SELECT richness FROM FRAGMENT_OCTAVES WHERE fragment == 'P09' AND octave == 0 "
            " AND community_reference == 2"
        ).fetchall()[0][0]
        self.assertEqual(num, 7, msg="Fragment octaves not correctly calculated.")
        num = self.test.cursor.execute(
            "SELECT richness FROM FRAGMENT_OCTAVES WHERE fragment == 'cerrogalera' AND octave == 1 "
            " AND community_reference == 1"
        ).fetchall()[0][0]
        self.assertEqual(num, 3, msg="Fragment octaves not correctly calculated.")
        num = self.test.cursor.execute(
            "SELECT richness FROM FRAGMENT_OCTAVES WHERE fragment == 'whole' AND octave == 1 "
            " AND community_reference == 2"
        ).fetchall()[0][0]
        self.assertEqual(num, 221, msg="Fragment octaves not correctly calculated.")

    def testFragmentAbundances(self):
        """
        Tests that fragment abundances are produced properly by the fragment detection functions.
        """
        num = self.test.cursor.execute(
            "SELECT COUNT(fragment) FROM FRAGMENT_ABUNDANCES WHERE fragment == 'P09' " " AND community_reference == 1"
        ).fetchall()[0][0]
        self.assertEqual(num, 9, msg="Fragment abundances not correctly calculated.")
        num = self.test.cursor.execute(
            "SELECT COUNT(fragment) FROM FRAGMENT_ABUNDANCES WHERE fragment == 'P09' " " AND community_reference == 2"
        ).fetchall()[0][0]
        self.assertEqual(num, 9, msg="Fragment abundances not correctly calculated.")
        num = self.test.cursor.execute(
            "SELECT COUNT(fragment) FROM FRAGMENT_ABUNDANCES WHERE fragment == 'cerrogalera' "
            " AND community_reference == 1"
        ).fetchall()[0][0]
        self.assertEqual(num, 9, msg="Fragment abundances not correctly calculated.")

    def testSpeciesAbundances(self):
        """Tests that the produced species abundances are correct by comparing species richness."""
        num = self.test.cursor.execute(
            "SELECT COUNT(species_id) FROM SPECIES_ABUNDANCES WHERE community_reference == 2"
        ).fetchall()[0][0]
        self.assertEqual(num, 1029, msg="Species abundances not correctly calculated.")
        num = self.test.cursor.execute(
            "SELECT COUNT(species_id) FROM SPECIES_ABUNDANCES WHERE community_reference == 1"
        ).fetchall()[0][0]
        self.assertEqual(num, 884, msg="Species abundances not correctly calculated.")

    def testGetOctaves(self):
        """Tests getting the octaves."""
        c = CoalescenceTree(os.path.join("output", "sampledb4.db"))
        c.clear_calculations()
        c.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
        c.calculate_richness()
        self.assertEqual([[0, 585], [1, 231], [2, 59], [3, 5]], c.get_octaves(1))
        c = CoalescenceTree(os.path.join("output", "sampledb4.db"))
        c.clear_calculations()
        c.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
        c.calculate_richness()
        actual = c.get_octaves_pd().head()
        expected = pd.DataFrame(
            [[1, 0, 585], [1, 1, 231], [1, 2, 59], [1, 3, 5], [2, 0, 760]],
            columns=["community_reference", "octave", "richness"],
        )
        assert_frame_equal(actual, expected, check_like=True)

    def testSpeciesLocations(self):
        """
        Tests that species locations have been correctly assigned.
        """
        num = self.test.cursor.execute(
            "SELECT species_id FROM SPECIES_LOCATIONS WHERE x==1662 AND y==4359 " " AND community_reference == 1"
        ).fetchall()
        self.assertEqual(len(set(num)), 2, msg="Species locations not correctly assigned")
        all_list = self.test.get_species_locations()
        select_list = self.test.get_species_locations(community_reference=1)
        self.assertListEqual([1, 1662, 4359, 1], all_list[0])
        self.assertListEqual([1, 1662, 4359], select_list[0])

    def testAlphaDiversity(self):
        """
        Tests that alpha diversity is correctly calculated and fetched for each parameter reference
        """
        c = CoalescenceTree(os.path.join("sample", "sample.db"))
        with self.assertRaises(IOError):
            c.get_alpha_diversity_pd()
        self.assertEqual(9, self.test.get_alpha_diversity(1))
        self.assertEqual(10, self.test.get_alpha_diversity(2))
        expected_alphas_list = []
        for reference in self.test.get_community_references():
            expected_alphas_list.append(
                {"community_reference": reference, "alpha_diversity": self.test.get_alpha_diversity(reference)}
            )
        expected_alphas = pd.DataFrame(expected_alphas_list).reset_index(drop=True)
        actual_alphas = self.test.get_alpha_diversity_pd().reset_index(drop=True)
        assert_frame_equal(expected_alphas, actual_alphas, check_like=True)

    def testBetaDiversity(self):
        """
        Tests that beta diversity is correctly calculated and fetched for the reference
        """
        c = CoalescenceTree(os.path.join("sample", "sample.db"))
        with self.assertRaises(IOError):
            c.get_beta_diversity_pd()
        self.assertAlmostEqual(98.111111111, self.test.get_beta_diversity(1), places=5)
        self.assertAlmostEqual(102.8, self.test.get_beta_diversity(2), places=5)
        expected_betas_list = []
        for reference in self.test.get_community_references():
            expected_betas_list.append(
                {"community_reference": reference, "beta_diversity": self.test.get_beta_diversity(reference)}
            )
        expected_betas = pd.DataFrame(expected_betas_list).reset_index(drop=True)
        actual_betas = self.test.get_beta_diversity_pd().reset_index(drop=True)
        assert_frame_equal(expected_betas, actual_betas, check_like=True)

    def testGetNumberIndividuals(self):
        """Tests that the number of individuals is obtained correctly."""
        c = CoalescenceTree(os.path.join("output", "sampledb7.db"))
        self.assertEqual(1504, c.get_number_individuals(community_reference=1))
        self.assertEqual(12, c.get_number_individuals(fragment="P09", community_reference=1))
        # After wiping the simulation data, counting individuals should fail.
        c.wipe_data()
        c.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
        with self.assertRaises(IOError):
            c.get_number_individuals(fragment="none")
        with self.assertRaises(IOError):
            c.get_number_individuals()

    def testGetFragmentAbundances(self):
        """Tests that fragment abundances are correctly obtained."""
        c = CoalescenceTree(os.path.join("sample", "sample3.db"))
        with self.assertRaises(IOError):
            c.get_fragment_abundances(fragment="P09", reference=1)
        with self.assertRaises(IOError):
            c.get_fragment_abundances_pd()
        abundances = self.test.get_fragment_abundances(fragment="P09", reference=1)
        expected_abundances = [[302, 1], [303, 1], [304, 1], [305, 1], [306, 1], [307, 1], [546, 2], [693, 1], [732, 3]]
        self.assertEqual(expected_abundances, abundances[:10])
        all_abundances = self.test.get_all_fragment_abundances()
        expected_abundances2 = [
            [1, "P09", 302, 1],
            [1, "P09", 303, 1],
            [1, "P09", 304, 1],
            [1, "P09", 305, 1],
            [1, "P09", 306, 1],
            [1, "P09", 307, 1],
            [1, "P09", 546, 2],
            [1, "P09", 693, 1],
            [1, "P09", 732, 3],
            [1, "cerrogalera", 416, 1],
        ]
        self.assertEqual(expected_abundances2, all_abundances[:10])
        df = pd.DataFrame(
            expected_abundances2, columns=["community_reference", "fragment", "species_id", "no_individuals"]
        )
        actual_df = self.test.get_fragment_abundances_pd().head(n=10)
        assert_frame_equal(df, actual_df, check_like=True)

    def testGetFragmentListErrors(self):
        """Tests the error is raised when obtaining fragment list."""
        c = CoalescenceTree(os.path.join("output", "sampledb8.db"))
        c.wipe_data()
        with self.assertRaises(IOError):
            c.get_fragment_list()

    def testClearGoodnessFit(self):
        """Tests that goodness of fit are correctly cleared."""
        c = CoalescenceTree(os.path.join("output", "sampledbx.db"))
        exec_command = "SELECT * FROM BIODIVERSITY_METRICS WHERE metric LIKE 'goodness_%'"
        self.assertTrue(len(c.cursor.execute(exec_command).fetchall()) >= 1)
        c._clear_goodness_of_fit()
        self.assertFalse(len(c.cursor.execute(exec_command).fetchall()) >= 1)

    def testGetBiodiversityMetrics(self):
        """Tests that biodiversity metrics are correctly obtained from the database."""
        c1 = CoalescenceTree(os.path.join("sample", "sample.db"))
        with self.assertRaises(IOError):
            c1.get_biodiversity_metrics()
        c2 = CoalescenceTree(os.path.join("sample", "sample2.db"))
        # np.nan is the canonical spelling; the np.NaN/np.NAN aliases used
        # previously were inconsistent and are removed in NumPy 2.0.
        expected_biodiversity_metrics = pd.DataFrame(
            [
                [1, "fragment_richness", "fragment2", 129.0, np.nan, np.nan],
                [2, "fragment_richness", "fragment2", 130.0, np.nan, np.nan],
                [1, "fragment_richness", "fragment1", 174.0, np.nan, np.nan],
                [2, "fragment_richness", "fragment1", 175.0, np.nan, np.nan],
                [1, "fragment_richness", "whole", 1163.0, np.nan, np.nan],
                [2, "fragment_richness", "whole", 1170.0, np.nan, np.nan],
            ],
            columns=["community_reference", "metric", "fragment", "value", "simulated", "actual"],
        ).reset_index(drop=True)
        actual_biodiversity_metrics = c2.get_biodiversity_metrics().reset_index(drop=True).fillna(value=np.nan)
        assert_frame_equal(expected_biodiversity_metrics, actual_biodiversity_metrics)

    def testRaisesErrorNoFragmentsAlpha(self):
        """
        Tests that an error is raised when alpha diversity is calculated without any fragment abundance data
        """
        with self.assertRaises(IOError):
            self.test2.calculate_alpha_diversity()

    def testRaisesErrorNoFragmentsBeta(self):
        """
        Tests that an error is raised when beta diversity is calculated without any fragment abundance data
        """
        with self.assertRaises(IOError):
            self.test2.calculate_beta_diversity()

    def testRaisesErrorNoFragmentsRichness(self):
        """
        Tests that an error is raised when fragment richness is calculated without any fragment abundance data
        """
        with self.assertRaises(IOError):
            self.test2.calculate_fragment_richness()

    def testRaisesErrorNoFragmentsOctaves(self):
        """
        Tests that an error is raised when fragment octaves are calculated without any fragment abundance data
        """
        with self.assertRaises(IOError):
            self.test2.calculate_fragment_octaves()

    @unittest.skipIf(sys.version[0] != "3", "Skipping Python 3.x tests")
    def testModelFitting2(self):
        """
        Tests that the goodness-of-fit calculations are correctly performed.
        """
        random.seed(2)
        self.test.calculate_goodness_of_fit()
        self.assertAlmostEqual(self.test.get_goodness_of_fit(), 0.30140801329929373, places=6)
        self.assertAlmostEqual(self.test.get_goodness_of_fit_fragment_octaves(), 0.0680205429120108, places=6)
        self.assertAlmostEqual(self.test.get_goodness_of_fit_fragment_richness(), 0.9244977999898334, places=6)

    @unittest.skipIf(sys.version[0] == "3", "Skipping Python 2.x tests")
    def testModelFitting3(self):
        """
        Tests that the goodness-of-fit calculations are correctly performed.
        """
        random.seed(2)
        self.test.calculate_goodness_of_fit()
        self.assertAlmostEqual(self.test.get_goodness_of_fit(), 0.30140801329929373, places=6)
        self.assertAlmostEqual(self.test.get_goodness_of_fit_fragment_octaves(), 0.0680205429120108, places=6)
        self.assertAlmostEqual(self.test.get_goodness_of_fit_fragment_richness(), 0.9244977999898334, places=6)

    def testErrorIfNotApplied(self):
        """Tests that an error is raised if outputting is attempted without applying any community parameters."""
        c = CoalescenceTree(os.path.join("sample", "sample.db"))
        with self.assertRaises(RuntimeError):
            c.output()

    def testFragmentNumbersMatching(self):
        """Checks behaviour when matching fragment numbers."""
        test = CoalescenceTree(os.path.join("output", "sampledb1.db"), logging_level=50)
        test.clear_calculations()
        with self.assertRaises(RuntimeError):
            test._check_fragment_numbers_match()
        with self.assertRaises(ValueError):
            test.calculate_fragment_abundances()
            test._check_fragment_numbers_match()
        test.comparison_file = os.path.join("sample", "PlotBiodiversityMetrics.db")
        self.assertTrue(test._check_fragment_numbers_match())
        test.fragment_abundances.pop(0)
        self.assertFalse(test._check_fragment_numbers_match())

    def testFragmentNumbersEqualisation(self):
        """Checks behaviour when equalising fragment numbers."""
        test = CoalescenceTree(os.path.join("output", "sampledb2.db"), logging_level=50)
        test.clear_calculations()
        test.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
        test.calculate_fragment_richness()
        # NOTE(review): this calls self.test (the class-level object) rather
        # than the local `test` created above - possibly intentional (checking
        # an unknown fragment is a no-op), but confirm it is not a typo.
        self.test._equalise_fragment_number("notafrag", 1)
        test.fragment_abundances[0][2] += 1000
        test._equalise_fragment_number("P09", 1)
        self.assertTrue(test._check_fragment_numbers_match())

    def testFragmentNumbersErrors(self):
        """Checks that equalising fragment numbers raises without comparison abundances."""
        test = CoalescenceTree(os.path.join("output", "sampledb3.db"), logging_level=50)
        test.clear_calculations()
        test.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
        test.comparison_abundances = None
        with self.assertRaises(ValueError):
            test._equalise_all_fragment_numbers()

    def testAdjustBiodiversityMetrics(self):
        """Checks that biodiversity metrics are correctly adjusted."""
        test = CoalescenceTree(os.path.join("output", "sampledb5.db"), logging_level=50)
        test.clear_calculations()
        test.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
        test.adjust_data()

    def testComparisonOctavesModification(self):
        """Tests that the comparison database is modified."""
        test = CoalescenceTree(os.path.join("output", "sampledb6.db"), logging_level=50)
        dst = os.path.join("output", "PlotBiodiversityMetricsNoAlpha2.db")
        shutil.copy(os.path.join("sample", "PlotBiodiversityMetricsNoAlpha.db"), dst)
        test.import_comparison_data(dst)
        test.calculate_comparison_octaves(store=True)
        self.assertTrue(os.path.exists(dst))

    @unittest.skipIf(sys.version[0] == "2", "Skipping Python 3.x tests")
    def testDownsamplingAndRevert(self):
        """Tests that downsampling works as intended and can be reverted."""
        c = CoalescenceTree(os.path.join("output", "sampledb9.db"))
        random.seed(a=10, version=3)
        original_individuals = c.get_number_individuals()
        original_richness = c.get_species_richness_pd()
        c.wipe_data()
        with self.assertRaises(ValueError):
            c.downsample(sample_proportion=2.0)
        c.downsample(sample_proportion=0.1)
        c.set_speciation_parameters([0.1, 0.2])
        c.apply()
        new_individuals = c.get_number_individuals()
        self.assertEqual(1452, new_individuals)
        self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST"))
        self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST_ORIGINAL"))
        c = CoalescenceTree(os.path.join("output", "sampledb9.db"))
        c.revert_downsample()
        c.wipe_data()
        c.set_speciation_parameters([0.1, 0.2])
        c.apply()
        # After reverting, the original individual counts and richness return.
        final_individuals = c.get_number_individuals()
        assert_frame_equal(original_richness, c.get_species_richness_pd())
        self.assertEqual(original_individuals, final_individuals)
        self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST"))
        self.assertFalse(check_sql_table_exist(c.database, "SPECIES_LIST_ORIGINAL"))
        # Now test with NSE sim to ensure correct sampling
        c = CoalescenceTree(os.path.join("output", "nse_reference1.db"))
        nse_richness = c.get_species_richness_pd()
        nse_no_individuals = c.get_number_individuals()
        c.wipe_data()
        c.downsample(sample_proportion=0.1)
        c.set_speciation_parameters([0.000001, 0.999999])
        c.apply()
        new_no_individuals = c.get_number_individuals()
        self.assertAlmostEqual(new_no_individuals / nse_no_individuals, 0.1, 5)
        self.assertEqual(1000, c.get_species_richness(reference=2))
        self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST"))
        self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST_ORIGINAL"))
        c = CoalescenceTree(os.path.join("output", "nse_reference1.db"))
        c.revert_downsample()
        c.wipe_data()
        c.set_speciation_parameters([0.000001, 0.999999])
        c.apply_incremental()
        c.set_speciation_parameters([0.5])
        c.apply()
        actual_richness = c.get_species_richness_pd()
        assert_frame_equal(nse_richness, actual_richness)
        self.assertEqual(nse_no_individuals, c.get_number_individuals())
        self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST"))
        self.assertFalse(check_sql_table_exist(c.database, "SPECIES_LIST_ORIGINAL"))
        # Reverting twice should fail - there is nothing left to restore.
        with self.assertRaises(IOError):
            c.revert_downsample()

    @unittest.skipIf(sys.version[0] == "2", "Skipping Python 3.x tests")
    def testDownsamplingByLocationAndRevert(self):
        """Tests that downsampling works as intended and can be reverted."""
        c = CoalescenceTree(os.path.join("output", "sampledb10.db"))
        random.seed(a=10, version=3)
        original_individuals = c.get_number_individuals()
        original_richness = c.get_species_richness_pd()
        c.wipe_data()
        with self.assertRaises(ValueError):
            c.downsample_at_locations(fragment_csv=os.path.join("sample", "FragmentsTestFail1.csv"))
        with self.assertRaises(IOError):
            c.downsample_at_locations(fragment_csv="not_a_file.csv")
        c.downsample_at_locations(fragment_csv=os.path.join("sample", "FragmentsTest3.csv"))
        c.set_speciation_parameters([0.1, 0.2])
        c.apply()
        new_individuals = c.get_number_individuals()
        self.assertEqual(2, new_individuals)
        self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST"))
        self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST_ORIGINAL"))
        c = CoalescenceTree(os.path.join("output", "sampledb10.db"))
        c.revert_downsample()
        c.wipe_data()
        c.set_speciation_parameters([0.1, 0.2])
        c.apply()
        final_individuals = c.get_number_individuals()
        assert_frame_equal(original_richness, c.get_species_richness_pd())
        self.assertEqual(original_individuals, final_individuals)
        self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST"))
        self.assertFalse(check_sql_table_exist(c.database, "SPECIES_LIST_ORIGINAL"))
        c = CoalescenceTree(os.path.join("output", "sampledb10.db"))
        c.wipe_data()
        # With ignore_errors=True, fragments outside the map are skipped.
        c.downsample_at_locations(fragment_csv=os.path.join("sample", "FragmentsTest4.csv"), ignore_errors=True)
        c.set_speciation_parameters([0.1, 0.2])
        c.apply()
        new_individuals = c.get_number_individuals()
        self.assertEqual(3, new_individuals)
class TestCoalescenceTreeWriteCsvs(unittest.TestCase):
    """Tests that csvs are correctly outputted."""

    @classmethod
    def setUpClass(cls):
        """Creates the CoalescenceTree object."""
        cls.c = CoalescenceTree(os.path.join("sample", "nse_reference.db"))

    def testWriteCommunityParameterToCsv(self):
        """Tests that community parameters are correctly written to a csv."""
        output_csv = os.path.join("output", "community_parameters1.csv")
        self.c.write_to_csv(output_csv, "COMMUNITY_PARAMETERS")
        self.assertTrue(os.path.exists(output_csv))
        import csv

        # Python 2 csv expects binary mode; Python 3 expects text mode.
        if sys.version_info[0] < 3:  # pragma: no cover
            infile = open(output_csv, "rb")
        else:
            infile = open(output_csv, "r")
        expected_output = [
            ["reference", "speciation_rate", "time", "fragments", "metacommunity_reference"],
            ["1", "1e-06", "0.0", "0", "0"],
            ["2", "0.99999", "0.0", "0", "0"],
            ["3", "0.5", "0.0", "0", "0"],
        ]
        actual_output = []
        with infile as csv_file:
            csv_reader = csv.reader(csv_file)
            for row in csv_reader:
                actual_output.append(row)
        self.assertEqual(expected_output, actual_output)
        # Writing to an existing file must fail rather than overwrite it.
        with self.assertRaises(IOError):
            self.c.write_to_csv(output_csv, "COMMUNITY_PARAMETERS")
        with self.assertRaises(KeyError):
            self.c.write_to_csv("notacsv.csv", "NOTATABLE")

    def testWritesAllCsvs(self):
        """Tests that all csvs write to the output correctly."""
        output_dir = os.path.join("output", "csvdir")
        if os.path.exists(output_dir):
            # BUG FIX: os.remove() cannot delete a directory and raised an
            # OSError whenever csvdir was left over from a previous run;
            # shutil.rmtree() removes the whole directory tree correctly.
            shutil.rmtree(output_dir)
        self.c.write_all_to_csvs(output_dir, "out1")
        expected_tables = ["COMMUNITY_PARAMETERS", "SIMULATION_PARAMETERS", "SPECIES_ABUNDANCES", "SPECIES_LIST"]
        for table in expected_tables:
            self.assertTrue(os.path.exists(os.path.join(output_dir, "out1_{}.csv".format(table))))
        for file in os.listdir(output_dir):
            if ".csv" in file:
                self.assertIn(file, ["out1_{}.csv".format(x) for x in expected_tables])
        # A file extension or trailing dot in the prefix should be stripped.
        self.c.write_all_to_csvs(output_dir, "out2.csv")
        for table in expected_tables:
            self.assertTrue(os.path.exists(os.path.join(output_dir, "out2_{}.csv".format(table))))
        self.c.write_all_to_csvs(output_dir, "out3.")
        for table in expected_tables:
            self.assertTrue(os.path.exists(os.path.join(output_dir, "out3_{}.csv".format(table))))
class TestCoalescenceTreeSpeciesDistances(unittest.TestCase):
    """Tests analysis is performed correctly."""

    @classmethod
    def setUpClass(cls):
        """Copies the sample database and calculates species distance similarity."""
        database_path = os.path.join("output", "sampledb1.db")
        if os.path.exists(database_path):
            os.remove(database_path)
        shutil.copyfile(os.path.join("sample", "sample.db"), database_path)
        cls.test = CoalescenceTree(database_path)
        cls.test.clear_calculations()
        cls.test.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
        cls.test.calculate_species_distance_similarity()

    def testSpeciesDistanceSimilarity(self):
        """
        Tests that the species distance similarity function works as intended.
        """
        mean_distance = self.test.cursor.execute(
            "SELECT value FROM BIODIVERSITY_METRICS WHERE community_reference == 1 AND "
            "metric == 'mean_distance_between_individuals'"
        ).fetchone()[0]
        self.assertAlmostEqual(mean_distance, 5.423769507803121, places=5)
        species_distances = self.test.get_species_distance_similarity(community_reference=1)
        # Check the first few distance/similarity pairs against known values.
        for index, expected_pair in enumerate([[0, 11], [1, 274], [2, 289]]):
            self.assertListEqual(species_distances[index], expected_pair)
class TestCoalescenceTreeAnalyseIncorrectComparison(unittest.TestCase):
    """
    Tests errors are raised correctly for incorrect comparison data.
    """
    @classmethod
    def setUpClass(cls):
        """
        Sets up the Coalescence object test case using comparison data that
        lacks alpha-diversity metrics, so goodness-of-fit cannot be computed.
        """
        random.seed(10)
        dst = os.path.join("output", "sampledb2.db")
        if os.path.exists(dst):
            os.remove(dst)
        shutil.copyfile(os.path.join("sample", "sample.db"), dst)
        cls.test = CoalescenceTree(logging_level=40)
        cls.test.set_database(dst)
        cls.test.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetricsNoAlpha.db"))
        cls.test.calculate_comparison_octaves(False)
        cls.test.clear_calculations()
        cls.test.calculate_fragment_richness()
        cls.test.calculate_fragment_octaves()
        cls.test.calculate_octaves_error()
        cls.test.calculate_alpha_diversity()
        # NOTE(review): calculate_alpha_diversity() is called twice in a row -
        # possibly deliberate (recalculation should be harmless), but it looks
        # like a copy-paste duplicate; confirm before removing.
        cls.test.calculate_alpha_diversity()
        cls.test.calculate_beta_diversity()
        # Secondary tree without fragment data, kept for parity with the other
        # analysis test cases.
        cls.test2 = CoalescenceTree()
        cls.test2.set_database(os.path.join("sample", "sample_nofrag.db"))
    @classmethod
    def tearDownClass(cls):
        """
        Clears the calculated metrics from the test database.
        """
        cls.test.clear_calculations()
    def testRaisesErrorMismatchParameters(self):
        """
        Tests that an error is raised when there is a parameter mismatch
        """
        with self.assertRaises(ValueError):
            self.test.calculate_goodness_of_fit()
class TestSimulationAnalysisTemporal(unittest.TestCase):
    """Tests that applying multiple times works as expected."""

    @classmethod
    def setUpClass(cls):
        """Generates the analysis object."""
        source_db = os.path.join("sample", "sample2.db")
        working_db = os.path.join("output", "sample2.db")
        if not os.path.exists(working_db):
            shutil.copy(source_db, working_db)
        cls.tree = CoalescenceTree()
        cls.tree.set_database(working_db)
        cls.tree.wipe_data()

    def testTimesWrongFormatError(self):
        """Tests that an error is raised when the times are in the wrong format."""
        for invalid_times in ([0.1, 0.2, "notafloat"], "notafloat"):
            with self.assertRaises(TypeError):
                # noinspection PyTypeChecker
                self.tree.set_speciation_parameters([0.4, 0.6], times=invalid_times)
        # Valid integer times should be coerced to floats.
        self.tree.times = []
        self.tree.set_speciation_parameters([0.4, 0.6], times=[0, 1, 10])
        self.assertEqual([0.0, 1.0, 10.0], self.tree.times)
class TestSimulationAnalysis(unittest.TestCase):
"""
Tests that the simulation can perform all required analyses, and that the correct errors are thrown if the object
does not exist.
"""
    @classmethod
    def setUpClass(cls):
        """Copies the sample databases and applies a basic set of community parameters."""
        src = os.path.join("sample", "sample2.db")
        dst = os.path.join("output", "sample2.db")
        # Always start from a pristine copy so state from earlier runs cannot leak in.
        if os.path.exists(dst):
            os.remove(dst)
        shutil.copy(src, dst)
        cls.tree = CoalescenceTree(logging_level=50)
        cls.tree.set_database(dst)
        cls.tree.wipe_data()
        cls.tree.set_speciation_parameters(
            speciation_rates=[0.5, 0.7],
            record_spatial="T",
            record_fragments=os.path.join("sample", "FragmentsTest.csv"),
            sample_file=os.path.join("sample", "SA_samplemaskINT.tif"),
        )
        cls.tree.apply()
        cls.tree.calculate_fragment_richness()
        cls.tree.calculate_fragment_octaves()
        # Seed numpy's RNG so the sampling-based tests in this class are deterministic.
        np.random.seed(100)
def testSetDatabaseErrors(self):
"""Tests that the set database errors are correctly raised."""
sim = Simulation()
c = CoalescenceTree()
with self.assertRaises(RuntimeError):
c.set_database(sim)
c = CoalescenceTree()
with self.assertRaises(IOError):
c.set_database(os.path.join("sample", "failsampledoesntexist.db"))
def testFragmentConfigNoExistError(self):
"""Tests that an error is raised if the fragment config file does not exist."""
tree = CoalescenceTree(self.tree.file)
with self.assertRaises(IOError):
tree.set_speciation_parameters(
speciation_rates=[0.5, 0.7],
record_spatial="T",
record_fragments=os.path.join("sample", "notafragmentconfig.csv"),
sample_file=os.path.join("sample", "SA_samplemaskINT.tif"),
)
with self.assertRaises(IOError):
tree.set_speciation_parameters(
speciation_rates=[0.5, 0.7],
record_spatial="T",
record_fragments=os.path.join("sample", "example_historical_fine.tif"),
sample_file=os.path.join("sample", "SA_samplemaskINT.tif"),
)
    def testReadsFragmentsRichness(self):
        """
        Tests that the fragment richness can be read correctly
        """
        # First verify the stored simulation parameters match the expected
        # fixture values, so the richness checks below are meaningful.
        sim_params = self.tree.get_simulation_parameters()
        expected_params = dict(
            seed=9,
            task=1,
            output_dir="output",
            speciation_rate=0.5,
            sigma=2.828427,
            tau=2.0,
            deme=1,
            sample_size=0.1,
            max_time=2.0,
            dispersal_relative_cost=1.0,
            min_num_species=1,
            habitat_change_rate=0.0,
            gen_since_historical=200.0,
            time_config_file="null",
            coarse_map_file="sample/SA_sample_coarse.tif",
            coarse_map_x=35,
            coarse_map_y=41,
            coarse_map_x_offset=11,
            coarse_map_y_offset=14,
            coarse_map_scale=1.0,
            fine_map_file="sample/SA_sample_fine.tif",
            fine_map_x=13,
            fine_map_y=13,
            fine_map_x_offset=0,
            fine_map_y_offset=0,
            sample_file="sample/SA_samplemaskINT.tif",
            grid_x=13,
            grid_y=13,
            sample_x=13,
            sample_y=13,
            sample_x_offset=0,
            sample_y_offset=0,
            historical_coarse_map="none",
            historical_fine_map="none",
            sim_complete=1,
            dispersal_method="normal",
            m_probability=0.0,
            cutoff=0.0,
            landscape_type="closed",
            protracted=0,
            min_speciation_gen=0.0,
            max_speciation_gen=0.0,
            dispersal_map="none",
        )
        for key in sim_params.keys():
            self.assertEqual(
                sim_params[key],
                expected_params[key],
                msg="Error in {}: {} != {}".format(key, sim_params[key], expected_params[key]),
            )
        # Spot-check richness for individual fragments and references.
        fragment2_richness = ["fragment2", 1, 129]
        self.assertEqual(self.tree.get_fragment_richness(fragment="fragment2", reference=1), 129)
        self.assertEqual(self.tree.get_fragment_richness(fragment="fragment1", reference=2), 175)
        octaves = self.tree.get_fragment_richness()
        self.assertListEqual(fragment2_richness, [list(x) for x in octaves if x[0] == "fragment2" and x[1] == 1][0])
        # The pandas accessor must agree with the per-fragment getter.
        expected_fragment_richness = []
        for reference in self.tree.get_community_references():
            for fragment in self.tree.get_fragment_list(reference):
                fragment_richness = self.tree.get_fragment_richness(fragment=fragment, reference=reference)
                expected_fragment_richness.append(
                    {"fragment": fragment, "community_reference": reference, "fragment_richness": fragment_richness}
                )
        expected_fragment_richness_df = (
            pd.DataFrame(expected_fragment_richness)
            .sort_values(by=["fragment", "community_reference"])
            .reset_index(drop=True)
        )
        actual_fragment_richness = self.tree.get_fragment_richness_pd().reset_index(drop=True)
        assert_frame_equal(expected_fragment_richness_df, actual_fragment_richness, check_like=True)
def testGetsFragmentList(self):
"""
Tests that fetching the list of fragments from FRAGMENT_ABUNDANCES is as expected
"""
fragment_list = self.tree.get_fragment_list()
expected_list = ["fragment1", "fragment2"]
self.assertListEqual(expected_list, fragment_list)
    def testReadsFragmentAbundances(self):
        """
        Tests that the fragment abundances are correctly read
        """
        # Spot-check the first ten [species_id, abundance] pairs for fragment2.
        expected_abundances = [
            [610, 1],
            [611, 1],
            [612, 1],
            [613, 1],
            [614, 1],
            [615, 1],
            [616, 1],
            [617, 1],
            [618, 1],
            [619, 1],
        ]
        actual_abundances = self.tree.get_species_abundances(fragment="fragment2", reference=1)
        for i, each in enumerate(expected_abundances):
            self.assertListEqual(actual_abundances[i], each)
        # A fragment without a reference is ambiguous and must be rejected.
        with self.assertRaises(ValueError):
            self.tree.get_species_abundances(fragment="fragment2")
        # The pandas accessor must agree with the per-fragment getter.
        expected_fragment_abundances_list = []
        for reference in self.tree.get_community_references():
            for fragment in self.tree.get_fragment_list(reference):
                fragment_abundances = self.tree.get_fragment_abundances(fragment=fragment, reference=reference)
                for species_id, abundance in fragment_abundances:
                    expected_fragment_abundances_list.append(
                        {
                            "fragment": fragment,
                            "community_reference": reference,
                            "species_id": species_id,
                            "no_individuals": abundance,
                        }
                    )
        expected_fragment_abundances = (
            pd.DataFrame(expected_fragment_abundances_list)
            .sort_values(by=["fragment", "community_reference", "species_id"])
            .reset_index(drop=True)
        )
        actual_fragment_abundances = (
            self.tree.get_fragment_abundances_pd()
            .sort_values(by=["fragment", "community_reference", "species_id"])
            .reset_index(drop=True)
        )
        assert_frame_equal(expected_fragment_abundances, actual_fragment_abundances, check_like=True)
def testFragmentRichnessRaiseError(self):
"""
Tests that the correct errors are raised when no fragment exists with that name, or with the specified
speciation rate, or time. Also checks SyntaxErrors and sqlite3.Errors when no FRAGMENT_RICHNESS table
exists.
"""
failtree = CoalescenceTree()
failtree.set_database(os.path.join("sample", "failsample.db"))
with self.assertRaises(IOError):
failtree.get_fragment_richness()
with self.assertRaises(IOError):
failtree.get_fragment_richness_pd()
with self.assertRaises(IOError):
self.tree.get_fragment_richness(fragment="fragment4", reference=1)
with self.assertRaises(SyntaxError):
self.tree.get_fragment_richness(fragment="fragment4")
with self.assertRaises(SyntaxError):
self.tree.get_fragment_richness(reference=1)
    def testReadsFragmentOctaves(self):
        """
        Tests that the fragment octaves can be read correctly.
        """
        octaves = self.tree.get_fragment_octaves(fragment="fragment2", reference=1)
        octaves2 = self.tree.get_fragment_octaves(fragment="fragment1", reference=1)
        all_octaves = self.tree.get_fragment_octaves()
        desired = ["fragment1", 1, 0, 173]
        self.assertListEqual([0, 128], octaves[0])
        self.assertListEqual([0, 173], octaves2[0])
        self.assertListEqual(desired, [x for x in all_octaves if x[0] == "fragment1" and x[1] == 1 and x[2] == 0][0])
        # The pandas accessor must agree with the per-fragment getter; the
        # special "whole" pseudo-fragment is included when present.
        expected_fragment_octaves_list = []
        for reference in self.tree.get_community_references():
            fragment_list = self.tree.get_fragment_list(reference)
            fragment_list.append("whole")
            for fragment in fragment_list:
                try:
                    octaves = self.tree.get_fragment_octaves(fragment=fragment, reference=reference)
                    for octave, richness in octaves:
                        expected_fragment_octaves_list.append(
                            {
                                "fragment": fragment,
                                "community_reference": reference,
                                "octave": octave,
                                "richness": richness,
                            }
                        )
                except RuntimeError:
                    # No octave data for this fragment/reference combination.
                    continue
        expected_fragment_octaves = (
            pd.DataFrame(expected_fragment_octaves_list)
            .sort_values(["fragment", "community_reference", "octave"], axis=0)
            .reset_index(drop=True)
        )
        actual_fragment_octaves = (
            self.tree.get_fragment_octaves_pd()
            .sort_values(["fragment", "community_reference", "octave"], axis=0)
            .reset_index(drop=True)
        )
        assert_frame_equal(expected_fragment_octaves, actual_fragment_octaves, check_like=True)
def testFragmentOctavesRaiseError(self):
"""
Tests that the correct errors are raised for different situations for reading fragment octaves
"""
failtree = CoalescenceTree()
try:
failtree.set_database("sample/failsample.db")
except sqlite3.Error:
pass
with self.assertRaises(sqlite3.Error):
failtree.get_fragment_octaves(fragment="fragment4", reference=100)
with self.assertRaises(RuntimeError):
self.tree.get_fragment_octaves(fragment="fragment4", reference=100)
with self.assertRaises(SyntaxError):
self.tree.get_fragment_octaves(fragment="fragment4")
with self.assertRaises(SyntaxError):
self.tree.get_fragment_octaves(reference=100)
def testFragmentSampling(self):
"""
Tests that sampling from fragments is accurate.
"""
self.assertEqual(
10,
self.tree.sample_fragment_richness(
fragment="fragment1", number_of_individuals=10, n=1, community_reference=2
),
)
self.assertEqual(
10,
self.tree.sample_fragment_richness(
fragment="fragment2", number_of_individuals=10, n=10, community_reference=2
),
)
def testLandscapeSampling(self):
"""Tests that the sampling from the landscape works as intended."""
number_dict = {"fragment1": 3, "fragment2": 10}
np.random.seed(100)
self.assertEqual(
13, self.tree.sample_landscape_richness(number_of_individuals=number_dict, n=1, community_reference=2)
)
self.assertAlmostEqual(
99.9, self.tree.sample_landscape_richness(number_of_individuals=100, n=10, community_reference=1), places=3
)
def testRaisesSamplingErrors(self):
"""Tests that sampling errors are correctly raised"""
number_dict = {"fragment1": 3000000, "fragment2": 10}
with self.assertRaises(KeyError):
self.assertEqual(
13, self.tree.sample_landscape_richness(number_of_individuals=number_dict, n=1, community_reference=2)
)
number_dict2 = {"fragment": 10, "fragment2": 10}
with self.assertRaises(KeyError):
self.assertEqual(
13, self.tree.sample_landscape_richness(number_of_individuals=number_dict2, n=1, community_reference=2)
)
def testSpeciesRichness(self):
"""Tests that the simulation species richness is read correctly."""
actual_species_richness = (
self.tree.get_species_richness_pd().sort_values(by=["community_reference"]).reset_index(drop=True)
)
expected_species_richness_list = []
for reference in self.tree.get_community_references():
expected_species_richness_list.append(
{"community_reference": reference, "richness": self.tree.get_species_richness(reference=reference)}
)
expected_species_richness = pd.DataFrame(expected_species_richness_list)
assert_frame_equal(actual_species_richness, expected_species_richness, check_like=True)
def testOctaves(self):
"""Tests that the simulation octave classes are correctly calculated."""
actual_species_octaves = (
self.tree.get_octaves_pd().sort_values(by=["community_reference", "octave"]).reset_index(drop=True)
)
expected_species_octaves_list = []
for reference in self.tree.get_community_references():
for octave, richness in self.tree.get_octaves(reference):
expected_species_octaves_list.append(
{"community_reference": reference, "octave": octave, "richness": richness}
)
expected_species_octaves = pd.DataFrame(expected_species_octaves_list)
assert_frame_equal(actual_species_octaves, expected_species_octaves, check_like=True)
class TestMetacommunityApplication(unittest.TestCase):
    """
    Tests that a metacommunity can be applied correctly under the three different scenarios. Note that this does not
    test edge cases, just that the parameters are correctly stored and the different application methods work as
    intended.
    """
    @classmethod
    def setUpClass(cls):
        """Initialises the six database files to use, one fresh copy per test."""
        src = os.path.join("sample", "sample.db")
        for i in range(6):
            dst = os.path.join("output", "sample_{}.db".format(i))
            # Remove any leftover copy from a previous run before copying afresh.
            if os.path.exists(dst):
                os.remove(dst)
            shutil.copy2(src, dst)
    def testMetacommunityAddingInvalidParameters(self):
        """Tests that adding invalid parameter for a metacommunity raises the appropriate errors."""
        tree = CoalescenceTree(os.path.join("output", "sample_0.db"))
        tree.wipe_data()
        # No community parameters applied yet, so the pandas accessor has nothing to return.
        with self.assertRaises(IOError):
            tree.get_metacommunity_parameters_pd()
        tree.set_speciation_parameters([0.1, 0.2])
        # Each row is (size, speciation rate, option, external reference); all invalid combos.
        for size, spec, opt, ref in [
            [0, 0.1, "simulated", None],
            [10, 0.0, "analytical", None],
            [None, None, "analytical", None],
            [10, 0.0, "path/to/file", None],
            [0, 0.0, "path/to/file", None],
            [0, 0.0, "path/to/not/a/file.db", 1],
        ]:
            with self.assertRaises(ValueError):
                tree.add_metacommunity_parameters(
                    metacommunity_size=size,
                    metacommunity_speciation_rate=spec,
                    metacommunity_option=opt,
                    metacommunity_reference=ref,
                )
        # A metacommunity option naming a missing database file raises an IOError.
        with self.assertRaises(IOError):
            tree.add_metacommunity_parameters(metacommunity_option="not/a/file/db.db", metacommunity_reference=1)
    def testMetacommunitySimulation(self):
        """Tests that a simulated metacommunity works as intended."""
        tree = CoalescenceTree(os.path.join("output", "sample_1.db"))
        tree.wipe_data()
        tree.set_speciation_parameters(
            [0.1, 0.2], metacommunity_size=10000, metacommunity_speciation_rate=0.001, metacommunity_option="simulated"
        )
        tree.add_metacommunity_parameters(
            metacommunity_size=15000, metacommunity_speciation_rate=0.1, metacommunity_option="simulated"
        )
        tree.add_metacommunity_parameters(
            metacommunity_size=100000, metacommunity_speciation_rate=0.001, metacommunity_option="simulated"
        )
        tree.apply()
        params_1 = tree.get_metacommunity_parameters(1)
        params_2 = tree.get_metacommunity_parameters(2)
        params_3 = tree.get_metacommunity_parameters(3)
        # Each stored metacommunity parameter set should round-trip exactly as supplied.
        self.assertEqual(10000, params_1["metacommunity_size"])
        self.assertEqual(0.001, params_1["speciation_rate"])
        self.assertEqual("simulated", params_1["option"])
        self.assertEqual(0, params_1["external_reference"])
        self.assertEqual(15000, params_2["metacommunity_size"])
        self.assertEqual(0.1, params_2["speciation_rate"])
        self.assertEqual("simulated", params_2["option"])
        self.assertEqual(0, params_2["external_reference"])
        self.assertEqual(100000, params_3["metacommunity_size"])
        self.assertEqual(0.001, params_3["speciation_rate"])
        self.assertEqual("simulated", params_3["option"])
        self.assertEqual(0, params_3["external_reference"])
        # Pinned richness values act as regression checks for the seeded sample database.
        self.assertEqual(51, tree.get_species_richness(1))
        self.assertEqual(47, tree.get_species_richness(2))
        self.assertEqual(681, tree.get_species_richness(3))
        self.assertEqual(783, tree.get_species_richness(4))
        self.assertEqual(247, tree.get_species_richness(5))
        self.assertEqual(241, tree.get_species_richness(6))
        # The pandas accessor should contain exactly the per-reference parameter sets.
        expected_metacommunity_parameters_list = []
        for reference in tree.get_community_references():
            try:
                params = tree.get_metacommunity_parameters(reference)
                params["reference"] = reference
                expected_metacommunity_parameters_list.append(params)
            except KeyError:
                # Community references with no metacommunity attached are skipped.
                continue
        expected_metacommunity_parameters = pd.DataFrame(expected_metacommunity_parameters_list).sort_values(
            ["reference"]
        )
        actual_metacommunity_parameters = tree.get_metacommunity_parameters_pd().sort_values(["reference"])
        assert_frame_equal(expected_metacommunity_parameters, actual_metacommunity_parameters, check_like=True)
    def testMetacommunityAnalytical(self):
        """Tests that an analytical metacommunity works as intended."""
        tree = CoalescenceTree(os.path.join("output", "sample_2.db"))
        tree.wipe_data()
        tree.set_speciation_parameters(
            [0.1, 0.2], metacommunity_size=10000, metacommunity_speciation_rate=0.001, metacommunity_option="analytical"
        )
        tree.add_metacommunity_parameters(
            metacommunity_size=15000, metacommunity_speciation_rate=0.1, metacommunity_option="analytical"
        )
        tree.add_metacommunity_parameters(
            metacommunity_size=100000, metacommunity_speciation_rate=0.001, metacommunity_option="analytical"
        )
        tree.apply()
        params_1 = tree.get_metacommunity_parameters(1)
        params_2 = tree.get_metacommunity_parameters(2)
        params_3 = tree.get_metacommunity_parameters(3)
        # Parameters should round-trip exactly, with the analytical option recorded.
        self.assertEqual(10000, params_1["metacommunity_size"])
        self.assertEqual(0.001, params_1["speciation_rate"])
        self.assertEqual("analytical", params_1["option"])
        self.assertEqual(0, params_1["external_reference"])
        self.assertEqual(15000, params_2["metacommunity_size"])
        self.assertEqual(0.1, params_2["speciation_rate"])
        self.assertEqual("analytical", params_2["option"])
        self.assertEqual(0, params_2["external_reference"])
        self.assertEqual(100000, params_3["metacommunity_size"])
        self.assertEqual(0.001, params_3["speciation_rate"])
        self.assertEqual("analytical", params_3["option"])
        self.assertEqual(0, params_3["external_reference"])
        # Pinned richness values for the analytical application on the seeded database.
        self.assertEqual(51, tree.get_species_richness(1))
        self.assertEqual(57, tree.get_species_richness(2))
        self.assertEqual(694, tree.get_species_richness(3))
        self.assertEqual(760, tree.get_species_richness(4))
        self.assertEqual(222, tree.get_species_richness(5))
        self.assertEqual(234, tree.get_species_richness(6))
    def testMetacommunityExternal(self):
        """Tests that an external metacommunity works as intended."""
        tree = CoalescenceTree(os.path.join("output", "sample_3.db"))
        tree.wipe_data()
        # The metacommunity option is a path to an external reference database.
        tree.set_speciation_parameters([0.1, 0.2], metacommunity_option=os.path.join("sample", "nse_reference.db"))
        tree.add_metacommunity_parameters(
            metacommunity_option=os.path.join("sample", "nse_reference.db"), metacommunity_reference=2
        )
        tree.apply()
        params_1 = tree.get_metacommunity_parameters(1)
        params_2 = tree.get_metacommunity_parameters(2)
        # External metacommunities store no size/rate; the option holds the file path and
        # external_reference points at the row in the external database.
        self.assertEqual(0, params_1["metacommunity_size"])
        self.assertEqual(0.0, params_1["speciation_rate"])
        self.assertEqual(os.path.join("sample", "nse_reference.db"), params_1["option"])
        self.assertEqual(1, params_1["external_reference"])
        self.assertEqual(0, params_2["metacommunity_size"])
        self.assertEqual(0.0, params_2["speciation_rate"])
        self.assertEqual(os.path.join("sample", "nse_reference.db"), params_2["option"])
        self.assertEqual(2, params_2["external_reference"])
        self.assertEqual(1, tree.get_species_richness(1))
        self.assertEqual(1, tree.get_species_richness(2))
        self.assertEqual(850, tree.get_species_richness(3))
        self.assertEqual(975, tree.get_species_richness(4))
    def testMetacommunityAnalyticalMethodDetection(self):
        """Tests that the analytical method detection works correctly."""
        tree = CoalescenceTree(os.path.join("output", "sample_4.db"))
        tree.wipe_data()
        # With option "none" and a large metacommunity size, the analytical method is chosen.
        tree.set_speciation_parameters(
            [0.1, 0.2], metacommunity_size=110000, metacommunity_speciation_rate=0.5, metacommunity_option="none"
        )
        tree.add_metacommunity_parameters(
            metacommunity_speciation_rate=0.5, metacommunity_size=120000, metacommunity_option="none"
        )
        tree.apply()
        params_1 = tree.get_metacommunity_parameters(1)
        params_2 = tree.get_metacommunity_parameters(2)
        self.assertEqual(110000, params_1["metacommunity_size"])
        self.assertEqual(0.5, params_1["speciation_rate"])
        self.assertEqual("analytical", params_1["option"])
        self.assertEqual(120000, params_2["metacommunity_size"])
        self.assertEqual(0.5, params_2["speciation_rate"])
        self.assertEqual("analytical", params_2["option"])
    def testMetacommunitySimulatedMethodDetection(self):
        """Tests that the simulated method detection works correctly."""
        tree = CoalescenceTree(os.path.join("output", "sample_5.db"))
        tree.wipe_data()
        # With option "none" and a small metacommunity size, the simulated method is chosen.
        tree.set_speciation_parameters(
            [0.1, 0.2], metacommunity_size=1000, metacommunity_speciation_rate=0.5, metacommunity_option="none"
        )
        tree.add_metacommunity_parameters(
            metacommunity_speciation_rate=0.5, metacommunity_size=2000, metacommunity_option="none"
        )
        tree.apply()
        params_1 = tree.get_metacommunity_parameters(1)
        params_2 = tree.get_metacommunity_parameters(2)
        self.assertEqual(1000, params_1["metacommunity_size"])
        self.assertEqual(0.5, params_1["speciation_rate"])
        self.assertEqual("simulated", params_1["option"])
        self.assertEqual(2000, params_2["metacommunity_size"])
        self.assertEqual(0.5, params_2["speciation_rate"])
        self.assertEqual("simulated", params_2["option"])
@skipLongTest
class TestMetacommunityApplicationSpeciesAbundances(unittest.TestCase):
    """Tests that the metacommunity application produces the expected species abundance distribution."""
    @classmethod
    def setUpClass(cls):
        """Run a non-spatial sim and apply a metacommunity."""
        cls.sim = Simulation()
        cls.sim.set_simulation_parameters(
            seed=11, task=110, output_directory="output", min_speciation_rate=0.1, spatial=False, deme=20541
        )
        cls.sim.run()
        cls.ct = CoalescenceTree(cls.sim)
        cls.ct.wipe_data()
        cls.ct.set_speciation_parameters(speciation_rates=0.1)
        # Analytical and simulated metacommunities with identical parameters, so the two
        # methods can be compared against each other in the tests below.
        cls.ct.add_metacommunity_parameters(
            metacommunity_option="analytical", metacommunity_size=1000000, metacommunity_speciation_rate=0.00005
        )
        cls.ct.add_metacommunity_parameters(
            metacommunity_option="simulated", metacommunity_size=1000000, metacommunity_speciation_rate=0.00005
        )
        # This just tests that it doesn't take forever and produces a sensible output
        cls.ct.add_metacommunity_parameters(
            metacommunity_option="analytical", metacommunity_size=1000000000, metacommunity_speciation_rate=0.1
        )
        cls.ct.apply()
    def testRichnessMatchness(self):
        """Tests that the species richness is roughly equivalent between the two methods."""
        self.assertAlmostEqual(244, self.ct.get_species_richness(2), delta=10)
        # References 1 (analytical) and 2 (simulated) used identical parameters.
        self.assertAlmostEqual(self.ct.get_species_richness(1), self.ct.get_species_richness(2), delta=30)
        self.assertEqual(5212, self.ct.get_species_richness(3))
    def testSpeciesAbundances(self):
        """Tests the species abundance distribution is roughly equivalent between the two methods."""
        sad_1 = [x[1] for x in self.ct.get_species_abundances(reference=1)]
        sad_2 = [x[1] for x in self.ct.get_species_abundances(reference=2)]
        mean_1 = sum(sad_1) / len(sad_1)
        mean_2 = sum(sad_2) / len(sad_2)
        # Check the mean abundance is roughly equivalent
        self.assertAlmostEqual(mean_1, mean_2, delta=10)
        # Check that the variances are roughly equivalent
        # (mean absolute deviation rather than the squared variance).
        var_list_1 = [abs(x - mean_1) for x in sad_1]
        var_list_2 = [abs(x - mean_2) for x in sad_2]
        var_1 = sum(var_list_1) / len(var_list_1)
        var_2 = sum(var_list_2) / len(var_list_2)
        self.assertAlmostEqual(var_1, var_2, delta=5)
        # The pandas accessor should agree with the per-reference accessor row for row.
        expected_abundances_list = []
        for reference in self.ct.get_community_references():
            for species_id, abundance in self.ct.get_species_abundances(reference=reference):
                expected_abundances_list.append(
                    {"community_reference": reference, "species_id": species_id, "no_individuals": abundance}
                )
        expected_abundances = pd.DataFrame(expected_abundances_list)
        actual_abundances = self.ct.get_species_abundances_pd()
        assert_frame_equal(actual_abundances, expected_abundances, check_like=True)
class TestMetacommunityApplicationOrdering(unittest.TestCase):
    """Tests that the ordering of adding parameters to the metacommunity does not matter."""
    @classmethod
    def setUpClass(cls):
        """Generates the test databases."""
        # Non-protracted pair of databases (c1/c2) from sample3.db.
        src = os.path.join("sample", "sample3.db")
        for i in [1, 2]:
            dst = os.path.join("output", "sample_order_{}.db".format(i))
            if os.path.exists(dst):
                os.remove(dst)
            shutil.copy(src, dst)
        # Protracted trio of databases (proc1/proc2/proc3) from sample5.db.
        src = os.path.join("sample", "sample5.db")
        for i in range(3, 6):
            dst = os.path.join("output", "sample_order_{}.db".format(i))
            if os.path.exists(dst):
                os.remove(dst)
            shutil.copy(src, dst)
        cls.c1 = CoalescenceTree(os.path.join("output", "sample_order_1.db"))
        cls.c2 = CoalescenceTree(os.path.join("output", "sample_order_2.db"))
        cls.proc1 = CoalescenceTree(os.path.join("output", "sample_order_3.db"))
        cls.proc2 = CoalescenceTree(os.path.join("output", "sample_order_4.db"))
        cls.proc3 = CoalescenceTree(os.path.join("output", "sample_order_5.db"))
        # c1: metacommunity supplied directly in set_speciation_parameters.
        cls.c1.set_speciation_parameters(
            [0.1, 0.5, 0.9],
            metacommunity_speciation_rate=0.001,
            metacommunity_option="simulated",
            metacommunity_size=10000,
        )
        cls.c1.apply()
        # c2: identical metacommunity added afterwards via add_metacommunity_parameters.
        cls.c2.set_speciation_parameters([0.1, 0.5, 0.9])
        cls.c2.add_metacommunity_parameters(
            metacommunity_size=10000, metacommunity_speciation_rate=0.001, metacommunity_option="simulated"
        )
        cls.c2.apply()
        # proc1: protracted and metacommunity parameters both supplied up front.
        cls.proc1.set_speciation_parameters(
            [0.1, 0.5, 0.9],
            protracted_speciation_min=5,
            protracted_speciation_max=1000,
            metacommunity_option="simulated",
            metacommunity_speciation_rate=0.001,
            metacommunity_size=10000,
        )
        cls.proc1.apply()
        # proc2: metacommunity added first, then protracted parameters.
        cls.proc2.set_speciation_parameters([0.1, 0.5, 0.9])
        cls.proc2.add_metacommunity_parameters(
            metacommunity_size=10000, metacommunity_speciation_rate=0.001, metacommunity_option="simulated"
        )
        cls.proc2.add_protracted_parameters(min_speciation_gen=5, max_speciation_gen=1000)
        cls.proc2.apply()
        # proc3: protracted parameters added first, then the metacommunity.
        cls.proc3.set_speciation_parameters([0.1, 0.5, 0.9])
        cls.proc3.add_protracted_parameters(min_speciation_gen=5, max_speciation_gen=1000)
        cls.proc3.add_metacommunity_parameters(
            metacommunity_size=10000, metacommunity_speciation_rate=0.001, metacommunity_option="simulated"
        )
        cls.proc3.apply()
    def testEquivalentMethodsMatch(self):
        """Tests that equivalent methods of applying metacommunities produce equivalent results."""
        for i in range(1, 4):
            self.assertEqual(self.c1.get_species_richness(i), self.c2.get_species_richness(i))
            self.assertEqual(self.proc1.get_species_richness(i), self.proc2.get_species_richness(i))
            self.assertEqual(self.proc2.get_species_richness(i), self.proc3.get_species_richness(i))
    def testMultipleProtractedError(self):
        """Tests that adding multiple protracted speciation parameters raises the correct error."""
        # proc2 already has protracted parameters applied, so adding more must fail.
        with self.assertRaises(ValueError):
            self.proc2.add_multiple_protracted_parameters()
class TestProtractedSpeciationEquality(unittest.TestCase):
    """Tests that analysis performs as expected when protracted speciation parameters match the minimums."""

    @classmethod
    def setUpClass(cls):
        """Copy the sample database."""
        sample_db = os.path.join("sample", "sample3.db")
        working_db = os.path.join("output", "sample_protracted3.db")
        shutil.copy(sample_db, working_db)
        cls.ct = CoalescenceTree(working_db)
        cls.ct.wipe_data()

    def testApplyEqualParameters(self):
        """Tests that equal protracted parameters can be applied"""
        self.ct.set_speciation_parameters(
            [0.001, 0.1], protracted_speciation_min=100.0, protracted_speciation_max=10000.0
        )
        self.ct.apply()
        # Richness values pinned for the seeded sample database.
        self.assertEqual(1, self.ct.get_species_richness(1))
        self.assertEqual(3, self.ct.get_species_richness(2))
class TestSpeciesAgesCalculations(unittest.TestCase):
"""Tests that operations associated with the species ages operate as expected"""
@classmethod
def setUpClass(cls):
"""Copies the sample databases and applies a basic set of community parameters."""
src = os.path.join("sample", "sample6.db")
dst = os.path.join("output", "sample6.db")
if os.path.exists(dst):
os.remove(dst)
shutil.copy(src, dst)
cls.dst_file = dst
    def testSmallSimulation(self):
        """Tests that species ages are recorded and retrieved correctly for a small simulation."""
        tree = CoalescenceTree(logging_level=50)
        tree.set_database(self.dst_file)
        # Ages cannot be retrieved before speciation parameters with record_ages=True
        # have been applied (both before and after wiping the data).
        with self.assertRaises(IOError):
            _ = tree.get_species_ages()
        with self.assertRaises(IOError):
            _ = tree.get_species_ages_pd()
        tree.wipe_data()
        with self.assertRaises(IOError):
            _ = tree.get_species_ages()
        with self.assertRaises(IOError):
            _ = tree.get_species_ages_pd()
        tree.set_speciation_parameters(
            speciation_rates=[0.000001, 0.0001],
            record_spatial=False,
            record_ages=True,
        )
        tree.apply()
        self.assertTrue(check_sql_table_exist(tree.database, "SPECIES_AGES"))
        # Compare against the pre-computed expected ages shipped with the samples.
        expected_df = pd.read_csv(os.path.join("sample", "expected_species_ages.csv"))
        actual_df = tree.get_species_ages_pd().reset_index(drop=True)
        assert_frame_equal(expected_df, actual_df)
        # NOTE(review): grouping by a list yields tuple group keys in newer pandas
        # versions; confirm community_ref is accepted by get_species_ages here.
        for community_ref, group in expected_df.groupby(["community_reference"]):
            actual_output = sorted(tree.get_species_ages(community_ref), key=lambda x: x[0])
            expected_output = group.drop(columns=["community_reference"]).sort_values(by=["species_id"]).values.tolist()
            for ex, act in zip(expected_output, actual_output):
                self.assertEqual(ex[0], act[0])
                self.assertAlmostEqual(ex[1], act[1], delta=0.0000001)
| 47.515855 | 120 | 0.649506 | import os
import random
import shutil
import sqlite3
import sys
import unittest
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from setup_tests import setUpAll, tearDownAll, skipLongTest
from pycoalescence import Simulation
from pycoalescence.coalescence_tree import CoalescenceTree, get_parameter_description
from pycoalescence.sqlite_connection import check_sql_table_exist
def setUpModule():
setUpAll()
t = CoalescenceTree("sample/sample.db")
t.clear_calculations()
def tearDownModule():
tearDownAll()
class TestNullSimulationErrors(unittest.TestCase):
def testRaisesError(self):
t = CoalescenceTree()
with self.assertRaises(RuntimeError):
t.get_species_richness()
with self.assertRaises(RuntimeError):
t.calculate_fragment_richness()
with self.assertRaises(RuntimeError):
t.calculate_alpha_diversity()
with self.assertRaises(RuntimeError):
t.calculate_beta_diversity()
with self.assertRaises(RuntimeError):
t.calculate_fragment_abundances()
with self.assertRaises(RuntimeError):
t.calculate_fragment_octaves()
with self.assertRaises(RuntimeError):
t.calculate_octaves()
with self.assertRaises(RuntimeError):
t.get_fragment_list()
with self.assertRaises(RuntimeError):
t.get_alpha_diversity()
with self.assertRaises(RuntimeError):
t.get_beta_diversity()
with self.assertRaises(RuntimeError):
t.get_community_references()
with self.assertRaises(RuntimeError):
t.get_metacommunity_references()
with self.assertRaises(RuntimeError):
t.get_species_locations()
with self.assertRaises(RuntimeError):
t.get_species_abundances()
with self.assertRaises(RuntimeError):
t.get_species_list()
with self.assertRaises(RuntimeError):
_ = t.get_simulation_parameters()
with self.assertRaises(RuntimeError):
t.get_fragment_abundances("null", 1)
with self.assertRaises(RuntimeError):
t.get_species_richness()
with self.assertRaises(RuntimeError):
t.get_octaves(1)
class TestParameterDescriptions(unittest.TestCase):
def testReadsCorrectly(self):
tmp_dict = {
"habitat_change_rate": "the rate of change from present density maps to historic density maps",
"sample_file": "the sample area map for spatially selective sampling. Can be null to sample all " "cells",
"sample_x": "the sample map x dimension",
"sample_y": "the sample map y dimension",
"sample_x_offset": "the sample x map offset from the grid",
"sample_y_offset": "the sample y map offset from the grid",
"output_dir": "the output directory for the simulation database",
"seed": "the random seed to start the simulation, for repeatability",
"coarse_map_x": "the coarse density map x dimension",
"fine_map_file": "the density map file location at the finer resolution, covering a smaller area",
"tau": "the tau dispersal value for fat-tailed dispersal",
"grid_y": "the simulated grid y dimension",
"dispersal_relative_cost": "the relative rate of moving through non-habitat compared to habitat",
"fine_map_y_offset": "the number of cells the fine map is offset from the sample map in the y "
"dimension, at the fine resolution",
"gen_since_historical": "the number of generations that occur before the historical, or historic,"
" state is reached",
"dispersal_method": "the dispersal method used. Can be one of 'normal', 'norm-uniform' or " "'fat-tail'.",
"historical_fine_map": "the historical, or historic, coarse density map file location",
"coarse_map_scale": "the scale of the coarse density map compared to the fine density map. 1 "
"means equal density",
"grid_x": "the simulated grid x dimension",
"coarse_map_file": "the density map file location at the coarser resolution, covering a larger " "area",
"min_num_species": "the minimum number of species known to exist (currently has no effect)",
"historical_coarse_map": "the historical, or historic, coarse density map file location",
"m_probability": "the probability of choosing from the uniform dispersal kernel in normal-uniform"
" dispersal",
"sigma": "the sigma dispersal value for normal, fat-tailed and normal-uniform dispersals",
"deme": "the number of individuals inhabiting a cell at a map density of 1",
"time_config_file": "will be 'set' if temporal sampling is used, 'null' otherwise",
"coarse_map_y": "the coarse density map y dimension",
"fine_map_x": "the fine density map x dimension",
"coarse_map_y_offset": "the number of cells the coarse map is offset from the fine map in the y "
"dimension, at the fine resolution",
"cutoff": "the maximal dispersal distance possible, for normal-uniform dispersal",
"fine_map_y": "the fine density map y dimension",
"sample_size": "the proportion of individuals to sample from each cell (0-1)",
"fine_map_x_offset": "the number of cells the fine map is offset from the sample map in the x "
"dimension, at the fine resolution",
"speciation_rate": "the minimum speciation rate the simulation was run with",
"task": "the job or task reference number given to this simulation",
"coarse_map_x_offset": "the number of cells the coarse map is offset from the fine map in the x "
"dimension, at the fine resolution",
"landscape_type": "if false, landscapes have hard boundaries. Otherwise, can be infinite, "
"with 1s everywhere, or tiled_coarse or tiled_fine for repeated units of tiled "
"maps",
"max_time": "the maximum simulation time to run for (in seconds)",
"sim_complete": "set to true upon simulation completion, false for incomplete simulations",
"protracted": "if true, the simulation was run with protracted speciation.",
"min_speciation_gen": "the minimum number of generations required before speciation can occur",
"max_speciation_gen": "the maximum number of generations a lineage can exist before it is " "speciated",
"dispersal_map": "a tif file where rows represent cumulative dispersal probability to every other "
"cell, using the row number = x + (y * x_max)",
}
t = CoalescenceTree("sample/sample.db")
sim_output = t.get_simulation_parameters()
for key in sim_output.keys():
self.assertIn(key, get_parameter_description().keys())
self.assertEqual(get_parameter_description(key), t.get_parameter_description(key))
for key in get_parameter_description().keys():
self.assertIn(key, sim_output.keys())
for key in tmp_dict.keys():
self.assertEqual(tmp_dict[key], get_parameter_description(key))
self.assertDictEqual(tmp_dict, get_parameter_description())
with self.assertRaises(KeyError):
get_parameter_description(key="notakey")
dispersal_parameters = t.dispersal_parameters()
expected_disp_dict = {
"dispersal_method": "normal",
"sigma": 3.55,
"tau": 0.470149,
"m_probability": 0,
"cutoff": 0,
}
for key in dispersal_parameters.keys():
self.assertIn(key, tmp_dict.keys())
self.assertIn(key, expected_disp_dict.keys())
for key, val in expected_disp_dict.items():
self.assertIn(key, dispersal_parameters.keys())
if isinstance(val, float):
self.assertAlmostEqual(val, dispersal_parameters[key])
else:
self.assertEqual(val, dispersal_parameters[key])
class TestCoalescenceTreeSettingSpeciationParameters(unittest.TestCase):
@classmethod
def setUpClass(cls):
src = [os.path.join("sample", "sample{}.db".format(x)) for x in [2, 3]]
cls.dst = [os.path.join("output", "sample{}.db".format(x)) for x in [2, 3]]
for tmp_src, tmp_dst in zip(src, cls.dst):
if os.path.exists(tmp_dst):
os.remove(tmp_dst)
shutil.copy(tmp_src, tmp_dst)
def testSetSpeciationRates(self):
ct = CoalescenceTree(self.dst[0])
for attempt in ["a string", ["a", "string"], [["list", "list2"], 0.2, 0.1], [None]]:
with self.assertRaises(TypeError):
ct._set_speciation_rates(attempt)
with self.assertRaises(RuntimeError):
ct._set_speciation_rates(None)
for attempt in [-10, -2.0, 1.1, 100, [-1, 0.1, 0.2], [0.2, 0.8, 1.1]]:
with self.assertRaises(ValueError):
ct._set_speciation_rates(attempt)
expected_list = [0.1, 0.2, 0.3]
ct._set_speciation_rates(expected_list)
self.assertEqual(expected_list, ct.applied_speciation_rates_list)
ct._set_speciation_rates(0.2)
self.assertEqual([0.2], ct.applied_speciation_rates_list)
def testSetRecordFragments(self):
ct = CoalescenceTree(self.dst[0])
ct._set_record_fragments(True)
self.assertEqual("null", ct.record_fragments)
ct._set_record_fragments(False)
self.assertEqual("F", ct.record_fragments)
for each in ["PlotBiodiversityMetrics.db", "doesntexist.csv"]:
config_path = os.path.join("sample", each)
with self.assertRaises(IOError):
ct._set_record_fragments(config_path)
expected = os.path.join("sample", "FragmentsTest.csv")
ct._set_record_fragments(expected)
self.assertEqual(expected, ct.record_fragments)
def testSetRecordSpatial(self):
ct = CoalescenceTree(self.dst[0])
ct._set_record_spatial("T")
self.assertTrue(ct.record_spatial)
ct._set_record_spatial("F")
self.assertFalse(ct.record_spatial)
with self.assertRaises(TypeError):
ct._set_record_spatial("nota bool")
ct._set_record_spatial(True)
self.assertTrue(ct.record_spatial)
def testSetMetacommunityParameters(self):
ct = CoalescenceTree(self.dst[0])
for size, spec in [[-10, 0.1], [10, -0.1], [10, 1.1]]:
with self.assertRaises(ValueError):
ct.fragments = "F"
ct._set_record_fragments(False)
ct._set_record_spatial(False)
ct.times = [0.0]
ct._set_metacommunity_parameters(size, spec)
ct._set_metacommunity_parameters()
self.assertEqual(0.0, ct.metacommunity_size)
self.assertEqual(0.0, ct.metacommunity_speciation_rate)
ct._set_metacommunity_parameters(10, 0.1, "simulated")
self.assertEqual(10, ct.metacommunity_size)
self.assertEqual(0.1, ct.metacommunity_speciation_rate)
def testSetProtractedParameters(self):
ct = CoalescenceTree(self.dst[0])
with self.assertRaises(ValueError):
ct._set_protracted_parameters(0.1, 100)
ct = CoalescenceTree(self.dst[1])
ct._set_protracted_parameters(10, 100)
self.assertEqual((10.0, 100.0), ct.protracted_parameters[0])
ct.protracted_parameters = []
for min_proc, max_proc in [[200, 5000], [80, 50], [200, 11000]]:
with self.assertRaises(ValueError):
ct._check_protracted_parameters(min_proc, max_proc)
with self.assertRaises(ValueError):
ct._set_protracted_parameters(min_proc, max_proc)
with self.assertRaises(ValueError):
ct.add_protracted_parameters(min_proc, max_proc)
ct._set_protracted_parameters(50, 5000)
self.assertEqual((50.0, 5000.0), ct.protracted_parameters[0])
ct.protracted_parameters = []
ct._set_protracted_parameters()
self.assertEqual((0.0, 0.0), ct.protracted_parameters[0])
def testSetSampleFile(self):
ct = CoalescenceTree(self.dst[0])
for file in ["notafile.tif", os.path.join("sample", "sample.db")]:
with self.assertRaises(IOError):
ct._set_sample_file(file)
ct._set_sample_file()
self.assertEqual("null", ct.sample_file)
expected_file = os.path.join("sample", "SA_sample_coarse.tif")
ct._set_sample_file(expected_file)
self.assertEqual(expected_file, ct.sample_file)
def testSetTimes(self):
ct = CoalescenceTree(self.dst[0])
ct._set_times(None)
self.assertEqual(0.0, ct.times[0])
with self.assertRaises(TypeError):
ct.add_times(0.5)
with self.assertRaises(TypeError):
ct.add_times([0.2, 0.5, "string"])
ct.times = None
ct.add_times([0.2, 0.5, 10])
self.assertEqual([0.0, 0.2, 0.5, 10.0], ct.times)
ct.times = None
ct._set_times(0.2)
self.assertEqual([0.0, 0.2], ct.times)
ct.times = None
ct._set_times([0.1, 0.5, 10.0])
self.assertEqual([0.0, 0.1, 0.5, 10.0], ct.times)
class TestCoalescenceTreeParameters(unittest.TestCase):
    """Tests retrieval of community and metacommunity parameters from sample databases."""

    def testCommunityParameters1(self):
        """Checks parameter and reference lookups on a database with a single community.

        Uses sample3.db, which contains one (protracted) community and no
        metacommunities, then verifies error behaviour after wiping the data.
        """
        shutil.copyfile(os.path.join("sample", "sample3.db"), os.path.join("output", "temp_sample3.db"))
        t = CoalescenceTree(os.path.join("output", "temp_sample3.db"), logging_level=50)
        # This database has no metacommunities and exactly one community.
        self.assertEqual([], t.get_metacommunity_references())
        self.assertEqual([1], t.get_community_references())
        params = t.get_community_parameters(1)
        expected_dict = {
            "speciation_rate": 0.001,
            "time": 0.0,
            "fragments": 0,
            "metacommunity_reference": 0,
            "min_speciation_gen": 100.0,
            "max_speciation_gen": 10000.0,
        }
        self.assertEqual(expected_dict, params)
        # No metacommunity table exists, so a database error is expected.
        with self.assertRaises(sqlite3.Error):
            t.get_metacommunity_parameters(1)
        # Reference 2 does not exist.
        with self.assertRaises(KeyError):
            t.get_community_parameters(2)
        # Positional lookup with non-matching parameters finds no reference.
        with self.assertRaises(KeyError):
            t.get_community_reference(0.1, 0.0, 0, 0, 0.0, min_speciation_gen=100.0, max_speciation_gen=10000.0)
        # Omitting the protracted speciation limits must not match the stored reference.
        with self.assertRaises(KeyError):
            _ = t.get_community_reference(speciation_rate=0.001, time=0.0, fragments=False)
        ref = t.get_community_reference(
            speciation_rate=0.001, time=0.0, fragments=False, min_speciation_gen=100.0, max_speciation_gen=10000.0
        )
        self.assertEqual(1, ref)
        self.assertEqual(expected_dict, t.get_community_parameters(ref))
        t.wipe_data()
        # After wiping, no community parameters remain to be fetched.
        with self.assertRaises(IOError):
            t.get_community_parameters_pd()

    def testCommunityParameters2(self):
        """Checks parameter lookups on a database containing multiple (meta)communities.

        sample4.db holds five communities spanning two metacommunities (one
        simulated, one analytical); verifies each reference resolves to the
        expected parameter set and that the pandas export matches.
        """
        t = CoalescenceTree(os.path.join("sample", "sample4.db"))
        self.assertEqual([1, 2, 3, 4, 5], t.get_community_references())
        expected_params1 = {"speciation_rate": 0.1, "time": 0.0, "fragments": 0, "metacommunity_reference": 0}
        expected_params2 = {"speciation_rate": 0.1, "time": 0.0, "fragments": 0, "metacommunity_reference": 1}
        expected_params3 = {"speciation_rate": 0.2, "time": 0.0, "fragments": 0, "metacommunity_reference": 1}
        expected_params4 = {"speciation_rate": 0.1, "time": 0.0, "fragments": 0, "metacommunity_reference": 2}
        expected_params5 = {"speciation_rate": 0.2, "time": 0.0, "fragments": 0, "metacommunity_reference": 2}
        expected_meta_params1 = {
            "speciation_rate": 0.001,
            "metacommunity_size": 10000.0,
            "option": "simulated",
            "external_reference": 0,
        }
        expected_meta_params2 = {
            "speciation_rate": 0.001,
            "metacommunity_size": 10000.0,
            "option": "analytical",
            "external_reference": 0,
        }
        params1 = t.get_community_parameters(1)
        params2 = t.get_community_parameters(2)
        params3 = t.get_community_parameters(3)
        params4 = t.get_community_parameters(4)
        params5 = t.get_community_parameters(5)
        params6 = t.get_metacommunity_parameters(1)
        params7 = t.get_metacommunity_parameters(2)
        self.assertEqual([1, 2], t.get_metacommunity_references())
        self.assertEqual(expected_params1, params1)
        self.assertEqual(expected_params2, params2)
        self.assertEqual(expected_params3, params3)
        self.assertEqual(expected_params4, params4)
        self.assertEqual(expected_params5, params5)
        self.assertEqual(expected_meta_params1, params6)
        self.assertEqual(expected_meta_params2, params7)
        # References beyond those stored raise a KeyError.
        with self.assertRaises(KeyError):
            t.get_community_parameters(6)
        with self.assertRaises(KeyError):
            t.get_metacommunity_parameters(3)
        ref1 = t.get_community_reference(speciation_rate=0.1, time=0.0, fragments=False)
        # Protracted limits were never used in this database, so they must not match.
        with self.assertRaises(KeyError):
            t.get_community_reference(
                speciation_rate=0.1, time=0.0, fragments=False, min_speciation_gen=0.1, max_speciation_gen=10000.0
            )
        ref2 = t.get_community_reference(
            speciation_rate=0.1,
            time=0.0,
            fragments=False,
            metacommunity_size=10000.0,
            metacommunity_speciation_rate=0.001,
            metacommunity_option="simulated",
        )
        # A metacommunity speciation rate that was never simulated cannot be found.
        with self.assertRaises(KeyError):
            t.get_community_reference(
                speciation_rate=0.1,
                time=0.0,
                fragments=False,
                metacommunity_size=10000.0,
                metacommunity_speciation_rate=0.01,
                metacommunity_option="simulated",
            )
        ref3 = t.get_community_reference(
            speciation_rate=0.2,
            time=0.0,
            fragments=False,
            metacommunity_size=10000.0,
            metacommunity_speciation_rate=0.001,
            metacommunity_option="simulated",
        )
        ref4 = t.get_community_reference(
            speciation_rate=0.1,
            time=0.0,
            fragments=False,
            metacommunity_size=10000.0,
            metacommunity_speciation_rate=0.001,
            metacommunity_option="analytical",
        )
        ref5 = t.get_community_reference(
            speciation_rate=0.2,
            time=0.0,
            fragments=False,
            metacommunity_size=10000.0,
            metacommunity_speciation_rate=0.001,
            metacommunity_option="analytical",
        )
        self.assertEqual(1, ref1)
        self.assertEqual(2, ref2)
        self.assertEqual(3, ref3)
        self.assertEqual(4, ref4)
        self.assertEqual(5, ref5)
        # Rebuild the expected dataframe from individual lookups and compare
        # against the bulk pandas export.
        expected_community_params_list = []
        for reference in t.get_community_references():
            params = t.get_community_parameters(reference)
            params["reference"] = reference
            expected_community_params_list.append(params)
        expected_community_params = pd.DataFrame(expected_community_params_list)
        actual_output = t.get_community_parameters_pd()
        assert_frame_equal(expected_community_params, actual_output, check_like=True)

    def testIsComplete(self):
        """A completed sample simulation reports itself as complete."""
        t = CoalescenceTree(os.path.join("sample", "sample4.db"))
        self.assertTrue(t.is_complete)
class TestCoalescenceTreeAnalysis(unittest.TestCase):
    """Tests the biodiversity analysis routines of CoalescenceTree.

    setUpClass copies the sample databases into the output directory and
    pre-computes fragment richness, octaves, alpha and beta diversity so the
    individual tests can read the stored metrics.
    """

    @classmethod
    def setUpClass(cls):
        """Copies sample databases and pre-computes the metrics used by the tests."""
        dst1 = os.path.join("output", "sampledb0.db")
        # Each test that mutates a database gets its own copy (sampledb0-10).
        for i in range(0, 11):
            dst = os.path.join("output", "sampledb{}.db".format(i))
            if os.path.exists(dst):
                os.remove(dst)
            shutil.copyfile(os.path.join("sample", "sample.db"), dst)
        shutil.copyfile(os.path.join("sample", "nse_reference.db"), os.path.join("output", "nse_reference1.db"))
        # Seed so the sampling-based goodness-of-fit results are reproducible.
        random.seed(2)
        cls.test = CoalescenceTree(dst1, logging_level=50)
        cls.test.clear_calculations()
        cls.test.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
        cls.test.calculate_fragment_richness()
        cls.test.calculate_fragment_octaves()
        cls.test.calculate_octaves_error()
        cls.test.calculate_alpha_diversity()
        cls.test.calculate_beta_diversity()
        # A second tree without fragment data, for the error-path tests.
        cls.test2 = CoalescenceTree()
        cls.test2.set_database(os.path.join("sample", "sample_nofrag.db"))
        # A separate copy with goodness-of-fit already applied, for testClearGoodnessFit.
        dstx = os.path.join("output", "sampledbx.db")
        shutil.copyfile(dst1, dstx)
        c = CoalescenceTree(dstx)
        c.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
        c.calculate_goodness_of_fit()

    @classmethod
    def tearDownClass(cls):
        """Removes the calculated metrics from the shared test database."""
        cls.test.clear_calculations()

    def testComparisonDataNoExistError(self):
        """Importing a non-existent comparison database raises an IOError."""
        c = CoalescenceTree(os.path.join("sample", "sample.db"))
        with self.assertRaises(IOError):
            c.import_comparison_data(os.path.join("sample", "doesnotexist.db"))

    def testFragmentOctaves(self):
        """Checks stored fragment octave richness values for several fragments/references."""
        num = self.test.cursor.execute(
            "SELECT richness FROM FRAGMENT_OCTAVES WHERE fragment == 'P09' AND octave == 0"
            " AND community_reference == 1"
        ).fetchall()[0][0]
        self.assertEqual(num, 7, msg="Fragment octaves not correctly calculated.")
        num = self.test.cursor.execute(
            "SELECT richness FROM FRAGMENT_OCTAVES WHERE fragment == 'P09' AND octave == 0 "
            " AND community_reference == 2"
        ).fetchall()[0][0]
        self.assertEqual(num, 7, msg="Fragment octaves not correctly calculated.")
        num = self.test.cursor.execute(
            "SELECT richness FROM FRAGMENT_OCTAVES WHERE fragment == 'cerrogalera' AND octave == 1 "
            " AND community_reference == 1"
        ).fetchall()[0][0]
        self.assertEqual(num, 3, msg="Fragment octaves not correctly calculated.")
        num = self.test.cursor.execute(
            "SELECT richness FROM FRAGMENT_OCTAVES WHERE fragment == 'whole' AND octave == 1 "
            " AND community_reference == 2"
        ).fetchall()[0][0]
        self.assertEqual(num, 221, msg="Fragment octaves not correctly calculated.")

    def testFragmentAbundances(self):
        """Checks the number of stored fragment abundance rows per fragment and reference."""
        num = self.test.cursor.execute(
            "SELECT COUNT(fragment) FROM FRAGMENT_ABUNDANCES WHERE fragment == 'P09' " " AND community_reference == 1"
        ).fetchall()[0][0]
        self.assertEqual(num, 9, msg="Fragment abundances not correctly calculated.")
        num = self.test.cursor.execute(
            "SELECT COUNT(fragment) FROM FRAGMENT_ABUNDANCES WHERE fragment == 'P09' " " AND community_reference == 2"
        ).fetchall()[0][0]
        self.assertEqual(num, 9, msg="Fragment abundances not correctly calculated.")
        num = self.test.cursor.execute(
            "SELECT COUNT(fragment) FROM FRAGMENT_ABUNDANCES WHERE fragment == 'cerrogalera' "
            " AND community_reference == 1"
        ).fetchall()[0][0]
        self.assertEqual(num, 9, msg="Fragment abundances not correctly calculated.")

    def testSpeciesAbundances(self):
        """Checks the total number of species abundance rows per community reference."""
        num = self.test.cursor.execute(
            "SELECT COUNT(species_id) FROM SPECIES_ABUNDANCES WHERE community_reference == 2"
        ).fetchall()[0][0]
        self.assertEqual(num, 1029, msg="Species abundances not correctly calculated.")
        num = self.test.cursor.execute(
            "SELECT COUNT(species_id) FROM SPECIES_ABUNDANCES WHERE community_reference == 1"
        ).fetchall()[0][0]
        self.assertEqual(num, 884, msg="Species abundances not correctly calculated.")

    def testGetOctaves(self):
        """Checks octave retrieval both as lists and as a pandas dataframe."""
        c = CoalescenceTree(os.path.join("output", "sampledb4.db"))
        c.clear_calculations()
        c.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
        c.calculate_richness()
        self.assertEqual([[0, 585], [1, 231], [2, 59], [3, 5]], c.get_octaves(1))
        c = CoalescenceTree(os.path.join("output", "sampledb4.db"))
        c.clear_calculations()
        c.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
        c.calculate_richness()
        actual = c.get_octaves_pd().head()
        expected = pd.DataFrame(
            [[1, 0, 585], [1, 1, 231], [1, 2, 59], [1, 3, 5], [2, 0, 760]],
            columns=["community_reference", "octave", "richness"],
        )
        assert_frame_equal(actual, expected, check_like=True)

    def testSpeciesLocations(self):
        """Checks species locations are stored and filtered by community reference."""
        num = self.test.cursor.execute(
            "SELECT species_id FROM SPECIES_LOCATIONS WHERE x==1662 AND y==4359 " " AND community_reference == 1"
        ).fetchall()
        self.assertEqual(len(set(num)), 2, msg="Species locations not correctly assigned")
        all_list = self.test.get_species_locations()
        select_list = self.test.get_species_locations(community_reference=1)
        # The unfiltered rows carry the community reference as a fourth column.
        self.assertListEqual([1, 1662, 4359, 1], all_list[0])
        self.assertListEqual([1, 1662, 4359], select_list[0])

    def testAlphaDiversity(self):
        """Checks alpha diversity values and the pandas export against per-reference lookups."""
        c = CoalescenceTree(os.path.join("sample", "sample.db"))
        # No alpha diversity has been calculated on the raw sample database.
        with self.assertRaises(IOError):
            c.get_alpha_diversity_pd()
        self.assertEqual(9, self.test.get_alpha_diversity(1))
        self.assertEqual(10, self.test.get_alpha_diversity(2))
        expected_alphas_list = []
        for reference in self.test.get_community_references():
            expected_alphas_list.append(
                {"community_reference": reference, "alpha_diversity": self.test.get_alpha_diversity(reference)}
            )
        expected_alphas = pd.DataFrame(expected_alphas_list).reset_index(drop=True)
        actual_alphas = self.test.get_alpha_diversity_pd().reset_index(drop=True)
        assert_frame_equal(expected_alphas, actual_alphas, check_like=True)

    def testBetaDiversity(self):
        """Checks beta diversity values and the pandas export against per-reference lookups."""
        c = CoalescenceTree(os.path.join("sample", "sample.db"))
        with self.assertRaises(IOError):
            c.get_beta_diversity_pd()
        self.assertAlmostEqual(98.111111111, self.test.get_beta_diversity(1), places=5)
        self.assertAlmostEqual(102.8, self.test.get_beta_diversity(2), places=5)
        expected_betas_list = []
        for reference in self.test.get_community_references():
            expected_betas_list.append(
                {"community_reference": reference, "beta_diversity": self.test.get_beta_diversity(reference)}
            )
        expected_betas = pd.DataFrame(expected_betas_list).reset_index(drop=True)
        actual_betas = self.test.get_beta_diversity_pd().reset_index(drop=True)
        assert_frame_equal(expected_betas, actual_betas, check_like=True)

    def testGetNumberIndividuals(self):
        """Checks individual counts for the whole landscape and a single fragment."""
        c = CoalescenceTree(os.path.join("output", "sampledb7.db"))
        self.assertEqual(1504, c.get_number_individuals(community_reference=1))
        self.assertEqual(12, c.get_number_individuals(fragment="P09", community_reference=1))
        c.wipe_data()
        c.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
        # With the simulation data wiped, counts are unavailable.
        with self.assertRaises(IOError):
            c.get_number_individuals(fragment="none")
        with self.assertRaises(IOError):
            c.get_number_individuals()

    def testGetFragmentAbundances(self):
        """Checks fragment abundance retrieval per fragment, in bulk, and as a dataframe."""
        c = CoalescenceTree(os.path.join("sample", "sample3.db"))
        # sample3.db contains no fragment abundances.
        with self.assertRaises(IOError):
            c.get_fragment_abundances(fragment="P09", reference=1)
        with self.assertRaises(IOError):
            c.get_fragment_abundances_pd()
        abundances = self.test.get_fragment_abundances(fragment="P09", reference=1)
        expected_abundances = [[302, 1], [303, 1], [304, 1], [305, 1], [306, 1], [307, 1], [546, 2], [693, 1], [732, 3]]
        self.assertEqual(expected_abundances, abundances[:10])
        all_abundances = self.test.get_all_fragment_abundances()
        expected_abundances2 = [
            [1, "P09", 302, 1],
            [1, "P09", 303, 1],
            [1, "P09", 304, 1],
            [1, "P09", 305, 1],
            [1, "P09", 306, 1],
            [1, "P09", 307, 1],
            [1, "P09", 546, 2],
            [1, "P09", 693, 1],
            [1, "P09", 732, 3],
            [1, "cerrogalera", 416, 1],
        ]
        self.assertEqual(expected_abundances2, all_abundances[:10])
        df = pd.DataFrame(
            expected_abundances2, columns=["community_reference", "fragment", "species_id", "no_individuals"]
        )
        actual_df = self.test.get_fragment_abundances_pd().head(n=10)
        assert_frame_equal(df, actual_df, check_like=True)

    def testGetFragmentListErrors(self):
        """Fetching the fragment list from a wiped database raises an IOError."""
        c = CoalescenceTree(os.path.join("output", "sampledb8.db"))
        c.wipe_data()
        with self.assertRaises(IOError):
            c.get_fragment_list()

    def testClearGoodnessFit(self):
        """Clearing goodness-of-fit removes all goodness_* rows from the metrics table."""
        c = CoalescenceTree(os.path.join("output", "sampledbx.db"))
        exec_command = "SELECT * FROM BIODIVERSITY_METRICS WHERE metric LIKE 'goodness_%'"
        self.assertTrue(len(c.cursor.execute(exec_command).fetchall()) >= 1)
        c._clear_goodness_of_fit()
        self.assertFalse(len(c.cursor.execute(exec_command).fetchall()) >= 1)

    def testGetBiodiversityMetrics(self):
        """Checks the biodiversity metrics dataframe matches the stored fragment richness."""
        c1 = CoalescenceTree(os.path.join("sample", "sample.db"))
        with self.assertRaises(IOError):
            c1.get_biodiversity_metrics()
        c2 = CoalescenceTree(os.path.join("sample", "sample2.db"))
        # np.nan is the canonical spelling; the np.NaN/np.NAN aliases were
        # removed in NumPy 2.0.
        expected_biodiversity_metrics = pd.DataFrame(
            [
                [1, "fragment_richness", "fragment2", 129.0, np.nan, np.nan],
                [2, "fragment_richness", "fragment2", 130.0, np.nan, np.nan],
                [1, "fragment_richness", "fragment1", 174.0, np.nan, np.nan],
                [2, "fragment_richness", "fragment1", 175.0, np.nan, np.nan],
                [1, "fragment_richness", "whole", 1163.0, np.nan, np.nan],
                [2, "fragment_richness", "whole", 1170.0, np.nan, np.nan],
            ],
            columns=["community_reference", "metric", "fragment", "value", "simulated", "actual"],
        ).reset_index(drop=True)
        actual_biodiversity_metrics = c2.get_biodiversity_metrics().reset_index(drop=True).fillna(value=np.nan)
        assert_frame_equal(expected_biodiversity_metrics, actual_biodiversity_metrics)

    def testRaisesErrorNoFragmentsAlpha(self):
        """Alpha diversity requires fragment data."""
        with self.assertRaises(IOError):
            self.test2.calculate_alpha_diversity()

    def testRaisesErrorNoFragmentsBeta(self):
        """Beta diversity requires fragment data."""
        with self.assertRaises(IOError):
            self.test2.calculate_beta_diversity()

    def testRaisesErrorNoFragmentsRichness(self):
        """Fragment richness requires fragment data."""
        with self.assertRaises(IOError):
            self.test2.calculate_fragment_richness()

    def testRaisesErrorNoFragmentsOctaves(self):
        """Fragment octaves require fragment data."""
        with self.assertRaises(IOError):
            self.test2.calculate_fragment_octaves()

    @unittest.skipIf(sys.version[0] != "3", "Skipping Python 3.x tests")
    def testModelFitting2(self):
        """Checks goodness-of-fit values under Python 3."""
        random.seed(2)
        self.test.calculate_goodness_of_fit()
        self.assertAlmostEqual(self.test.get_goodness_of_fit(), 0.30140801329929373, places=6)
        self.assertAlmostEqual(self.test.get_goodness_of_fit_fragment_octaves(), 0.0680205429120108, places=6)
        self.assertAlmostEqual(self.test.get_goodness_of_fit_fragment_richness(), 0.9244977999898334, places=6)

    @unittest.skipIf(sys.version[0] == "3", "Skipping Python 2.x tests")
    def testModelFitting3(self):
        """Checks goodness-of-fit values under Python 2 (same expectations as Python 3)."""
        random.seed(2)
        self.test.calculate_goodness_of_fit()
        self.assertAlmostEqual(self.test.get_goodness_of_fit(), 0.30140801329929373, places=6)
        self.assertAlmostEqual(self.test.get_goodness_of_fit_fragment_octaves(), 0.0680205429120108, places=6)
        self.assertAlmostEqual(self.test.get_goodness_of_fit_fragment_richness(), 0.9244977999898334, places=6)

    def testErrorIfNotApplied(self):
        """output() requires that speciation parameters have been applied first."""
        c = CoalescenceTree(os.path.join("sample", "sample.db"))
        with self.assertRaises(RuntimeError):
            c.output()

    def testFragmentNumbersMatching(self):
        """Checks detection of matching/mismatching fragment counts against comparison data."""
        test = CoalescenceTree(os.path.join("output", "sampledb1.db"), logging_level=50)
        test.clear_calculations()
        with self.assertRaises(RuntimeError):
            test._check_fragment_numbers_match()
        with self.assertRaises(ValueError):
            test.calculate_fragment_abundances()
            test._check_fragment_numbers_match()
        test.comparison_file = os.path.join("sample", "PlotBiodiversityMetrics.db")
        self.assertTrue(test._check_fragment_numbers_match())
        # Removing one fragment abundance breaks the match.
        test.fragment_abundances.pop(0)
        self.assertFalse(test._check_fragment_numbers_match())

    def testFragmentNumbersEqualisation(self):
        """Checks that fragment numbers can be equalised against comparison data."""
        test = CoalescenceTree(os.path.join("output", "sampledb2.db"), logging_level=50)
        test.clear_calculations()
        test.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
        test.calculate_fragment_richness()
        # NOTE(review): this calls the method on the shared class-level tree
        # (self.test) rather than the local `test` — possibly a typo; confirm intent.
        self.test._equalise_fragment_number("notafrag", 1)
        test.fragment_abundances[0][2] += 1000
        test._equalise_fragment_number("P09", 1)
        self.assertTrue(test._check_fragment_numbers_match())

    def testFragmentNumbersErrors(self):
        """Equalising all fragments without comparison abundances raises a ValueError."""
        test = CoalescenceTree(os.path.join("output", "sampledb3.db"), logging_level=50)
        test.clear_calculations()
        test.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
        test.comparison_abundances = None
        with self.assertRaises(ValueError):
            test._equalise_all_fragment_numbers()

    def testAdjustBiodiversityMetrics(self):
        """adjust_data() runs without error once comparison data has been imported."""
        test = CoalescenceTree(os.path.join("output", "sampledb5.db"), logging_level=50)
        test.clear_calculations()
        test.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
        test.adjust_data()

    def testComparisonOctavesModification(self):
        """Comparison octaves can be calculated and stored back into the comparison database."""
        test = CoalescenceTree(os.path.join("output", "sampledb6.db"), logging_level=50)
        dst = os.path.join("output", "PlotBiodiversityMetricsNoAlpha2.db")
        shutil.copy(os.path.join("sample", "PlotBiodiversityMetricsNoAlpha.db"), dst)
        test.import_comparison_data(dst)
        test.calculate_comparison_octaves(store=True)
        self.assertTrue(os.path.exists(dst))

    @unittest.skipIf(sys.version[0] == "2", "Skipping Python 3.x tests")
    def testDownsamplingAndRevert(self):
        """Checks downsampling by proportion and that revert_downsample restores the original data."""
        c = CoalescenceTree(os.path.join("output", "sampledb9.db"))
        random.seed(a=10, version=3)
        original_individuals = c.get_number_individuals()
        original_richness = c.get_species_richness_pd()
        c.wipe_data()
        # Proportions above 1.0 are invalid.
        with self.assertRaises(ValueError):
            c.downsample(sample_proportion=2.0)
        c.downsample(sample_proportion=0.1)
        c.set_speciation_parameters([0.1, 0.2])
        c.apply()
        new_individuals = c.get_number_individuals()
        self.assertEqual(1452, new_individuals)
        # Downsampling preserves the original data in SPECIES_LIST_ORIGINAL.
        self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST"))
        self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST_ORIGINAL"))
        c = CoalescenceTree(os.path.join("output", "sampledb9.db"))
        c.revert_downsample()
        c.wipe_data()
        c.set_speciation_parameters([0.1, 0.2])
        c.apply()
        final_individuals = c.get_number_individuals()
        assert_frame_equal(original_richness, c.get_species_richness_pd())
        self.assertEqual(original_individuals, final_individuals)
        self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST"))
        self.assertFalse(check_sql_table_exist(c.database, "SPECIES_LIST_ORIGINAL"))
        # Repeat the process on the NSE reference database.
        c = CoalescenceTree(os.path.join("output", "nse_reference1.db"))
        nse_richness = c.get_species_richness_pd()
        nse_no_individuals = c.get_number_individuals()
        c.wipe_data()
        c.downsample(sample_proportion=0.1)
        c.set_speciation_parameters([0.000001, 0.999999])
        c.apply()
        new_no_individuals = c.get_number_individuals()
        self.assertAlmostEqual(new_no_individuals / nse_no_individuals, 0.1, 5)
        self.assertEqual(1000, c.get_species_richness(reference=2))
        self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST"))
        self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST_ORIGINAL"))
        c = CoalescenceTree(os.path.join("output", "nse_reference1.db"))
        c.revert_downsample()
        c.wipe_data()
        c.set_speciation_parameters([0.000001, 0.999999])
        c.apply_incremental()
        c.set_speciation_parameters([0.5])
        c.apply()
        actual_richness = c.get_species_richness_pd()
        assert_frame_equal(nse_richness, actual_richness)
        self.assertEqual(nse_no_individuals, c.get_number_individuals())
        self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST"))
        self.assertFalse(check_sql_table_exist(c.database, "SPECIES_LIST_ORIGINAL"))
        # Reverting twice is an error: the original table no longer exists.
        with self.assertRaises(IOError):
            c.revert_downsample()

    @unittest.skipIf(sys.version[0] == "2", "Skipping Python 3.x tests")
    def testDownsamplingByLocationAndRevert(self):
        """Checks downsampling restricted to fragment locations, and its reversal."""
        c = CoalescenceTree(os.path.join("output", "sampledb10.db"))
        random.seed(a=10, version=3)
        original_individuals = c.get_number_individuals()
        original_richness = c.get_species_richness_pd()
        c.wipe_data()
        with self.assertRaises(ValueError):
            c.downsample_at_locations(fragment_csv=os.path.join("sample", "FragmentsTestFail1.csv"))
        with self.assertRaises(IOError):
            c.downsample_at_locations(fragment_csv="not_a_file.csv")
        c.downsample_at_locations(fragment_csv=os.path.join("sample", "FragmentsTest3.csv"))
        c.set_speciation_parameters([0.1, 0.2])
        c.apply()
        new_individuals = c.get_number_individuals()
        self.assertEqual(2, new_individuals)
        self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST"))
        self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST_ORIGINAL"))
        c = CoalescenceTree(os.path.join("output", "sampledb10.db"))
        c.revert_downsample()
        c.wipe_data()
        c.set_speciation_parameters([0.1, 0.2])
        c.apply()
        final_individuals = c.get_number_individuals()
        assert_frame_equal(original_richness, c.get_species_richness_pd())
        self.assertEqual(original_individuals, final_individuals)
        self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST"))
        self.assertFalse(check_sql_table_exist(c.database, "SPECIES_LIST_ORIGINAL"))
        c = CoalescenceTree(os.path.join("output", "sampledb10.db"))
        c.wipe_data()
        # ignore_errors skips fragments that fall outside the sampled area.
        c.downsample_at_locations(fragment_csv=os.path.join("sample", "FragmentsTest4.csv"), ignore_errors=True)
        c.set_speciation_parameters([0.1, 0.2])
        c.apply()
        new_individuals = c.get_number_individuals()
        self.assertEqual(3, new_individuals)
class TestCoalescenceTreeWriteCsvs(unittest.TestCase):
    """Tests exporting database tables to csv files."""

    @classmethod
    def setUpClass(cls):
        """Opens the NSE reference database used by all csv-export tests."""
        cls.c = CoalescenceTree(os.path.join("sample", "nse_reference.db"))

    def testWriteCommunityParameterToCsv(self):
        """COMMUNITY_PARAMETERS is written to csv with the expected rows; errors are raised
        for an already-existing output file and for an unknown table name."""
        output_csv = os.path.join("output", "community_parameters1.csv")
        self.c.write_to_csv(output_csv, "COMMUNITY_PARAMETERS")
        self.assertTrue(os.path.exists(output_csv))
        import csv

        # Python 2 csv expects a binary file handle; Python 3 expects text.
        if sys.version_info[0] < 3:
            infile = open(output_csv, "rb")
        else:
            infile = open(output_csv, "r")
        expected_output = [
            ["reference", "speciation_rate", "time", "fragments", "metacommunity_reference"],
            ["1", "1e-06", "0.0", "0", "0"],
            ["2", "0.99999", "0.0", "0", "0"],
            ["3", "0.5", "0.0", "0", "0"],
        ]
        actual_output = []
        with infile as csv_file:
            csv_reader = csv.reader(csv_file)
            for row in csv_reader:
                actual_output.append(row)
        self.assertEqual(expected_output, actual_output)
        # Writing to an existing file is refused.
        with self.assertRaises(IOError):
            self.c.write_to_csv(output_csv, "COMMUNITY_PARAMETERS")
        # Unknown table names raise a KeyError.
        with self.assertRaises(KeyError):
            self.c.write_to_csv("notacsv.csv", "NOTATABLE")

    def testWritesAllCsvs(self):
        """write_all_to_csvs exports every table, stripping any extension from the prefix."""
        output_dir = os.path.join("output", "csvdir")
        if os.path.exists(output_dir):
            # os.remove cannot delete a directory; rmtree clears any previous run.
            shutil.rmtree(output_dir)
        self.c.write_all_to_csvs(output_dir, "out1")
        expected_tables = ["COMMUNITY_PARAMETERS", "SIMULATION_PARAMETERS", "SPECIES_ABUNDANCES", "SPECIES_LIST"]
        for table in expected_tables:
            self.assertTrue(os.path.exists(os.path.join(output_dir, "out1_{}.csv".format(table))))
        for file in os.listdir(output_dir):
            if ".csv" in file:
                self.assertIn(file, ["out1_{}.csv".format(x) for x in expected_tables])
        # A ".csv" suffix on the prefix is stripped before the table name is appended.
        self.c.write_all_to_csvs(output_dir, "out2.csv")
        for table in expected_tables:
            self.assertTrue(os.path.exists(os.path.join(output_dir, "out2_{}.csv".format(table))))
        # A trailing dot is handled the same way.
        self.c.write_all_to_csvs(output_dir, "out3.")
        for table in expected_tables:
            self.assertTrue(os.path.exists(os.path.join(output_dir, "out3_{}.csv".format(table))))
class TestCoalescenceTreeSpeciesDistances(unittest.TestCase):
    """Tests the species distance similarity calculations."""

    @classmethod
    def setUpClass(cls):
        """Copies the sample database and calculates species distance similarity on it."""
        database_path = os.path.join("output", "sampledb1.db")
        if os.path.exists(database_path):
            os.remove(database_path)
        shutil.copyfile(os.path.join("sample", "sample.db"), database_path)
        cls.test = CoalescenceTree(database_path)
        cls.test.clear_calculations()
        cls.test.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
        cls.test.calculate_species_distance_similarity()

    def testSpeciesDistanceSimilarity(self):
        """Checks the stored mean distance metric and the first few distance bins."""
        mean_distance = self.test.cursor.execute(
            "SELECT value FROM BIODIVERSITY_METRICS WHERE community_reference == 1 AND "
            "metric == 'mean_distance_between_individuals'"
        ).fetchone()[0]
        self.assertAlmostEqual(mean_distance, 5.423769507803121, places=5)
        species_distances = self.test.get_species_distance_similarity(community_reference=1)
        expected_bins = [[0, 11], [1, 274], [2, 289]]
        for index, expected in enumerate(expected_bins):
            self.assertListEqual(species_distances[index], expected)
class TestCoalescenceTreeAnalyseIncorrectComparison(unittest.TestCase):
    """Tests that goodness-of-fit fails when the comparison data lacks matching parameters."""

    @classmethod
    def setUpClass(cls):
        """Copies the sample database and calculates metrics using a comparison
        database that is missing alpha diversity data."""
        random.seed(10)
        dst = os.path.join("output", "sampledb2.db")
        if os.path.exists(dst):
            os.remove(dst)
        shutil.copyfile(os.path.join("sample", "sample.db"), dst)
        cls.test = CoalescenceTree(logging_level=40)
        cls.test.set_database(dst)
        cls.test.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetricsNoAlpha.db"))
        cls.test.calculate_comparison_octaves(False)
        cls.test.clear_calculations()
        cls.test.calculate_fragment_richness()
        cls.test.calculate_fragment_octaves()
        cls.test.calculate_octaves_error()
        cls.test.calculate_alpha_diversity()
        # NOTE(review): calculate_alpha_diversity is called twice in a row —
        # possibly an accidental duplicate, or a deliberate check that
        # recalculation is harmless; confirm intent before removing.
        cls.test.calculate_alpha_diversity()
        cls.test.calculate_beta_diversity()
        cls.test2 = CoalescenceTree()
        cls.test2.set_database(os.path.join("sample", "sample_nofrag.db"))

    @classmethod
    def tearDownClass(cls):
        """Removes the calculated metrics from the shared test database."""
        cls.test.clear_calculations()

    def testRaisesErrorMismatchParameters(self):
        """Goodness-of-fit raises a ValueError when comparison parameters do not match."""
        with self.assertRaises(ValueError):
            self.test.calculate_goodness_of_fit()
class TestSimulationAnalysisTemporal(unittest.TestCase):
    """Tests validation of the times argument when setting speciation parameters."""

    @classmethod
    def setUpClass(cls):
        """Copies the sample database into the output directory and wipes its data."""
        source_file = os.path.join("sample", "sample2.db")
        destination_file = os.path.join("output", "sample2.db")
        if not os.path.exists(destination_file):
            shutil.copy(source_file, destination_file)
        cls.tree = CoalescenceTree()
        cls.tree.set_database(destination_file)
        cls.tree.wipe_data()

    def testTimesWrongFormatError(self):
        """Non-numeric times raise a TypeError; numeric times are coerced to floats."""
        for invalid_times in ([0.1, 0.2, "notafloat"], "notafloat"):
            with self.assertRaises(TypeError):
                self.tree.set_speciation_parameters([0.4, 0.6], times=invalid_times)
        self.tree.times = []
        self.tree.set_speciation_parameters([0.4, 0.6], times=[0, 1, 10])
        self.assertEqual([0.0, 1.0, 10.0], self.tree.times)
class TestSimulationAnalysis(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """Copies the sample database, applies speciation parameters with spatial and
        fragment output, and pre-computes the fragment metrics used by the tests."""
        src = os.path.join("sample", "sample2.db")
        dst = os.path.join("output", "sample2.db")
        if os.path.exists(dst):
            os.remove(dst)
        shutil.copy(src, dst)
        cls.tree = CoalescenceTree(logging_level=50)
        cls.tree.set_database(dst)
        cls.tree.wipe_data()
        cls.tree.set_speciation_parameters(
            speciation_rates=[0.5, 0.7],
            record_spatial="T",
            record_fragments=os.path.join("sample", "FragmentsTest.csv"),
            sample_file=os.path.join("sample", "SA_samplemaskINT.tif"),
        )
        cls.tree.apply()
        cls.tree.calculate_fragment_richness()
        cls.tree.calculate_fragment_octaves()
        # Fix the numpy seed so the sampling-based tests are deterministic.
        np.random.seed(100)
def testSetDatabaseErrors(self):
sim = Simulation()
c = CoalescenceTree()
with self.assertRaises(RuntimeError):
c.set_database(sim)
c = CoalescenceTree()
with self.assertRaises(IOError):
c.set_database(os.path.join("sample", "failsampledoesntexist.db"))
def testFragmentConfigNoExistError(self):
tree = CoalescenceTree(self.tree.file)
with self.assertRaises(IOError):
tree.set_speciation_parameters(
speciation_rates=[0.5, 0.7],
record_spatial="T",
record_fragments=os.path.join("sample", "notafragmentconfig.csv"),
sample_file=os.path.join("sample", "SA_samplemaskINT.tif"),
)
with self.assertRaises(IOError):
tree.set_speciation_parameters(
speciation_rates=[0.5, 0.7],
record_spatial="T",
record_fragments=os.path.join("sample", "example_historical_fine.tif"),
sample_file=os.path.join("sample", "SA_samplemaskINT.tif"),
)
    def testReadsFragmentsRichness(self):
        """Checks the stored simulation parameters and fragment richness values.

        First verifies every simulation parameter against a literal expected
        dictionary, then checks fragment richness lookups and the pandas export.
        """
        sim_params = self.tree.get_simulation_parameters()
        expected_params = dict(
            seed=9,
            task=1,
            output_dir="output",
            speciation_rate=0.5,
            sigma=2.828427,
            tau=2.0,
            deme=1,
            sample_size=0.1,
            max_time=2.0,
            dispersal_relative_cost=1.0,
            min_num_species=1,
            habitat_change_rate=0.0,
            gen_since_historical=200.0,
            time_config_file="null",
            coarse_map_file="sample/SA_sample_coarse.tif",
            coarse_map_x=35,
            coarse_map_y=41,
            coarse_map_x_offset=11,
            coarse_map_y_offset=14,
            coarse_map_scale=1.0,
            fine_map_file="sample/SA_sample_fine.tif",
            fine_map_x=13,
            fine_map_y=13,
            fine_map_x_offset=0,
            fine_map_y_offset=0,
            sample_file="sample/SA_samplemaskINT.tif",
            grid_x=13,
            grid_y=13,
            sample_x=13,
            sample_y=13,
            sample_x_offset=0,
            sample_y_offset=0,
            historical_coarse_map="none",
            historical_fine_map="none",
            sim_complete=1,
            dispersal_method="normal",
            m_probability=0.0,
            cutoff=0.0,
            landscape_type="closed",
            protracted=0,
            min_speciation_gen=0.0,
            max_speciation_gen=0.0,
            dispersal_map="none",
        )
        # Compare key-by-key so a mismatch reports which parameter differs.
        for key in sim_params.keys():
            self.assertEqual(
                sim_params[key],
                expected_params[key],
                msg="Error in {}: {} != {}".format(key, sim_params[key], expected_params[key]),
            )
        fragment2_richness = ["fragment2", 1, 129]
        self.assertEqual(self.tree.get_fragment_richness(fragment="fragment2", reference=1), 129)
        self.assertEqual(self.tree.get_fragment_richness(fragment="fragment1", reference=2), 175)
        octaves = self.tree.get_fragment_richness()
        self.assertListEqual(fragment2_richness, [list(x) for x in octaves if x[0] == "fragment2" and x[1] == 1][0])
        # Rebuild the expected dataframe from individual lookups and compare
        # against the bulk pandas export.
        expected_fragment_richness = []
        for reference in self.tree.get_community_references():
            for fragment in self.tree.get_fragment_list(reference):
                fragment_richness = self.tree.get_fragment_richness(fragment=fragment, reference=reference)
                expected_fragment_richness.append(
                    {"fragment": fragment, "community_reference": reference, "fragment_richness": fragment_richness}
                )
        expected_fragment_richness_df = (
            pd.DataFrame(expected_fragment_richness)
            .sort_values(by=["fragment", "community_reference"])
            .reset_index(drop=True)
        )
        actual_fragment_richness = self.tree.get_fragment_richness_pd().reset_index(drop=True)
        assert_frame_equal(expected_fragment_richness_df, actual_fragment_richness, check_like=True)
def testGetsFragmentList(self):
fragment_list = self.tree.get_fragment_list()
expected_list = ["fragment1", "fragment2"]
self.assertListEqual(expected_list, fragment_list)
    def testReadsFragmentAbundances(self):
        """Checks fragment abundance retrieval by fragment and via the pandas export."""
        expected_abundances = [
            [610, 1],
            [611, 1],
            [612, 1],
            [613, 1],
            [614, 1],
            [615, 1],
            [616, 1],
            [617, 1],
            [618, 1],
            [619, 1],
        ]
        actual_abundances = self.tree.get_species_abundances(fragment="fragment2", reference=1)
        for i, each in enumerate(expected_abundances):
            self.assertListEqual(actual_abundances[i], each)
        # A fragment without a reference is an invalid combination.
        with self.assertRaises(ValueError):
            self.tree.get_species_abundances(fragment="fragment2")
        # Rebuild the expected dataframe from individual lookups and compare
        # against the bulk pandas export (both sorted the same way).
        expected_fragment_abundances_list = []
        for reference in self.tree.get_community_references():
            for fragment in self.tree.get_fragment_list(reference):
                fragment_abundances = self.tree.get_fragment_abundances(fragment=fragment, reference=reference)
                for species_id, abundance in fragment_abundances:
                    expected_fragment_abundances_list.append(
                        {
                            "fragment": fragment,
                            "community_reference": reference,
                            "species_id": species_id,
                            "no_individuals": abundance,
                        }
                    )
        expected_fragment_abundances = (
            pd.DataFrame(expected_fragment_abundances_list)
            .sort_values(by=["fragment", "community_reference", "species_id"])
            .reset_index(drop=True)
        )
        actual_fragment_abundances = (
            self.tree.get_fragment_abundances_pd()
            .sort_values(by=["fragment", "community_reference", "species_id"])
            .reset_index(drop=True)
        )
        assert_frame_equal(expected_fragment_abundances, actual_fragment_abundances, check_like=True)
def testFragmentRichnessRaiseError(self):
failtree = CoalescenceTree()
failtree.set_database(os.path.join("sample", "failsample.db"))
with self.assertRaises(IOError):
failtree.get_fragment_richness()
with self.assertRaises(IOError):
failtree.get_fragment_richness_pd()
with self.assertRaises(IOError):
self.tree.get_fragment_richness(fragment="fragment4", reference=1)
with self.assertRaises(SyntaxError):
self.tree.get_fragment_richness(fragment="fragment4")
with self.assertRaises(SyntaxError):
self.tree.get_fragment_richness(reference=1)
    def testReadsFragmentOctaves(self):
        """Checks fragment octave lookups per fragment, in bulk, and via the pandas export."""
        octaves = self.tree.get_fragment_octaves(fragment="fragment2", reference=1)
        octaves2 = self.tree.get_fragment_octaves(fragment="fragment1", reference=1)
        all_octaves = self.tree.get_fragment_octaves()
        desired = ["fragment1", 1, 0, 173]
        self.assertListEqual([0, 128], octaves[0])
        self.assertListEqual([0, 173], octaves2[0])
        self.assertListEqual(desired, [x for x in all_octaves if x[0] == "fragment1" and x[1] == 1 and x[2] == 0][0])
        # Rebuild the expected dataframe from individual lookups (including the
        # "whole" pseudo-fragment) and compare against the bulk pandas export.
        expected_fragment_octaves_list = []
        for reference in self.tree.get_community_references():
            fragment_list = self.tree.get_fragment_list(reference)
            fragment_list.append("whole")
            for fragment in fragment_list:
                try:
                    octaves = self.tree.get_fragment_octaves(fragment=fragment, reference=reference)
                    for octave, richness in octaves:
                        expected_fragment_octaves_list.append(
                            {
                                "fragment": fragment,
                                "community_reference": reference,
                                "octave": octave,
                                "richness": richness,
                            }
                        )
                except RuntimeError:
                    # Fragments without octave data for this reference are skipped.
                    continue
        expected_fragment_octaves = (
            pd.DataFrame(expected_fragment_octaves_list)
            .sort_values(["fragment", "community_reference", "octave"], axis=0)
            .reset_index(drop=True)
        )
        actual_fragment_octaves = (
            self.tree.get_fragment_octaves_pd()
            .sort_values(["fragment", "community_reference", "octave"], axis=0)
            .reset_index(drop=True)
        )
        assert_frame_equal(expected_fragment_octaves, actual_fragment_octaves, check_like=True)
def testFragmentOctavesRaiseError(self):
failtree = CoalescenceTree()
try:
failtree.set_database("sample/failsample.db")
except sqlite3.Error:
pass
with self.assertRaises(sqlite3.Error):
failtree.get_fragment_octaves(fragment="fragment4", reference=100)
with self.assertRaises(RuntimeError):
self.tree.get_fragment_octaves(fragment="fragment4", reference=100)
with self.assertRaises(SyntaxError):
self.tree.get_fragment_octaves(fragment="fragment4")
with self.assertRaises(SyntaxError):
self.tree.get_fragment_octaves(reference=100)
def testFragmentSampling(self):
self.assertEqual(
10,
self.tree.sample_fragment_richness(
fragment="fragment1", number_of_individuals=10, n=1, community_reference=2
),
)
self.assertEqual(
10,
self.tree.sample_fragment_richness(
fragment="fragment2", number_of_individuals=10, n=10, community_reference=2
),
)
    def testLandscapeSampling(self):
        """Sampling richness across the whole landscape yields the expected values."""
        number_dict = {"fragment1": 3, "fragment2": 10}
        # Seed numpy so the sampled richness is reproducible; the two calls below
        # consume the RNG stream in this exact order, so do not reorder them.
        np.random.seed(100)
        self.assertEqual(
            13, self.tree.sample_landscape_richness(number_of_individuals=number_dict, n=1, community_reference=2)
        )
        self.assertAlmostEqual(
            99.9, self.tree.sample_landscape_richness(number_of_individuals=100, n=10, community_reference=1), places=3
        )
    def testRaisesSamplingErrors(self):
        """Landscape sampling rejects impossible or misnamed fragment requests."""
        # Requesting vastly more individuals than the fragment can supply raises KeyError.
        number_dict = {"fragment1": 3000000, "fragment2": 10}
        with self.assertRaises(KeyError):
            self.assertEqual(
                13, self.tree.sample_landscape_richness(number_of_individuals=number_dict, n=1, community_reference=2)
            )
        # A fragment name not present in the database ("fragment") also raises KeyError.
        number_dict2 = {"fragment": 10, "fragment2": 10}
        with self.assertRaises(KeyError):
            self.assertEqual(
                13, self.tree.sample_landscape_richness(number_of_individuals=number_dict2, n=1, community_reference=2)
            )
def testSpeciesRichness(self):
actual_species_richness = (
self.tree.get_species_richness_pd().sort_values(by=["community_reference"]).reset_index(drop=True)
)
expected_species_richness_list = []
for reference in self.tree.get_community_references():
expected_species_richness_list.append(
{"community_reference": reference, "richness": self.tree.get_species_richness(reference=reference)}
)
expected_species_richness = pd.DataFrame(expected_species_richness_list)
assert_frame_equal(actual_species_richness, expected_species_richness, check_like=True)
def testOctaves(self):
actual_species_octaves = (
self.tree.get_octaves_pd().sort_values(by=["community_reference", "octave"]).reset_index(drop=True)
)
expected_species_octaves_list = []
for reference in self.tree.get_community_references():
for octave, richness in self.tree.get_octaves(reference):
expected_species_octaves_list.append(
{"community_reference": reference, "octave": octave, "richness": richness}
)
expected_species_octaves = pd.DataFrame(expected_species_octaves_list)
assert_frame_equal(actual_species_octaves, expected_species_octaves, check_like=True)
class TestMetacommunityApplication(unittest.TestCase):
    """Tests metacommunity application for simulated, analytical and external options,
    including automatic method detection and parameter storage."""

    @classmethod
    def setUpClass(cls):
        """Copy the sample database into six working copies, one per test."""
        src = os.path.join("sample", "sample.db")
        for i in range(6):
            dst = os.path.join("output", "sample_{}.db".format(i))
            if os.path.exists(dst):
                os.remove(dst)
            shutil.copy2(src, dst)

    def testMetacommunityAddingInvalidParameters(self):
        """Invalid metacommunity parameter combinations raise the expected errors."""
        tree = CoalescenceTree(os.path.join("output", "sample_0.db"))
        tree.wipe_data()
        # No metacommunity parameters exist yet, so the pandas getter is an IOError.
        with self.assertRaises(IOError):
            tree.get_metacommunity_parameters_pd()
        tree.set_speciation_parameters([0.1, 0.2])
        # Each (size, speciation rate, option, reference) combination below is invalid.
        for size, spec, opt, ref in [
            [0, 0.1, "simulated", None],
            [10, 0.0, "analytical", None],
            [None, None, "analytical", None],
            [10, 0.0, "path/to/file", None],
            [0, 0.0, "path/to/file", None],
            [0, 0.0, "path/to/not/a/file.db", 1],
        ]:
            with self.assertRaises(ValueError):
                tree.add_metacommunity_parameters(
                    metacommunity_size=size,
                    metacommunity_speciation_rate=spec,
                    metacommunity_option=opt,
                    metacommunity_reference=ref,
                )
        # A missing external database file is an IOError rather than a ValueError.
        with self.assertRaises(IOError):
            tree.add_metacommunity_parameters(metacommunity_option="not/a/file/db.db", metacommunity_reference=1)

    def testMetacommunitySimulation(self):
        """Simulated metacommunities store their parameters and produce expected richness."""
        tree = CoalescenceTree(os.path.join("output", "sample_1.db"))
        tree.wipe_data()
        tree.set_speciation_parameters(
            [0.1, 0.2], metacommunity_size=10000, metacommunity_speciation_rate=0.001, metacommunity_option="simulated"
        )
        tree.add_metacommunity_parameters(
            metacommunity_size=15000, metacommunity_speciation_rate=0.1, metacommunity_option="simulated"
        )
        tree.add_metacommunity_parameters(
            metacommunity_size=100000, metacommunity_speciation_rate=0.001, metacommunity_option="simulated"
        )
        tree.apply()
        params_1 = tree.get_metacommunity_parameters(1)
        params_2 = tree.get_metacommunity_parameters(2)
        params_3 = tree.get_metacommunity_parameters(3)
        self.assertEqual(10000, params_1["metacommunity_size"])
        self.assertEqual(0.001, params_1["speciation_rate"])
        self.assertEqual("simulated", params_1["option"])
        self.assertEqual(0, params_1["external_reference"])
        self.assertEqual(15000, params_2["metacommunity_size"])
        self.assertEqual(0.1, params_2["speciation_rate"])
        self.assertEqual("simulated", params_2["option"])
        self.assertEqual(0, params_2["external_reference"])
        self.assertEqual(100000, params_3["metacommunity_size"])
        self.assertEqual(0.001, params_3["speciation_rate"])
        self.assertEqual("simulated", params_3["option"])
        self.assertEqual(0, params_3["external_reference"])
        self.assertEqual(51, tree.get_species_richness(1))
        self.assertEqual(47, tree.get_species_richness(2))
        self.assertEqual(681, tree.get_species_richness(3))
        self.assertEqual(783, tree.get_species_richness(4))
        self.assertEqual(247, tree.get_species_richness(5))
        self.assertEqual(241, tree.get_species_richness(6))
        # Cross-check the pandas parameters output against the per-reference getter.
        expected_metacommunity_parameters_list = []
        for reference in tree.get_community_references():
            try:
                params = tree.get_metacommunity_parameters(reference)
                params["reference"] = reference
                expected_metacommunity_parameters_list.append(params)
            except KeyError:
                # References without metacommunity parameters are skipped.
                continue
        expected_metacommunity_parameters = pd.DataFrame(expected_metacommunity_parameters_list).sort_values(
            ["reference"]
        )
        actual_metacommunity_parameters = tree.get_metacommunity_parameters_pd().sort_values(["reference"])
        assert_frame_equal(expected_metacommunity_parameters, actual_metacommunity_parameters, check_like=True)

    def testMetacommunityAnalytical(self):
        """Analytical metacommunities store their parameters and produce expected richness."""
        tree = CoalescenceTree(os.path.join("output", "sample_2.db"))
        tree.wipe_data()
        tree.set_speciation_parameters(
            [0.1, 0.2], metacommunity_size=10000, metacommunity_speciation_rate=0.001, metacommunity_option="analytical"
        )
        tree.add_metacommunity_parameters(
            metacommunity_size=15000, metacommunity_speciation_rate=0.1, metacommunity_option="analytical"
        )
        tree.add_metacommunity_parameters(
            metacommunity_size=100000, metacommunity_speciation_rate=0.001, metacommunity_option="analytical"
        )
        tree.apply()
        params_1 = tree.get_metacommunity_parameters(1)
        params_2 = tree.get_metacommunity_parameters(2)
        params_3 = tree.get_metacommunity_parameters(3)
        self.assertEqual(10000, params_1["metacommunity_size"])
        self.assertEqual(0.001, params_1["speciation_rate"])
        self.assertEqual("analytical", params_1["option"])
        self.assertEqual(0, params_1["external_reference"])
        self.assertEqual(15000, params_2["metacommunity_size"])
        self.assertEqual(0.1, params_2["speciation_rate"])
        self.assertEqual("analytical", params_2["option"])
        self.assertEqual(0, params_2["external_reference"])
        self.assertEqual(100000, params_3["metacommunity_size"])
        self.assertEqual(0.001, params_3["speciation_rate"])
        self.assertEqual("analytical", params_3["option"])
        self.assertEqual(0, params_3["external_reference"])
        self.assertEqual(51, tree.get_species_richness(1))
        self.assertEqual(57, tree.get_species_richness(2))
        self.assertEqual(694, tree.get_species_richness(3))
        self.assertEqual(760, tree.get_species_richness(4))
        self.assertEqual(222, tree.get_species_richness(5))
        self.assertEqual(234, tree.get_species_richness(6))

    def testMetacommunityExternal(self):
        """External (database-sourced) metacommunities store their parameters and richness."""
        tree = CoalescenceTree(os.path.join("output", "sample_3.db"))
        tree.wipe_data()
        tree.set_speciation_parameters([0.1, 0.2], metacommunity_option=os.path.join("sample", "nse_reference.db"))
        tree.add_metacommunity_parameters(
            metacommunity_option=os.path.join("sample", "nse_reference.db"), metacommunity_reference=2
        )
        tree.apply()
        params_1 = tree.get_metacommunity_parameters(1)
        params_2 = tree.get_metacommunity_parameters(2)
        # External metacommunities store size 0 and rate 0.0; the option records the
        # source database path and external_reference records the community used.
        self.assertEqual(0, params_1["metacommunity_size"])
        self.assertEqual(0.0, params_1["speciation_rate"])
        self.assertEqual(os.path.join("sample", "nse_reference.db"), params_1["option"])
        self.assertEqual(1, params_1["external_reference"])
        self.assertEqual(0, params_2["metacommunity_size"])
        self.assertEqual(0.0, params_2["speciation_rate"])
        self.assertEqual(os.path.join("sample", "nse_reference.db"), params_2["option"])
        self.assertEqual(2, params_2["external_reference"])
        self.assertEqual(1, tree.get_species_richness(1))
        self.assertEqual(1, tree.get_species_richness(2))
        self.assertEqual(850, tree.get_species_richness(3))
        self.assertEqual(975, tree.get_species_richness(4))

    def testMetacommunityAnalyticalMethodDetection(self):
        """Large metacommunities with option "none" are detected as analytical."""
        tree = CoalescenceTree(os.path.join("output", "sample_4.db"))
        tree.wipe_data()
        tree.set_speciation_parameters(
            [0.1, 0.2], metacommunity_size=110000, metacommunity_speciation_rate=0.5, metacommunity_option="none"
        )
        tree.add_metacommunity_parameters(
            metacommunity_speciation_rate=0.5, metacommunity_size=120000, metacommunity_option="none"
        )
        tree.apply()
        params_1 = tree.get_metacommunity_parameters(1)
        params_2 = tree.get_metacommunity_parameters(2)
        self.assertEqual(110000, params_1["metacommunity_size"])
        self.assertEqual(0.5, params_1["speciation_rate"])
        self.assertEqual("analytical", params_1["option"])
        self.assertEqual(120000, params_2["metacommunity_size"])
        self.assertEqual(0.5, params_2["speciation_rate"])
        self.assertEqual("analytical", params_2["option"])

    def testMetacommunitySimulatedMethodDetection(self):
        """Small metacommunities with option "none" are detected as simulated."""
        tree = CoalescenceTree(os.path.join("output", "sample_5.db"))
        tree.wipe_data()
        tree.set_speciation_parameters(
            [0.1, 0.2], metacommunity_size=1000, metacommunity_speciation_rate=0.5, metacommunity_option="none"
        )
        tree.add_metacommunity_parameters(
            metacommunity_speciation_rate=0.5, metacommunity_size=2000, metacommunity_option="none"
        )
        tree.apply()
        params_1 = tree.get_metacommunity_parameters(1)
        params_2 = tree.get_metacommunity_parameters(2)
        self.assertEqual(1000, params_1["metacommunity_size"])
        self.assertEqual(0.5, params_1["speciation_rate"])
        self.assertEqual("simulated", params_1["option"])
        self.assertEqual(2000, params_2["metacommunity_size"])
        self.assertEqual(0.5, params_2["speciation_rate"])
        self.assertEqual("simulated", params_2["option"])
@skipLongTest
class TestMetacommunityApplicationSpeciesAbundances(unittest.TestCase):
    """Compares species abundances between analytical and simulated metacommunities.

    Runs a small non-spatial simulation once for the whole class (hence the
    ``skipLongTest`` guard) and applies three metacommunity configurations.
    """

    @classmethod
    def setUpClass(cls):
        """Run the simulation and apply analytical/simulated metacommunity parameters."""
        cls.sim = Simulation()
        cls.sim.set_simulation_parameters(
            seed=11, task=110, output_directory="output", min_speciation_rate=0.1, spatial=False, deme=20541
        )
        cls.sim.run()
        cls.ct = CoalescenceTree(cls.sim)
        cls.ct.wipe_data()
        cls.ct.set_speciation_parameters(speciation_rates=0.1)
        cls.ct.add_metacommunity_parameters(
            metacommunity_option="analytical", metacommunity_size=1000000, metacommunity_speciation_rate=0.00005
        )
        cls.ct.add_metacommunity_parameters(
            metacommunity_option="simulated", metacommunity_size=1000000, metacommunity_speciation_rate=0.00005
        )
        cls.ct.add_metacommunity_parameters(
            metacommunity_option="analytical", metacommunity_size=1000000000, metacommunity_speciation_rate=0.1
        )
        cls.ct.apply()

    def testRichnessMatchness(self):
        """Analytical and simulated richness values approximately agree."""
        self.assertAlmostEqual(244, self.ct.get_species_richness(2), delta=10)
        self.assertAlmostEqual(self.ct.get_species_richness(1), self.ct.get_species_richness(2), delta=30)
        self.assertEqual(5212, self.ct.get_species_richness(3))

    def testSpeciesAbundances(self):
        """Abundance distributions roughly match between methods; pandas output is consistent."""
        sad_1 = [x[1] for x in self.ct.get_species_abundances(reference=1)]
        sad_2 = [x[1] for x in self.ct.get_species_abundances(reference=2)]
        mean_1 = sum(sad_1) / len(sad_1)
        mean_2 = sum(sad_2) / len(sad_2)
        # Check the mean abundance is roughly equivalent
        self.assertAlmostEqual(mean_1, mean_2, delta=10)
        # Check that the variances (mean absolute deviations) are roughly equivalent
        var_list_1 = [abs(x - mean_1) for x in sad_1]
        var_list_2 = [abs(x - mean_2) for x in sad_2]
        var_1 = sum(var_list_1) / len(var_list_1)
        var_2 = sum(var_list_2) / len(var_list_2)
        self.assertAlmostEqual(var_1, var_2, delta=5)
        # Cross-check the pandas abundances output against the per-reference getter.
        expected_abundances_list = []
        for reference in self.ct.get_community_references():
            for species_id, abundance in self.ct.get_species_abundances(reference=reference):
                expected_abundances_list.append(
                    {"community_reference": reference, "species_id": species_id, "no_individuals": abundance}
                )
        expected_abundances = pd.DataFrame(expected_abundances_list)
        actual_abundances = self.ct.get_species_abundances_pd()
        assert_frame_equal(actual_abundances, expected_abundances, check_like=True)
class TestMetacommunityApplicationOrdering(unittest.TestCase):
    """Checks that equivalent ways of supplying metacommunity (and protracted)
    parameters - inline in set_speciation_parameters versus via separate add_*
    calls, in either order - yield identical results."""

    @classmethod
    def setUpClass(cls):
        """Copy the sample databases and apply the same parameters through different call orders."""
        src = os.path.join("sample", "sample3.db")
        for i in [1, 2]:
            dst = os.path.join("output", "sample_order_{}.db".format(i))
            if os.path.exists(dst):
                os.remove(dst)
            shutil.copy(src, dst)
        src = os.path.join("sample", "sample5.db")
        for i in range(3, 6):
            dst = os.path.join("output", "sample_order_{}.db".format(i))
            if os.path.exists(dst):
                os.remove(dst)
            shutil.copy(src, dst)
        cls.c1 = CoalescenceTree(os.path.join("output", "sample_order_1.db"))
        cls.c2 = CoalescenceTree(os.path.join("output", "sample_order_2.db"))
        cls.proc1 = CoalescenceTree(os.path.join("output", "sample_order_3.db"))
        cls.proc2 = CoalescenceTree(os.path.join("output", "sample_order_4.db"))
        cls.proc3 = CoalescenceTree(os.path.join("output", "sample_order_5.db"))
        # c1: metacommunity parameters passed inline; c2: added separately.
        cls.c1.set_speciation_parameters(
            [0.1, 0.5, 0.9],
            metacommunity_speciation_rate=0.001,
            metacommunity_option="simulated",
            metacommunity_size=10000,
        )
        cls.c1.apply()
        cls.c2.set_speciation_parameters([0.1, 0.5, 0.9])
        cls.c2.add_metacommunity_parameters(
            metacommunity_size=10000, metacommunity_speciation_rate=0.001, metacommunity_option="simulated"
        )
        cls.c2.apply()
        # proc1/proc2/proc3: protracted + metacommunity parameters in three orderings.
        cls.proc1.set_speciation_parameters(
            [0.1, 0.5, 0.9],
            protracted_speciation_min=5,
            protracted_speciation_max=1000,
            metacommunity_option="simulated",
            metacommunity_speciation_rate=0.001,
            metacommunity_size=10000,
        )
        cls.proc1.apply()
        cls.proc2.set_speciation_parameters([0.1, 0.5, 0.9])
        cls.proc2.add_metacommunity_parameters(
            metacommunity_size=10000, metacommunity_speciation_rate=0.001, metacommunity_option="simulated"
        )
        cls.proc2.add_protracted_parameters(min_speciation_gen=5, max_speciation_gen=1000)
        cls.proc2.apply()
        cls.proc3.set_speciation_parameters([0.1, 0.5, 0.9])
        cls.proc3.add_protracted_parameters(min_speciation_gen=5, max_speciation_gen=1000)
        cls.proc3.add_metacommunity_parameters(
            metacommunity_size=10000, metacommunity_speciation_rate=0.001, metacommunity_option="simulated"
        )
        cls.proc3.apply()

    def testEquivalentMethodsMatch(self):
        """All equivalent parameterisations give identical species richness."""
        for i in range(1, 4):
            self.assertEqual(self.c1.get_species_richness(i), self.c2.get_species_richness(i))
            self.assertEqual(self.proc1.get_species_richness(i), self.proc2.get_species_richness(i))
            self.assertEqual(self.proc2.get_species_richness(i), self.proc3.get_species_richness(i))

    def testMultipleProtractedError(self):
        """Calling add_multiple_protracted_parameters with no arguments raises ValueError."""
        with self.assertRaises(ValueError):
            self.proc2.add_multiple_protracted_parameters()
class TestProtractedSpeciationEquality(unittest.TestCase):
    """Tests applying protracted speciation parameters to a sample database."""

    @classmethod
    def setUpClass(cls):
        """Copy the sample database and wipe any existing community data."""
        dst = os.path.join("output", "sample_protracted3.db")
        shutil.copy(os.path.join("sample", "sample3.db"), dst)
        cls.ct = CoalescenceTree(dst)
        cls.ct.wipe_data()

    def testApplyEqualParameters(self):
        """Applying with protracted min/max generations set yields the expected richness."""
        self.ct.set_speciation_parameters(
            [0.001, 0.1], protracted_speciation_min=100.0, protracted_speciation_max=10000.0
        )
        self.ct.apply()
        self.assertEqual(1, self.ct.get_species_richness(1))
        self.assertEqual(3, self.ct.get_species_richness(2))
class TestSpeciesAgesCalculations(unittest.TestCase):
    """Tests calculation and retrieval of species ages."""

    @classmethod
    def setUpClass(cls):
        """Copy the species-ages sample database into the output folder."""
        src = os.path.join("sample", "sample6.db")
        dst = os.path.join("output", "sample6.db")
        if os.path.exists(dst):
            os.remove(dst)
        shutil.copy(src, dst)
        cls.dst_file = dst

    def testSmallSimulation(self):
        """Species ages are only available after applying with record_ages=True,
        and match the expected CSV output both via pandas and the list getter."""
        tree = CoalescenceTree(logging_level=50)
        tree.set_database(self.dst_file)
        # Before any ages are recorded, both getters raise IOError...
        with self.assertRaises(IOError):
            _ = tree.get_species_ages()
        with self.assertRaises(IOError):
            _ = tree.get_species_ages_pd()
        tree.wipe_data()
        # ...and still do after wiping the community data.
        with self.assertRaises(IOError):
            _ = tree.get_species_ages()
        with self.assertRaises(IOError):
            _ = tree.get_species_ages_pd()
        tree.set_speciation_parameters(
            speciation_rates=[0.000001, 0.0001],
            record_spatial=False,
            record_ages=True,
        )
        tree.apply()
        self.assertTrue(check_sql_table_exist(tree.database, "SPECIES_AGES"))
        expected_df = pd.read_csv(os.path.join("sample", "expected_species_ages.csv"))
        actual_df = tree.get_species_ages_pd().reset_index(drop=True)
        assert_frame_equal(expected_df, actual_df)
        # Group by the bare column name rather than a one-element list: pandas >= 2.x
        # yields length-1 tuple keys for list groupers, which would pass a tuple to
        # get_species_ages(community_ref) below.
        for community_ref, group in expected_df.groupby("community_reference"):
            actual_output = sorted(tree.get_species_ages(community_ref), key=lambda x: x[0])
            expected_output = group.drop(columns=["community_reference"]).sort_values(by=["species_id"]).values.tolist()
            for ex, act in zip(expected_output, actual_output):
                self.assertEqual(ex[0], act[0])
                self.assertAlmostEqual(ex[1], act[1], delta=0.0000001)
| true | true |
f71e7fc0b5d8bec62882115f024b707c4da34b3b | 10,111 | py | Python | xapp-image-base/swagger/swagger_client/models/detailed_gear.py | martinsallandm/hw-xapp-python-lenovo | 2123289d3a5ea7122607dea8e8f0d03a348d131b | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | xapp-image-base/swagger/swagger_client/models/detailed_gear.py | martinsallandm/hw-xapp-python-lenovo | 2123289d3a5ea7122607dea8e8f0d03a348d131b | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | xapp-image-base/swagger/swagger_client/models/detailed_gear.py | martinsallandm/hw-xapp-python-lenovo | 2123289d3a5ea7122607dea8e8f0d03a348d131b | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | # coding: utf-8
"""
Strava API v3
The [Swagger Playground](https://developers.strava.com/playground) is the easiest way to familiarize yourself with the Strava API by submitting HTTP requests and observing the responses before you write any client code. It will show what a response will look like with different endpoints depending on the authorization scope you receive from your athletes. To use the Playground, go to https://www.strava.com/settings/api and change your “Authorization Callback Domain” to developers.strava.com. Please note, we only support Swagger 2.0. There is a known issue where you can only select one scope at a time. For more information, please check the section “client code” at https://developers.strava.com/docs. # noqa: E501
OpenAPI spec version: 3.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DetailedGear(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'id': 'str',
        'resource_state': 'int',
        'primary': 'bool',
        'name': 'str',
        'distance': 'float',
        'brand_name': 'str',
        'model_name': 'str',
        'frame_type': 'int',
        'description': 'str'
    }

    attribute_map = {
        'id': 'id',
        'resource_state': 'resource_state',
        'primary': 'primary',
        'name': 'name',
        'distance': 'distance',
        'brand_name': 'brand_name',
        'model_name': 'model_name',
        'frame_type': 'frame_type',
        'description': 'description'
    }

    def __init__(self, id=None, resource_state=None, primary=None, name=None, distance=None, brand_name=None, model_name=None, frame_type=None, description=None):  # noqa: E501
        """DetailedGear - a model defined in Swagger"""  # noqa: E501
        self._id = None
        self._resource_state = None
        self._primary = None
        self._name = None
        self._distance = None
        self._brand_name = None
        self._model_name = None
        self._frame_type = None
        self._description = None
        self.discriminator = None
        if id is not None:
            self.id = id
        if resource_state is not None:
            self.resource_state = resource_state
        if primary is not None:
            self.primary = primary
        if name is not None:
            self.name = name
        if distance is not None:
            self.distance = distance
        if brand_name is not None:
            self.brand_name = brand_name
        if model_name is not None:
            self.model_name = model_name
        if frame_type is not None:
            self.frame_type = frame_type
        if description is not None:
            self.description = description

    @property
    def id(self):
        """Gets the id of this DetailedGear.  # noqa: E501

        The gear's unique identifier.  # noqa: E501

        :return: The id of this DetailedGear.  # noqa: E501
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this DetailedGear.

        The gear's unique identifier.  # noqa: E501

        :param id: The id of this DetailedGear.  # noqa: E501
        :type: str
        """
        self._id = id

    @property
    def resource_state(self):
        """Gets the resource_state of this DetailedGear.  # noqa: E501

        Resource state, indicates level of detail. Possible values: 2 -> \"summary\", 3 -> \"detail\"  # noqa: E501

        :return: The resource_state of this DetailedGear.  # noqa: E501
        :rtype: int
        """
        return self._resource_state

    @resource_state.setter
    def resource_state(self, resource_state):
        """Sets the resource_state of this DetailedGear.

        Resource state, indicates level of detail. Possible values: 2 -> \"summary\", 3 -> \"detail\"  # noqa: E501

        :param resource_state: The resource_state of this DetailedGear.  # noqa: E501
        :type: int
        """
        self._resource_state = resource_state

    @property
    def primary(self):
        """Gets the primary of this DetailedGear.  # noqa: E501

        Whether this gear is the owner's default one.  # noqa: E501

        :return: The primary of this DetailedGear.  # noqa: E501
        :rtype: bool
        """
        return self._primary

    @primary.setter
    def primary(self, primary):
        """Sets the primary of this DetailedGear.

        Whether this gear is the owner's default one.  # noqa: E501

        :param primary: The primary of this DetailedGear.  # noqa: E501
        :type: bool
        """
        self._primary = primary

    @property
    def name(self):
        """Gets the name of this DetailedGear.  # noqa: E501

        The gear's name.  # noqa: E501

        :return: The name of this DetailedGear.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this DetailedGear.

        The gear's name.  # noqa: E501

        :param name: The name of this DetailedGear.  # noqa: E501
        :type: str
        """
        self._name = name

    @property
    def distance(self):
        """Gets the distance of this DetailedGear.  # noqa: E501

        The distance logged with this gear.  # noqa: E501

        :return: The distance of this DetailedGear.  # noqa: E501
        :rtype: float
        """
        return self._distance

    @distance.setter
    def distance(self, distance):
        """Sets the distance of this DetailedGear.

        The distance logged with this gear.  # noqa: E501

        :param distance: The distance of this DetailedGear.  # noqa: E501
        :type: float
        """
        self._distance = distance

    @property
    def brand_name(self):
        """Gets the brand_name of this DetailedGear.  # noqa: E501

        The gear's brand name.  # noqa: E501

        :return: The brand_name of this DetailedGear.  # noqa: E501
        :rtype: str
        """
        return self._brand_name

    @brand_name.setter
    def brand_name(self, brand_name):
        """Sets the brand_name of this DetailedGear.

        The gear's brand name.  # noqa: E501

        :param brand_name: The brand_name of this DetailedGear.  # noqa: E501
        :type: str
        """
        self._brand_name = brand_name

    @property
    def model_name(self):
        """Gets the model_name of this DetailedGear.  # noqa: E501

        The gear's model name.  # noqa: E501

        :return: The model_name of this DetailedGear.  # noqa: E501
        :rtype: str
        """
        return self._model_name

    @model_name.setter
    def model_name(self, model_name):
        """Sets the model_name of this DetailedGear.

        The gear's model name.  # noqa: E501

        :param model_name: The model_name of this DetailedGear.  # noqa: E501
        :type: str
        """
        self._model_name = model_name

    @property
    def frame_type(self):
        """Gets the frame_type of this DetailedGear.  # noqa: E501

        The gear's frame type (bike only).  # noqa: E501

        :return: The frame_type of this DetailedGear.  # noqa: E501
        :rtype: int
        """
        return self._frame_type

    @frame_type.setter
    def frame_type(self, frame_type):
        """Sets the frame_type of this DetailedGear.

        The gear's frame type (bike only).  # noqa: E501

        :param frame_type: The frame_type of this DetailedGear.  # noqa: E501
        :type: int
        """
        self._frame_type = frame_type

    @property
    def description(self):
        """Gets the description of this DetailedGear.  # noqa: E501

        The gear's description.  # noqa: E501

        :return: The description of this DetailedGear.  # noqa: E501
        :rtype: str
        """
        return self._description

    @description.setter
    def description(self, description):
        """Sets the description of this DetailedGear.

        The gear's description.  # noqa: E501

        :param description: The description of this DetailedGear.  # noqa: E501
        :type: str
        """
        self._description = description

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(DetailedGear, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DetailedGear):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 29.564327 | 726 | 0.588666 |
import pprint
import re
import six
class DetailedGear(object):
    """Swagger model for a piece of gear with full detail fields.

    Auto-generated model class: plain attribute storage behind simple
    property accessors, plus dict/string serialisation helpers.
    """

    # Attribute name -> swagger type.
    swagger_types = {
        'id': 'str',
        'resource_state': 'int',
        'primary': 'bool',
        'name': 'str',
        'distance': 'float',
        'brand_name': 'str',
        'model_name': 'str',
        'frame_type': 'int',
        'description': 'str'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'id': 'id',
        'resource_state': 'resource_state',
        'primary': 'primary',
        'name': 'name',
        'distance': 'distance',
        'brand_name': 'brand_name',
        'model_name': 'model_name',
        'frame_type': 'frame_type',
        'description': 'description'
    }

    def __init__(self, id=None, resource_state=None, primary=None, name=None, distance=None, brand_name=None, model_name=None, frame_type=None, description=None):
        """Initialise every field to None, then assign any values provided."""
        self._id = None
        self._resource_state = None
        self._primary = None
        self._name = None
        self._distance = None
        self._brand_name = None
        self._model_name = None
        self._frame_type = None
        self._description = None
        self.discriminator = None
        if id is not None:
            self.id = id
        if resource_state is not None:
            self.resource_state = resource_state
        if primary is not None:
            self.primary = primary
        if name is not None:
            self.name = name
        if distance is not None:
            self.distance = distance
        if brand_name is not None:
            self.brand_name = brand_name
        if model_name is not None:
            self.model_name = model_name
        if frame_type is not None:
            self.frame_type = frame_type
        if description is not None:
            self.description = description

    # Plain pass-through accessors for each model field; the backing
    # attributes are the underscore-prefixed names set in __init__.
    @property
    def id(self):
        """The gear's unique identifier (str)."""
        return self._id

    @id.setter
    def id(self, id):
        self._id = id

    @property
    def resource_state(self):
        """Level of detail (int): 2 -> summary, 3 -> detail."""
        return self._resource_state

    @resource_state.setter
    def resource_state(self, resource_state):
        self._resource_state = resource_state

    @property
    def primary(self):
        """Whether this gear is the owner's default one (bool)."""
        return self._primary

    @primary.setter
    def primary(self, primary):
        self._primary = primary

    @property
    def name(self):
        """The gear's name (str)."""
        return self._name

    @name.setter
    def name(self, name):
        self._name = name

    @property
    def distance(self):
        """The distance logged with this gear (float)."""
        return self._distance

    @distance.setter
    def distance(self, distance):
        self._distance = distance

    @property
    def brand_name(self):
        """The gear's brand name (str)."""
        return self._brand_name

    @brand_name.setter
    def brand_name(self, brand_name):
        self._brand_name = brand_name

    @property
    def model_name(self):
        """The gear's model name (str)."""
        return self._model_name

    @model_name.setter
    def model_name(self, model_name):
        self._model_name = model_name

    @property
    def frame_type(self):
        """The gear's frame type (int, bike only)."""
        return self._frame_type

    @frame_type.setter
    def frame_type(self, frame_type):
        self._frame_type = frame_type

    @property
    def description(self):
        """The gear's description (str)."""
        return self._description

    @description.setter
    def description(self, description):
        self._description = description

    def to_dict(self):
        """Return the model properties as a dict, recursing into nested models."""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(DetailedGear, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True if both objects are DetailedGear with equal attributes."""
        if not isinstance(other, DetailedGear):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True if the objects are not equal."""
        return not self == other
f71e805336c27b6f2b7f452d5fbab1f6282e8202 | 2,315 | py | Python | aliyun-python-sdk-bssopenapi/aliyunsdkbssopenapi/request/v20171214/QueryResourcePackageInstancesRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-bssopenapi/aliyunsdkbssopenapi/request/v20171214/QueryResourcePackageInstancesRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-bssopenapi/aliyunsdkbssopenapi/request/v20171214/QueryResourcePackageInstancesRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkbssopenapi.endpoint import endpoint_data
class QueryResourcePackageInstancesRequest(RpcRequest):
    """RPC request for the BssOpenApi ``QueryResourcePackageInstances`` action
    (API version 2017-12-14), with getter/setter pairs for each query parameter."""

    def __init__(self):
        RpcRequest.__init__(self, 'BssOpenApi', '2017-12-14', 'QueryResourcePackageInstances')
        # Attach endpoint resolution data when the base class supports it.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    # Generated accessors: each pair reads/writes one named query parameter.
    def get_ExpiryTimeEnd(self):
        return self.get_query_params().get('ExpiryTimeEnd')

    def set_ExpiryTimeEnd(self, ExpiryTimeEnd):
        self.add_query_param('ExpiryTimeEnd', ExpiryTimeEnd)

    def get_ProductCode(self):
        return self.get_query_params().get('ProductCode')

    def set_ProductCode(self, ProductCode):
        self.add_query_param('ProductCode', ProductCode)

    def get_OwnerId(self):
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self, OwnerId):
        self.add_query_param('OwnerId', OwnerId)

    def get_ExpiryTimeStart(self):
        return self.get_query_params().get('ExpiryTimeStart')

    def set_ExpiryTimeStart(self, ExpiryTimeStart):
        self.add_query_param('ExpiryTimeStart', ExpiryTimeStart)

    def get_PageNum(self):
        return self.get_query_params().get('PageNum')

    def set_PageNum(self, PageNum):
        self.add_query_param('PageNum', PageNum)

    def get_PageSize(self):
        return self.get_query_params().get('PageSize')

    def set_PageSize(self, PageSize):
        self.add_query_param('PageSize', PageSize)
self.add_query_param('PageSize',PageSize) | 34.552239 | 88 | 0.770194 |
from aliyunsdkcore.request import RpcRequest
from aliyunsdkbssopenapi.endpoint import endpoint_data
class QueryResourcePackageInstancesRequest(RpcRequest):
    """BssOpenApi 2017-12-14 'QueryResourcePackageInstances' RPC request.

    Thin wrapper that stores each query parameter (ExpiryTimeStart,
    ExpiryTimeEnd, ProductCode, OwnerId, PageNum, PageSize) via the
    RpcRequest query-param API.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'BssOpenApi', '2017-12-14', 'QueryResourcePackageInstances')
        # Endpoint data is only wired up when the base class defines the slots.
        if hasattr(self, "endpoint_map"):
            self.endpoint_map = endpoint_data.getEndpointMap()
        if hasattr(self, "endpoint_regional"):
            self.endpoint_regional = endpoint_data.getEndpointRegional()

    def get_ExpiryTimeEnd(self):
        """Return the 'ExpiryTimeEnd' query parameter (or None)."""
        return self.get_query_params().get('ExpiryTimeEnd')

    def set_ExpiryTimeEnd(self, ExpiryTimeEnd):
        self.add_query_param('ExpiryTimeEnd', ExpiryTimeEnd)

    def get_ProductCode(self):
        """Return the 'ProductCode' query parameter (or None)."""
        return self.get_query_params().get('ProductCode')

    def set_ProductCode(self, ProductCode):
        self.add_query_param('ProductCode', ProductCode)

    def get_OwnerId(self):
        """Return the 'OwnerId' query parameter (or None)."""
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self, OwnerId):
        self.add_query_param('OwnerId', OwnerId)

    def get_ExpiryTimeStart(self):
        """Return the 'ExpiryTimeStart' query parameter (or None)."""
        return self.get_query_params().get('ExpiryTimeStart')

    def set_ExpiryTimeStart(self, ExpiryTimeStart):
        self.add_query_param('ExpiryTimeStart', ExpiryTimeStart)

    def get_PageNum(self):
        """Return the 'PageNum' query parameter (or None)."""
        return self.get_query_params().get('PageNum')

    def set_PageNum(self, PageNum):
        self.add_query_param('PageNum', PageNum)

    def get_PageSize(self):
        """Return the 'PageSize' query parameter (or None)."""
        return self.get_query_params().get('PageSize')

    def set_PageSize(self, PageSize):
        self.add_query_param('PageSize', PageSize)
f71e80a48abdef12de18705d859ad1449bdec6da | 7,884 | py | Python | xAIbenchmark.py | cmougan/OODBenchmark | e5d7b9540840afe64f6a00139cbc41a44ed01a80 | [
"MIT"
] | null | null | null | xAIbenchmark.py | cmougan/OODBenchmark | e5d7b9540840afe64f6a00139cbc41a44ed01a80 | [
"MIT"
] | null | null | null | xAIbenchmark.py | cmougan/OODBenchmark | e5d7b9540840afe64f6a00139cbc41a44ed01a80 | [
"MIT"
] | null | null | null | # %%
from pmlb import fetch_data
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_predict, KFold
from sklearn.metrics import mean_squared_error, roc_auc_score
from tqdm import tqdm
import pandas as pd
import numpy as np
from collections import defaultdict
import warnings
import re
import traceback
from pmlb import classification_dataset_names, regression_dataset_names
from benchmark import benchmark_experiment
from sklearn.linear_model import Lasso, LinearRegression
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.tree import DecisionTreeRegressor
import warnings
from fairtools.xaiUtils import ShapEstimator
import xgboost
warnings.filterwarnings("ignore")
# %%
def benchmark_experiment(datasets: list, model, classification: str = "classification"):
    """Run a distribution-shift benchmark over a list of PMLB datasets.

    For every dataset and every feature column, the rows are sorted by that
    column and split into a lower third, a middle third and an upper third.
    ``model`` is trained on the middle third only; the two outer thirds act
    as out-of-distribution (OOD) data.  Train, cross-validated test and OOD
    errors are accumulated per (dataset, column) and written to
    ``results/<ModelName><suffix>.csv``.

    :param datasets: PMLB dataset names, fetched with ``pmlb.fetch_data``.
    :param model: scikit-learn compatible estimator (fitted in place).
    :param classification: experiment type -- ``"classification"`` (AUC),
        ``"regression"`` (MSE) or ``"explainableAI"`` (MSE, with ``model``
        trained on SHAP values produced by a ``ShapEstimator``).
    :raises AssertionError: if ``classification`` is not a known type.
    """
    assert classification in [
        "classification",
        "regression",
        "explainableAI",
    ], "Classification type introduced --{}-- does not match: classification,regression,explainableAI".format(
        classification
    )
    # Output-file suffix per experiment type.  Membership is guaranteed by the
    # assert above; the previous `raise "..."` fallback was itself a bug,
    # since raising a non-exception is a TypeError in Python 3.
    extension = {
        "classification": "_clas",
        "regression": "_reg",
        "explainableAI": "_explain",
    }[classification]
    # The model name depends only on `model`; compute it once so it is also
    # defined for the output path even if every dataset fails.
    model_name = re.sub("[^A-Za-z0-9]+", "", str(type(model)).split(".")[-1])
    results = defaultdict()
    for i, dataset in enumerate(datasets):
        try:
            # Initialise the scaler and load + standardise the dataset.
            standard_scaler = StandardScaler()
            X, y = fetch_data(dataset, return_X_y=True, local_cache_dir="data/")
            X = standard_scaler.fit_transform(X)
            # NOTE(review): `classification` is always a string here, so this
            # comparison is always False and the target is never scaled;
            # presumably `classification == "regression"` was intended --
            # confirm before changing, as it would alter reported errors.
            if classification == False:
                y = standard_scaler.fit_transform(y.reshape(-1, 1))
            # Back to a dataframe with synthetic column names.
            X = pd.DataFrame(
                X, columns=["Var %d" % (j + 1) for j in range(X.shape[1])]
            )
            data = X.copy()
            data["target"] = y
            # Min and max data limits for the experiment.
            if X.shape[0] < 100:
                continue
            if X.shape[0] > 100_000:
                continue
            # Sorted-split boundaries: lower third / middle / upper third.
            fracc = 0.33
            oneThird = int(data.shape[0] * fracc)
            twoThird = data.shape[0] - int(data.shape[0] * fracc)
            for idx, col in tqdm(enumerate(X.columns), total=len(X.columns)):
                # Sort on the current column so the outer thirds are OOD
                # with respect to that feature.
                data = data.sort_values(col).reset_index(drop=True).copy()
                data_sub = data.iloc[:oneThird]
                data_train = data.iloc[oneThird:twoThird]
                data_up = data.iloc[twoThird:]
                X_tr = data_train.drop(columns="target")
                X_sub = data_sub.drop(columns="target")
                X_up = data_up.drop(columns="target")
                y_tr = data_train[["target"]].target.values
                y_sub = data_sub[["target"]].target.values
                y_up = data_up[["target"]].target.values
                # OOD data = both tails.  pd.concat replaces the deprecated
                # DataFrame.append (removed in pandas 2.0).
                X_ood = pd.concat([X_sub, X_up])
                y_ood = np.concatenate((y_sub, y_up))
                if classification == "classification":
                    # Cross-validated test predictions on the training split.
                    pred_test = cross_val_predict(
                        estimator=model,
                        X=X_tr,
                        y=y_tr,
                        cv=KFold(n_splits=5, shuffle=True, random_state=0),
                        method="predict_proba",
                    )[:, 1]
                    model.fit(X_tr, y_tr)
                    pred_train = model.predict_proba(X_tr)[:, 1]
                    pred_ood = model.predict_proba(X_ood)[:, 1]
                    # NOTE(review): these are AUC scores (higher is better),
                    # unlike the MSE "errors" of the other branches.
                    train_error = roc_auc_score(y_tr, pred_train)
                    test_error = roc_auc_score(y_tr, pred_test)
                    ood_error = roc_auc_score(y_ood, pred_ood)
                elif classification == "regression":
                    pred_test = cross_val_predict(
                        estimator=model,
                        X=X_tr,
                        y=y_tr,
                        cv=KFold(n_splits=5, shuffle=True, random_state=0),
                    )
                    model.fit(X_tr, y_tr)
                    pred_train = model.predict(X_tr)
                    pred_ood = model.predict(X_ood)
                    train_error = mean_squared_error(pred_train, y_tr)
                    test_error = mean_squared_error(pred_test, y_tr)
                    ood_error = mean_squared_error(pred_ood, y_ood)
                elif classification == "explainableAI":
                    # Train `model` on SHAP values instead of raw features.
                    se = ShapEstimator(model=xgboost.XGBRegressor())
                    shap_pred_tr = cross_val_predict(se, X_tr, y_tr, cv=3)
                    pred_test = cross_val_predict(
                        estimator=model,
                        X=shap_pred_tr,
                        y=y_tr,
                        cv=KFold(n_splits=5, shuffle=True, random_state=0),
                    )
                    se.fit(X_tr, y_tr)
                    model.fit(shap_pred_tr, y_tr)
                    pred_train = model.predict(shap_pred_tr)
                    # SHAP values for the OOD rows, then predict on them.
                    shap_pred_ood = se.predict(X_ood)
                    pred_ood = model.predict(shap_pred_ood)
                    train_error = mean_squared_error(pred_train, y_tr)
                    test_error = mean_squared_error(pred_test, y_tr)
                    ood_error = mean_squared_error(pred_ood, y_ood)
                # Gap metrics shared by every experiment type.
                generalizationError = test_error - train_error
                ood_performance = ood_error - test_error
                name = dataset + "_column_" + col
                results[name] = [
                    train_error,
                    test_error,
                    ood_error,
                    generalizationError,
                    ood_performance,
                    model_name,
                ]
        except Exception:
            # Best-effort benchmark: log the failure and continue.
            print(traceback.format_exc())
            print("Not Working:", dataset)
            # NOTE(review): `dataset` is a name string, so this prints the
            # length of the name, not the dataset shape.
            print("Dataset shape:", len(dataset))
    df = pd.DataFrame(data=results).T
    df.columns = [
        "trainError",
        "testError",
        "oodError",
        "generalizationError",
        "oodPerformance",
        "model",
    ]
    df.to_csv("results/" + model_name + extension + ".csv")
# %%
# Smoke-test sample: the first 10 PMLB regression datasets.
regression_dataset_names_sample = regression_dataset_names[:10]
# %%
# Models to benchmark; benchmark_experiment writes one results CSV per model.
modelitos = [
    GradientBoostingRegressor(),
]
for m in modelitos:
    benchmark_experiment(
        datasets=regression_dataset_names_sample,
        model=m,
        classification="explainableAI",
    )
# %%
| 35.513514 | 110 | 0.535769 |
from pmlb import fetch_data
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_predict, KFold
from sklearn.metrics import mean_squared_error, roc_auc_score
from tqdm import tqdm
import pandas as pd
import numpy as np
from collections import defaultdict
import warnings
import re
import traceback
from pmlb import classification_dataset_names, regression_dataset_names
from benchmark import benchmark_experiment
from sklearn.linear_model import Lasso, LinearRegression
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.tree import DecisionTreeRegressor
import warnings
from fairtools.xaiUtils import ShapEstimator
import xgboost
warnings.filterwarnings("ignore")
def benchmark_experiment(datasets: list, model, classification: str = "classification"):
    """Run a distribution-shift benchmark over a list of PMLB datasets.

    For every dataset and every feature column, the rows are sorted by that
    column and split into a lower third, a middle third and an upper third.
    ``model`` is trained on the middle third only; the two outer thirds act
    as out-of-distribution (OOD) data.  Train, cross-validated test and OOD
    errors are accumulated per (dataset, column) and written to
    ``results/<ModelName><suffix>.csv``.

    :param datasets: PMLB dataset names, fetched with ``pmlb.fetch_data``.
    :param model: scikit-learn compatible estimator (fitted in place).
    :param classification: experiment type -- ``"classification"`` (AUC),
        ``"regression"`` (MSE) or ``"explainableAI"`` (MSE, with ``model``
        trained on SHAP values produced by a ``ShapEstimator``).
    :raises AssertionError: if ``classification`` is not a known type.
    """
    assert classification in [
        "classification",
        "regression",
        "explainableAI",
    ], "Classification type introduced --{}-- does not match: classification,regression,explainableAI".format(
        classification
    )
    # Output-file suffix per experiment type.  Membership is guaranteed by the
    # assert above; the previous `raise "..."` fallback was itself a bug,
    # since raising a non-exception is a TypeError in Python 3.
    extension = {
        "classification": "_clas",
        "regression": "_reg",
        "explainableAI": "_explain",
    }[classification]
    # The model name depends only on `model`; compute it once so it is also
    # defined for the output path even if every dataset fails.
    model_name = re.sub("[^A-Za-z0-9]+", "", str(type(model)).split(".")[-1])
    results = defaultdict()
    for i, dataset in enumerate(datasets):
        try:
            standard_scaler = StandardScaler()
            X, y = fetch_data(dataset, return_X_y=True, local_cache_dir="data/")
            X = standard_scaler.fit_transform(X)
            # NOTE(review): `classification` is always a string here, so this
            # comparison is always False and the target is never scaled;
            # presumably `classification == "regression"` was intended --
            # confirm before changing, as it would alter reported errors.
            if classification == False:
                y = standard_scaler.fit_transform(y.reshape(-1, 1))
            X = pd.DataFrame(
                X, columns=["Var %d" % (j + 1) for j in range(X.shape[1])]
            )
            data = X.copy()
            data["target"] = y
            # Dataset-size limits for the experiment.
            if X.shape[0] < 100:
                continue
            if X.shape[0] > 100_000:
                continue
            # Sorted-split boundaries: lower third / middle / upper third.
            fracc = 0.33
            oneThird = int(data.shape[0] * fracc)
            twoThird = data.shape[0] - int(data.shape[0] * fracc)
            for idx, col in tqdm(enumerate(X.columns), total=len(X.columns)):
                # Sort on the current column so the outer thirds are OOD
                # with respect to that feature.
                data = data.sort_values(col).reset_index(drop=True).copy()
                data_sub = data.iloc[:oneThird]
                data_train = data.iloc[oneThird:twoThird]
                data_up = data.iloc[twoThird:]
                X_tr = data_train.drop(columns="target")
                X_sub = data_sub.drop(columns="target")
                X_up = data_up.drop(columns="target")
                y_tr = data_train[["target"]].target.values
                y_sub = data_sub[["target"]].target.values
                y_up = data_up[["target"]].target.values
                # OOD data = both tails.  This fixes the NameError in the
                # explainableAI branch (`od = ...` followed by a use of
                # `X_ood`), and pd.concat replaces the deprecated
                # DataFrame.append (removed in pandas 2.0).
                X_ood = pd.concat([X_sub, X_up])
                y_ood = np.concatenate((y_sub, y_up))
                if classification == "classification":
                    # Cross-validated test predictions on the training split.
                    pred_test = cross_val_predict(
                        estimator=model,
                        X=X_tr,
                        y=y_tr,
                        cv=KFold(n_splits=5, shuffle=True, random_state=0),
                        method="predict_proba",
                    )[:, 1]
                    model.fit(X_tr, y_tr)
                    pred_train = model.predict_proba(X_tr)[:, 1]
                    pred_ood = model.predict_proba(X_ood)[:, 1]
                    # AUC scores (higher is better), unlike the MSE branches.
                    train_error = roc_auc_score(y_tr, pred_train)
                    test_error = roc_auc_score(y_tr, pred_test)
                    ood_error = roc_auc_score(y_ood, pred_ood)
                elif classification == "regression":
                    pred_test = cross_val_predict(
                        estimator=model,
                        X=X_tr,
                        y=y_tr,
                        cv=KFold(n_splits=5, shuffle=True, random_state=0),
                    )
                    model.fit(X_tr, y_tr)
                    pred_train = model.predict(X_tr)
                    pred_ood = model.predict(X_ood)
                    train_error = mean_squared_error(pred_train, y_tr)
                    test_error = mean_squared_error(pred_test, y_tr)
                    ood_error = mean_squared_error(pred_ood, y_ood)
                elif classification == "explainableAI":
                    # Train `model` on SHAP values instead of raw features.
                    se = ShapEstimator(model=xgboost.XGBRegressor())
                    shap_pred_tr = cross_val_predict(se, X_tr, y_tr, cv=3)
                    pred_test = cross_val_predict(
                        estimator=model,
                        X=shap_pred_tr,
                        y=y_tr,
                        cv=KFold(n_splits=5, shuffle=True, random_state=0),
                    )
                    se.fit(X_tr, y_tr)
                    model.fit(shap_pred_tr, y_tr)
                    pred_train = model.predict(shap_pred_tr)
                    # SHAP values for the OOD rows, then predict on them.
                    shap_pred_ood = se.predict(X_ood)
                    pred_ood = model.predict(shap_pred_ood)
                    train_error = mean_squared_error(pred_train, y_tr)
                    test_error = mean_squared_error(pred_test, y_tr)
                    ood_error = mean_squared_error(pred_ood, y_ood)
                # Gap metrics shared by every experiment type.
                generalizationError = test_error - train_error
                ood_performance = ood_error - test_error
                name = dataset + "_column_" + col
                results[name] = [
                    train_error,
                    test_error,
                    ood_error,
                    generalizationError,
                    ood_performance,
                    model_name,
                ]
        except Exception:
            # Best-effort benchmark: log the failure and continue.
            print(traceback.format_exc())
            print("Not Working:", dataset)
            # NOTE(review): `dataset` is a name string, so this prints the
            # length of the name, not the dataset shape.
            print("Dataset shape:", len(dataset))
    df = pd.DataFrame(data=results).T
    df.columns = [
        "trainError",
        "testError",
        "oodError",
        "generalizationError",
        "oodPerformance",
        "model",
    ]
    df.to_csv("results/" + model_name + extension + ".csv")
# Smoke-test sample: the first 10 PMLB regression datasets.
regression_dataset_names_sample = regression_dataset_names[:10]
# Models to benchmark; benchmark_experiment writes one results CSV per model.
modelitos = [
    GradientBoostingRegressor(),
]
for m in modelitos:
    benchmark_experiment(
        datasets=regression_dataset_names_sample,
        model=m,
        classification="explainableAI",
    )
| true | true |
f71e80df92838cc46c6391213b7e32330e5f7bba | 2,216 | py | Python | count_calls/count_calls.py | chimicus/addons | 0fa1110df999fc9a8622a12e00453fc67b62fce1 | [
"BSD-3-Clause"
] | null | null | null | count_calls/count_calls.py | chimicus/addons | 0fa1110df999fc9a8622a12e00453fc67b62fce1 | [
"BSD-3-Clause"
] | 6 | 2019-08-23T15:53:05.000Z | 2021-07-14T08:24:06.000Z | count_calls/count_calls.py | chimicus/addons | 0fa1110df999fc9a8622a12e00453fc67b62fce1 | [
"BSD-3-Clause"
] | 3 | 2019-11-04T12:02:11.000Z | 2020-03-05T13:57:11.000Z | #! /usr/bin/env udb-automate
import sys
import textwrap
from undodb.udb_launcher import (
REDIRECTION_COLLECT,
UdbLauncher,
)
def main(argv):
    """Count how often a function is hit in an Undo recording.

    Launches UDB on the recording with ``count_calls_extension`` loaded and
    prints the hit count the extension reports back.

    :param argv: full command line, ``[prog, RECORDING_FILE, FUNCTION_NAME]``.
    :raises SystemExit: with code 1 on a bad command line, or with UDB's own
        exit code when the debugger run fails.
    """
    # Get the arguments from the command line.
    try:
        recording, func_name = argv[1:]
    except ValueError:
        # Wrong number of arguments.  Use the argv that was actually passed
        # in (the old code read the global sys.argv[0], which is wrong when
        # main() is called programmatically); fall back for an empty argv.
        prog = argv[0] if argv else sys.argv[0]
        print("{} RECORDING_FILE FUNCTION_NAME".format(prog))
        raise SystemExit(1)
    # Prepare for launching UDB with our recording.
    launcher = UdbLauncher()
    launcher.recording_file = recording
    # Load count_calls_extension.py from the current directory.
    launcher.add_extension("count_calls_extension")
    # run_data is a dict of arbitrary (picklable) data handed to the
    # extension; tell it which function name to count.
    launcher.run_data["func_name"] = func_name
    # Launch UDB, collecting the output so it is only shown on error.
    res = launcher.run_debugger(redirect_debugger_output=REDIRECTION_COLLECT)
    if res.exit_code == 0:
        # All good as UDB exited with exit code 0 (i.e. no errors).
        print(
            'The recording hit "{}" {} time(s).'.format(
                func_name,
                # result_data is the reverse channel of run_data: data the
                # extension sends back to this script.
                res.result_data["hit-count"],
            )
        )
    else:
        # Something went wrong: show UDB's output and propagate its exit code.
        print(
            textwrap.dedent(
                """\
                Error!
                UDB exited with code {res.exit_code}.
                The output was:
                {res.output}
                """
            ).format(res=res),
            file=sys.stderr,
        )
        raise SystemExit(res.exit_code)


if __name__ == "__main__":
    main(sys.argv)
| 31.657143 | 84 | 0.610108 |
import sys
import textwrap
from undodb.udb_launcher import (
REDIRECTION_COLLECT,
UdbLauncher,
)
def main(argv):
    """Report how many times a function is hit in an Undo recording.

    argv is the full command line: [prog, RECORDING_FILE, FUNCTION_NAME].
    Exits with code 1 on a bad command line, or with UDB's own exit code
    if the debugger run fails.
    """
    try:
        recording, func_name = argv[1:]
    except ValueError:
        # Wrong number of arguments: print usage and bail out.
        print("{} RECORDING_FILE FUNCTION_NAME".format(sys.argv[0]))
        raise SystemExit(1)
    # Configure UDB: the recording to replay plus the helper extension
    # (count_calls_extension.py, from the current directory) that does the
    # actual counting.
    launcher = UdbLauncher()
    launcher.recording_file = recording
    launcher.add_extension("count_calls_extension")
    # run_data is an arbitrary (picklable) dict passed to the extension;
    # tell it which function name to count.
    launcher.run_data["func_name"] = func_name
    # Collect the debugger output so it is only shown to the user on error.
    res = launcher.run_debugger(redirect_debugger_output=REDIRECTION_COLLECT)
    if res.exit_code == 0:
        # All good as UDB exited with exit code 0 (i.e. no errors).
        print(
            'The recording hit "{}" {} time(s).'.format(
                func_name,
                # result_data is the reverse channel of run_data: data the
                # extension sends back to this script.
                res.result_data["hit-count"],
            )
        )
    else:
        # Something went wrong: show UDB's output and propagate its exit code.
        print(
            textwrap.dedent(
                """\
                Error!
                UDB exited with code {res.exit_code}.
                The output was:
                {res.output}
                """
            ).format(res=res),
            file=sys.stderr,
        )
        raise SystemExit(res.exit_code)
if __name__ == "__main__":
    main(sys.argv)
| true | true |
f71e8181713f3cb6ee3858ac80a6422e514e559e | 17,192 | py | Python | obswebsocket/events.py | SirCleric/obs-websocket-py | f04104ab1db7f9164d0d9fe9842232450fc72048 | [
"MIT"
] | null | null | null | obswebsocket/events.py | SirCleric/obs-websocket-py | f04104ab1db7f9164d0d9fe9842232450fc72048 | [
"MIT"
] | null | null | null | obswebsocket/events.py | SirCleric/obs-websocket-py | f04104ab1db7f9164d0d9fe9842232450fc72048 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
### THIS FILE WAS GENERATED BY generate_classes.py - DO NOT EDIT ###
### (Generated on 2018-11-01 23:48:48.307368) ###
from . import base_classes
class SourceOrderChanged(base_classes.Baseevents):
"""Scene items have been reordered.
:Returns:
*name*
type: String
Name of the scene where items have been reordered.
*sources*
type: Array
Array of sources.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "SourceOrderChanged"
self.datain["name"] = None
self.datain["sources"] = None
def getName(self):
return self.datain["name"]
def getSources(self):
return self.datain["sources"]
class SceneItemTransformChanged(base_classes.Baseevents):
    """An item's transform has been changed.

    :Returns:
       *scene_name*
            type: String
            Name of the scene.

       *item_name*
            type: String
            Name of the item in the scene.

       *item_id*
            type: int
            Scene item ID.

       *transform*
            type: SceneItemTransform
            New transform of the scene item.
    """
    def __init__(self):
        base_classes.Baseevents.__init__(self)
        self.name = "SceneItemTransformChanged"
        self.datain["scene-name"] = None
        self.datain["item-name"] = None
        self.datain["item-id"] = None
        self.datain["transform"] = None

    def getSceneName(self):
        return self.datain["scene-name"]

    def getItemName(self):
        return self.datain["item-name"]

    def getItemId(self):
        return self.datain["item-id"]

    def getItemTransform(self):
        return self.datain["transform"]
class SceneItemAdded(base_classes.Baseevents):
"""An item has been added to the current scene.
:Returns:
*scene_name*
type: String
Name of the scene.
*item_name*
type: String
Name of the item added to the scene.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "SceneItemAdded"
self.datain["scene-name"] = None
self.datain["item-name"] = None
def getSceneName(self):
return self.datain["scene-name"]
def getItemName(self):
return self.datain["item-name"]
class SceneItemRemoved(base_classes.Baseevents):
"""An item has been removed from the current scene.
:Returns:
*scene_name*
type: String
Name of the scene.
*item_name*
type: String
Name of the item removed from the scene.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "SceneItemRemoved"
self.datain["scene-name"] = None
self.datain["item-name"] = None
def getSceneName(self):
return self.datain["scene-name"]
def getItemName(self):
return self.datain["item-name"]
class SceneItemVisibilityChanged(base_classes.Baseevents):
"""An item's visibility has been toggled.
:Returns:
*scene_name*
type: String
Name of the scene.
*item_name*
type: String
Name of the item in the scene.
*item_visible*
type: boolean
New visibility state of the item.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "SceneItemVisibilityChanged"
self.datain["scene-name"] = None
self.datain["item-name"] = None
self.datain["item-visible"] = None
def getSceneName(self):
return self.datain["scene-name"]
def getItemName(self):
return self.datain["item-name"]
def getItemVisible(self):
return self.datain["item-visible"]
class PreviewSceneChanged(base_classes.Baseevents):
"""The selected preview scene has changed (only available in Studio Mode).
:Returns:
*scene_name*
type: String
Name of the scene being previewed.
*sources*
type: Source|Array
List of sources composing the scene. Same specification as [`GetCurrentScene`](#getcurrentscene).
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "PreviewSceneChanged"
self.datain["scene-name"] = None
self.datain["sources"] = None
def getSceneName(self):
return self.datain["scene-name"]
def getSources(self):
return self.datain["sources"]
class StudioModeSwitched(base_classes.Baseevents):
"""Studio Mode has been enabled or disabled.
:Returns:
*new_state*
type: boolean
The new enabled state of Studio Mode.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "StudioModeSwitched"
self.datain["new-state"] = None
def getNewState(self):
return self.datain["new-state"]
class ReplayStarting(base_classes.Baseevents):
"""A request to start the replay buffer has been issued.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "ReplayStarting"
class ReplayStarted(base_classes.Baseevents):
"""Replay Buffer started successfully
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "ReplayStarted"
class ReplayStopping(base_classes.Baseevents):
"""A request to stop the replay buffer has been issued.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "ReplayStopping"
class ReplayStopped(base_classes.Baseevents):
"""Replay Buffer stopped successfully
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "ReplayStopped"
class SwitchScenes(base_classes.Baseevents):
"""Indicates a scene change.
:Returns:
*scene_name*
type: String
The new scene.
*sources*
type: Array
List of sources in the new scene.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "SwitchScenes"
self.datain["scene-name"] = None
self.datain["sources"] = None
def getSceneName(self):
return self.datain["scene-name"]
def getSources(self):
return self.datain["sources"]
class ScenesChanged(base_classes.Baseevents):
"""The scene list has been modified.
Scenes have been added, removed, or renamed.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "ScenesChanged"
class SceneCollectionChanged(base_classes.Baseevents):
"""Triggered when switching to another scene collection or when renaming the current scene collection.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "SceneCollectionChanged"
class SceneCollectionListChanged(base_classes.Baseevents):
"""Triggered when a scene collection is created, added, renamed, or removed.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "SceneCollectionListChanged"
class ProfileChanged(base_classes.Baseevents):
"""Triggered when switching to another profile or when renaming the current profile.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "ProfileChanged"
class ProfileListChanged(base_classes.Baseevents):
"""Triggered when a profile is created, added, renamed, or removed.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "ProfileListChanged"
class Heartbeat(base_classes.Baseevents):
"""Emitted every 2 seconds after enabling it by calling SetHeartbeat.
:Returns:
*pulse*
type: boolean
Toggles between every JSON message as an "I am alive" indicator.
*current_profile*
type: string (optional)
Current active profile.
*current_scene*
type: string (optional)
Current active scene.
*streaming*
type: boolean (optional)
Current streaming state.
*total_stream_time*
type: int (optional)
Total time (in seconds) since the stream started.
*total_stream_bytes*
type: int (optional)
Total bytes sent since the stream started.
*total_stream_frames*
type: int (optional)
Total frames streamed since the stream started.
*recording*
type: boolean (optional)
Current recording state.
*total_record_time*
type: int (optional)
Total time (in seconds) since recording started.
*total_record_bytes*
type: int (optional)
Total bytes recorded since the recording started.
*total_record_frames*
type: int (optional)
Total frames recorded since the recording started.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "Heartbeat"
self.datain["pulse"] = None
self.datain["current-profile"] = None
self.datain["current-scene"] = None
self.datain["streaming"] = None
self.datain["total-stream-time"] = None
self.datain["total-stream-bytes"] = None
self.datain["total-stream-frames"] = None
self.datain["recording"] = None
self.datain["total-record-time"] = None
self.datain["total-record-bytes"] = None
self.datain["total-record-frames"] = None
def getPulse(self):
return self.datain["pulse"]
def getCurrentProfile(self):
return self.datain["current-profile"]
def getCurrentScene(self):
return self.datain["current-scene"]
def getStreaming(self):
return self.datain["streaming"]
def getTotalStreamTime(self):
return self.datain["total-stream-time"]
def getTotalStreamBytes(self):
return self.datain["total-stream-bytes"]
def getTotalStreamFrames(self):
return self.datain["total-stream-frames"]
def getRecording(self):
return self.datain["recording"]
def getTotalRecordTime(self):
return self.datain["total-record-time"]
def getTotalRecordBytes(self):
return self.datain["total-record-bytes"]
def getTotalRecordFrames(self):
return self.datain["total-record-frames"]
class RecordingStarting(base_classes.Baseevents):
"""A request to start recording has been issued.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "RecordingStarting"
class RecordingStarted(base_classes.Baseevents):
"""Recording started successfully.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "RecordingStarted"
class RecordingStopping(base_classes.Baseevents):
"""A request to stop recording has been issued.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "RecordingStopping"
class RecordingStopped(base_classes.Baseevents):
"""Recording stopped successfully.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "RecordingStopped"
class StreamStarting(base_classes.Baseevents):
"""A request to start streaming has been issued.
:Returns:
*preview_only*
type: boolean
Always false (retrocompatibility).
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "StreamStarting"
self.datain["preview-only"] = None
def getPreviewOnly(self):
return self.datain["preview-only"]
class StreamStarted(base_classes.Baseevents):
"""Streaming started successfully.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "StreamStarted"
class StreamStopping(base_classes.Baseevents):
"""A request to stop streaming has been issued.
:Returns:
*preview_only*
type: boolean
Always false (retrocompatibility).
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "StreamStopping"
self.datain["preview-only"] = None
def getPreviewOnly(self):
return self.datain["preview-only"]
class StreamStopped(base_classes.Baseevents):
"""Streaming stopped successfully.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "StreamStopped"
class StreamStatus(base_classes.Baseevents):
"""Emit every 2 seconds.
:Returns:
*streaming*
type: boolean
Current streaming state.
*recording*
type: boolean
Current recording state.
*preview_only*
type: boolean
Always false (retrocompatibility).
*bytes_per_sec*
type: int
Amount of data per second (in bytes) transmitted by the stream encoder.
*kbits_per_sec*
type: int
Amount of data per second (in kilobits) transmitted by the stream encoder.
*strain*
type: double
Percentage of dropped frames.
*total_stream_time*
type: int
Total time (in seconds) since the stream started.
*num_total_frames*
type: int
Total number of frames transmitted since the stream started.
*num_dropped_frames*
type: int
Number of frames dropped by the encoder since the stream started.
*fps*
type: double
Current framerate.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "StreamStatus"
self.datain["streaming"] = None
self.datain["recording"] = None
self.datain["preview-only"] = None
self.datain["bytes-per-sec"] = None
self.datain["kbits-per-sec"] = None
self.datain["strain"] = None
self.datain["total-stream-time"] = None
self.datain["num-total-frames"] = None
self.datain["num-dropped-frames"] = None
self.datain["fps"] = None
def getStreaming(self):
return self.datain["streaming"]
def getRecording(self):
return self.datain["recording"]
def getPreviewOnly(self):
return self.datain["preview-only"]
def getBytesPerSec(self):
return self.datain["bytes-per-sec"]
def getKbitsPerSec(self):
return self.datain["kbits-per-sec"]
def getStrain(self):
return self.datain["strain"]
def getTotalStreamTime(self):
return self.datain["total-stream-time"]
def getNumTotalFrames(self):
return self.datain["num-total-frames"]
def getNumDroppedFrames(self):
return self.datain["num-dropped-frames"]
def getFps(self):
return self.datain["fps"]
class Exiting(base_classes.Baseevents):
"""OBS is exiting.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "Exiting"
class SwitchTransition(base_classes.Baseevents):
"""The active transition has been changed.
:Returns:
*transition_name*
type: String
The name of the new active transition.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "SwitchTransition"
self.datain["transition-name"] = None
def getTransitionName(self):
return self.datain["transition-name"]
class TransitionListChanged(base_classes.Baseevents):
"""The list of available transitions has been modified.
Transitions have been added, removed, or renamed.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "TransitionListChanged"
class TransitionDurationChanged(base_classes.Baseevents):
"""The active transition duration has been changed.
:Returns:
*new_duration*
type: int
New transition duration.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "TransitionDurationChanged"
self.datain["new-duration"] = None
def getNewDuration(self):
return self.datain["new-duration"]
class TransitionBegin(base_classes.Baseevents):
"""A transition (other than "cut") has begun.
:Returns:
*name*
type: String
Transition name.
*duration*
type: int
Transition duration (in milliseconds).
*from_scene*
type: String
Source scene of the transition
*to_scene*
type: String
Destination scene of the transition
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "TransitionBegin"
self.datain["name"] = None
self.datain["duration"] = None
self.datain["from-scene"] = None
self.datain["to-scene"] = None
def getName(self):
return self.datain["name"]
def getDuration(self):
return self.datain["duration"]
def getFromScene(self):
return self.datain["from-scene"]
def getToScene(self):
return self.datain["to-scene"]
| 27.463259 | 109 | 0.627966 |
f.datain["sources"] = None
def getName(self):
return self.datain["name"]
def getSources(self):
return self.datain["sources"]
class SceneItemTransformChanged(base_classes.Baseevents):
    """An item's transform has been changed.

    :Returns:
       *scene_name*
            type: String
            Name of the scene.

       *item_name*
            type: String
            Name of the item in the scene.

       *item_id*
            type: int
            Scene item ID.

       *transform*
            type: SceneItemTransform
            New transform of the scene item.
    """
    def __init__(self):
        base_classes.Baseevents.__init__(self)
        self.name = "SceneItemTransformChanged"
        self.datain["scene-name"] = None
        self.datain["item-name"] = None
        self.datain["item-id"] = None
        self.datain["transform"] = None

    def getSceneName(self):
        return self.datain["scene-name"]

    def getItemName(self):
        return self.datain["item-name"]

    def getItemId(self):
        return self.datain["item-id"]

    def getItemTransform(self):
        return self.datain["transform"]
class SceneItemAdded(base_classes.Baseevents):
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "SceneItemAdded"
self.datain["scene-name"] = None
self.datain["item-name"] = None
def getSceneName(self):
return self.datain["scene-name"]
def getItemName(self):
return self.datain["item-name"]
class SceneItemRemoved(base_classes.Baseevents):
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "SceneItemRemoved"
self.datain["scene-name"] = None
self.datain["item-name"] = None
def getSceneName(self):
return self.datain["scene-name"]
def getItemName(self):
return self.datain["item-name"]
class SceneItemVisibilityChanged(base_classes.Baseevents):
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "SceneItemVisibilityChanged"
self.datain["scene-name"] = None
self.datain["item-name"] = None
self.datain["item-visible"] = None
def getSceneName(self):
return self.datain["scene-name"]
def getItemName(self):
return self.datain["item-name"]
def getItemVisible(self):
return self.datain["item-visible"]
class PreviewSceneChanged(base_classes.Baseevents):
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "PreviewSceneChanged"
self.datain["scene-name"] = None
self.datain["sources"] = None
def getSceneName(self):
return self.datain["scene-name"]
def getSources(self):
return self.datain["sources"]
class StudioModeSwitched(base_classes.Baseevents):
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "StudioModeSwitched"
self.datain["new-state"] = None
def getNewState(self):
return self.datain["new-state"]
class ReplayStarting(base_classes.Baseevents):
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "ReplayStarting"
class ReplayStarted(base_classes.Baseevents):
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "ReplayStarted"
class ReplayStopping(base_classes.Baseevents):
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "ReplayStopping"
class ReplayStopped(base_classes.Baseevents):
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "ReplayStopped"
class SwitchScenes(base_classes.Baseevents):
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "SwitchScenes"
self.datain["scene-name"] = None
self.datain["sources"] = None
def getSceneName(self):
return self.datain["scene-name"]
def getSources(self):
return self.datain["sources"]
class ScenesChanged(base_classes.Baseevents):
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "ScenesChanged"
class SceneCollectionChanged(base_classes.Baseevents):
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "SceneCollectionChanged"
class SceneCollectionListChanged(base_classes.Baseevents):
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "SceneCollectionListChanged"
class ProfileChanged(base_classes.Baseevents):
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "ProfileChanged"
class ProfileListChanged(base_classes.Baseevents):
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "ProfileListChanged"
class Heartbeat(base_classes.Baseevents):
    """OBS "Heartbeat" event: periodic status pulse with stream/record totals.

    `datain` keys are pre-declared to None and presumably overwritten with
    the incoming event payload -- TODO confirm against Baseevents.
    """
    def __init__(self):
        base_classes.Baseevents.__init__(self)
        self.name = "Heartbeat"
        self.datain["pulse"] = None
        self.datain["current-profile"] = None
        self.datain["current-scene"] = None
        # Streaming state and counters.
        self.datain["streaming"] = None
        self.datain["total-stream-time"] = None
        self.datain["total-stream-bytes"] = None
        self.datain["total-stream-frames"] = None
        # Recording state and counters.
        self.datain["recording"] = None
        self.datain["total-record-time"] = None
        self.datain["total-record-bytes"] = None
        self.datain["total-record-frames"] = None
    # Trivial accessors for each payload field.
    def getPulse(self):
        return self.datain["pulse"]
    def getCurrentProfile(self):
        return self.datain["current-profile"]
    def getCurrentScene(self):
        return self.datain["current-scene"]
    def getStreaming(self):
        return self.datain["streaming"]
    def getTotalStreamTime(self):
        return self.datain["total-stream-time"]
    def getTotalStreamBytes(self):
        return self.datain["total-stream-bytes"]
    def getTotalStreamFrames(self):
        return self.datain["total-stream-frames"]
    def getRecording(self):
        return self.datain["recording"]
    def getTotalRecordTime(self):
        return self.datain["total-record-time"]
    def getTotalRecordBytes(self):
        return self.datain["total-record-bytes"]
    def getTotalRecordFrames(self):
        return self.datain["total-record-frames"]
class RecordingStarting(base_classes.Baseevents):
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "RecordingStarting"
class RecordingStarted(base_classes.Baseevents):
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "RecordingStarted"
class RecordingStopping(base_classes.Baseevents):
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "RecordingStopping"
class RecordingStopped(base_classes.Baseevents):
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "RecordingStopped"
class StreamStarting(base_classes.Baseevents):
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "StreamStarting"
self.datain["preview-only"] = None
def getPreviewOnly(self):
return self.datain["preview-only"]
class StreamStarted(base_classes.Baseevents):
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "StreamStarted"
class StreamStopping(base_classes.Baseevents):
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "StreamStopping"
self.datain["preview-only"] = None
def getPreviewOnly(self):
return self.datain["preview-only"]
class StreamStopped(base_classes.Baseevents):
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "StreamStopped"
class StreamStatus(base_classes.Baseevents):
    """OBS "StreamStatus" event: live streaming statistics snapshot.

    `datain` keys are pre-declared to None and presumably overwritten with
    the incoming event payload -- TODO confirm against Baseevents.
    """
    def __init__(self):
        base_classes.Baseevents.__init__(self)
        self.name = "StreamStatus"
        self.datain["streaming"] = None
        self.datain["recording"] = None
        self.datain["preview-only"] = None
        # Bandwidth / quality counters.
        self.datain["bytes-per-sec"] = None
        self.datain["kbits-per-sec"] = None
        self.datain["strain"] = None
        self.datain["total-stream-time"] = None
        self.datain["num-total-frames"] = None
        self.datain["num-dropped-frames"] = None
        self.datain["fps"] = None
    # Trivial accessors for each payload field.
    def getStreaming(self):
        return self.datain["streaming"]
    def getRecording(self):
        return self.datain["recording"]
    def getPreviewOnly(self):
        return self.datain["preview-only"]
    def getBytesPerSec(self):
        return self.datain["bytes-per-sec"]
    def getKbitsPerSec(self):
        return self.datain["kbits-per-sec"]
    def getStrain(self):
        return self.datain["strain"]
    def getTotalStreamTime(self):
        return self.datain["total-stream-time"]
    def getNumTotalFrames(self):
        return self.datain["num-total-frames"]
    def getNumDroppedFrames(self):
        return self.datain["num-dropped-frames"]
    def getFps(self):
        return self.datain["fps"]
class Exiting(base_classes.Baseevents):
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "Exiting"
class SwitchTransition(base_classes.Baseevents):
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "SwitchTransition"
self.datain["transition-name"] = None
def getTransitionName(self):
return self.datain["transition-name"]
class TransitionListChanged(base_classes.Baseevents):
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "TransitionListChanged"
class TransitionDurationChanged(base_classes.Baseevents):
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "TransitionDurationChanged"
self.datain["new-duration"] = None
def getNewDuration(self):
return self.datain["new-duration"]
class TransitionBegin(base_classes.Baseevents):
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "TransitionBegin"
self.datain["name"] = None
self.datain["duration"] = None
self.datain["from-scene"] = None
self.datain["to-scene"] = None
def getName(self):
return self.datain["name"]
def getDuration(self):
return self.datain["duration"]
def getFromScene(self):
return self.datain["from-scene"]
def getToScene(self):
return self.datain["to-scene"]
| true | true |
f71e81879e0d08f2be5ab349bd6798f8f04829fe | 346 | py | Python | AllRoutesLeadToRome/app.py | kkhan01/softdev | 60c94919e8a5aba3db3d91849878057b8426cb4c | [
"MIT"
] | 1 | 2020-05-02T01:41:06.000Z | 2020-05-02T01:41:06.000Z | AllRoutesLeadToRome/app.py | kkhan01/softdev | 60c94919e8a5aba3db3d91849878057b8426cb4c | [
"MIT"
] | null | null | null | AllRoutesLeadToRome/app.py | kkhan01/softdev | 60c94919e8a5aba3db3d91849878057b8426cb4c | [
"MIT"
] | null | null | null | from flask import Flask
app = Flask(__name__)
@app.route('/')
def display00():
    """Landing page: greet the visitor and link to the other routes."""
    # Fixed invalid `</br>` markup: <br> is a void element and has no
    # closing form; browsers tolerated it, but the HTML was malformed.
    return 'Heya! <br> This is the first page! <br> Others are at: /01 /02'
@app.route('/01')
def display01():
    """Second page: static text response."""
    return 'And now: The second page!'
@app.route('/02')
def display02():
    """Third and final page: static text response."""
    return 'Woah! The last page!'
if __name__ == '__main__':
    # Run Flask's built-in development server with default settings.
    app.run()
| 18.210526 | 77 | 0.627168 | from flask import Flask
app = Flask(__name__)
@app.route('/')
def display00():
return 'Heya! </br> This is the first page! </br> Others are at: /01 /02'
@app.route('/01')
def display01():
return 'And now: The second page!'
@app.route('/02')
def display02():
return 'Woah! The last page!'
if __name__ == '__main__':
app.run()
| true | true |
f71e822e91707a7d824f5756df17632036e10f8a | 2,936 | py | Python | Browser/keywords/promises.py | emanlove/robotframework-browser | 8d9dae4301fe263bc0f7682de58a6bf299211382 | [
"Apache-2.0"
] | null | null | null | Browser/keywords/promises.py | emanlove/robotframework-browser | 8d9dae4301fe263bc0f7682de58a6bf299211382 | [
"Apache-2.0"
] | null | null | null | Browser/keywords/promises.py | emanlove/robotframework-browser | 8d9dae4301fe263bc0f7682de58a6bf299211382 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from concurrent.futures import Future, ThreadPoolExecutor
from robot.api.deco import keyword # type: ignore
from robot.libraries.BuiltIn import EXECUTION_CONTEXTS # type: ignore
from ..base import LibraryComponent
class Promises(LibraryComponent):
    """Keywords for running Browser keywords asynchronously.

    A "promise" here is a ``concurrent.futures.Future`` created by executing
    another Browser keyword on a background thread pool.
    """

    def __init__(self, library):
        LibraryComponent.__init__(self, library)
        # Generous pool size so many keywords can be pending concurrently.
        self._executor = ThreadPoolExecutor(max_workers=256)

    @keyword(tags=["Wait"])
    def promise_to(self, kw: str, *args):
        """
        Wrap a Browser library keyword and make it a promise.
        Returns that promise and executes the keyword on background.

        ``kw`` <str> Keyword that will work async on background.

        Example:
        | ${promise}=     Promise To            Wait For Response     matcher=     timeout=3
        | Click           \\#delayed_request
        | ${body}=        Wait For              ${promise}
        """
        # Resolve the handler through Robot Framework's live namespace so
        # `kw` and its arguments are matched exactly like a normal keyword
        # call (including variable substitution).
        browser_lib = EXECUTION_CONTEXTS.current.namespace._kw_store.get_library(
            self.library
        )
        handler = browser_lib.handlers[kw]
        positional, named = handler.resolve_arguments(
            args, EXECUTION_CONTEXTS.current.variables
        )
        named = dict(named)
        promise = self._executor.submit(handler.current_handler(), *positional, **named)
        # NOTE(review): `unresolved_promises` is not initialised in this
        # class -- presumably provided by LibraryComponent or the main
        # library instance; confirm.
        self.unresolved_promises.add(promise)
        return promise

    @keyword(tags=["Wait"])
    def wait_for(self, *promises: Future):
        """
        Waits for promises to finish and returns results from them.
        Returns one result if one promise waited. Otherwise returns an array of results.
        If one fails, then this keyword will fail.

        ``promises`` *Work in progress*

        Example:
        | ${promise}=     Promise To            Wait For Response     matcher=     timeout=3
        | Click           \\#delayed_request
        | ${body}=        Wait For              ${promise}
        """
        # Waited promises are no longer "unresolved", regardless of outcome.
        self.unresolved_promises -= {*promises}
        if len(promises) == 1:
            return promises[0].result()
        return [promise.result() for promise in promises]

    @keyword(tags=["Wait"])
    def wait_for_all_promises(self):
        """
        Waits for all promises to finish.
        If one fails, then this keyword will fail.
        """
        self.wait_for(*self.unresolved_promises)
| 36.7 | 92 | 0.641008 |
from concurrent.futures import Future, ThreadPoolExecutor
from robot.api.deco import keyword
from robot.libraries.BuiltIn import EXECUTION_CONTEXTS
from ..base import LibraryComponent
class Promises(LibraryComponent):
def __init__(self, library):
LibraryComponent.__init__(self, library)
self._executor = ThreadPoolExecutor(max_workers=256)
@keyword(tags=["Wait"])
def promise_to(self, kw: str, *args):
browser_lib = EXECUTION_CONTEXTS.current.namespace._kw_store.get_library(
self.library
)
handler = browser_lib.handlers[kw]
positional, named = handler.resolve_arguments(
args, EXECUTION_CONTEXTS.current.variables
)
named = dict(named)
promise = self._executor.submit(handler.current_handler(), *positional, **named)
self.unresolved_promises.add(promise)
return promise
@keyword(tags=["Wait"])
def wait_for(self, *promises: Future):
self.unresolved_promises -= {*promises}
if len(promises) == 1:
return promises[0].result()
return [promise.result() for promise in promises]
@keyword(tags=["Wait"])
def wait_for_all_promises(self):
self.wait_for(*self.unresolved_promises)
| true | true |
f71e828d2972790e9832de08ed3694172916c5fe | 6,556 | py | Python | andres@programo.ual.es/bayesian_pca_DR.py | andresmasegosa/PRML-CoreSets | fb768debb15e3ff6f5b65b7224915a41c1493f3d | [
"MIT"
] | null | null | null | andres@programo.ual.es/bayesian_pca_DR.py | andresmasegosa/PRML-CoreSets | fb768debb15e3ff6f5b65b7224915a41c1493f3d | [
"MIT"
] | null | null | null | andres@programo.ual.es/bayesian_pca_DR.py | andresmasegosa/PRML-CoreSets | fb768debb15e3ff6f5b65b7224915a41c1493f3d | [
"MIT"
] | null | null | null | import numpy as np
from prml.feature_extractions.pca import PCA
from sklearn.cluster import KMeans, MiniBatchKMeans
from sklearn.preprocessing import StandardScaler
class BayesianPCA_DR(PCA):
    """Bayesian PCA fitted on a reduced ("coreset") representation of the data.

    The input matrix is first compressed into a small set of weighted points
    (k-means clusters or a random subsample); EM for probabilistic PCA is
    then run on that weighted summary instead of the full dataset.
    """

    def _clusteringError(self, X, kmeans):
        # Sum of Euclidean distances from each point to its assigned centroid.
        # NOTE: the local name `sum` shadows the builtin -- harmless here,
        # but confusing.
        sum = 0
        for i in range(0, kmeans.cluster_centers_.shape[0]):
            a = X[kmeans.labels_ == i, :] - kmeans.cluster_centers_[i, :]
            sum += np.sqrt((a * a).sum(axis=1)).sum(axis=0)
        return sum

    def _random(self, X, n_clusters):
        # Reduce by random subsampling: keep `n_clusters` rows, each carrying
        # an equal share of the total sample weight.
        centers_X = X[np.random.choice(X.shape[0], n_clusters, replace=False),:]
        centers_XX = centers_X**2
        weights = np.repeat(X.shape[0]/n_clusters,n_clusters)
        self.X_dr = {'X': centers_X, 'XX': centers_XX,
                     'W': weights}

    def _clusterSS(self, X, n_clusters):
        # Reduce by clustering the joint sufficient statistics [X, X**2]:
        # centroids then carry consistent first and second moments.
        XX = X ** 2
        XJoin = np.concatenate((X, XX), axis=1)
        self.kmeans = MiniBatchKMeans(n_clusters=n_clusters).fit(XJoin)
        # Cluster sizes become the point weights.
        weights = np.asarray([sum(self.kmeans.labels_ == x) for x in range(0, n_clusters)])
        D=X.shape[1]
        self.X_dr = {'X': self.kmeans.cluster_centers_[:, 0:D], 'XX': self.kmeans.cluster_centers_[:, D:2 * D], 'W': weights}
        self.clusterError = self._clusteringError(XJoin,self.kmeans)

    def _cluster(self, X, n_clusters):
        # Reduce by clustering X only; second moments are approximated by
        # squaring the centroids.
        self.kmeans = MiniBatchKMeans(n_clusters=n_clusters).fit(X)
        weights = np.asarray([sum(self.kmeans.labels_ == x) for x in range(0, n_clusters)])
        self.X_dr = {'X': self.kmeans.cluster_centers_, 'XX': self.kmeans.cluster_centers_ ** 2, 'W': weights}

    def eigen(self, X_dr, *arg):
        """Initialise W and var from the eigendecomposition of the reduced data.

        Sets self.W, self.var, self.I, self.C and self.Cinv.
        """
        sample_size = np.sum(X_dr['W'])
        # NOTE(review): weights are applied by elementwise multiplication
        # rather than replication/weighted covariance -- looks statistically
        # questionable; confirm this is intended.
        X = self.X_dr['W'][:,None]*self.X_dr['X']
        n_features = X.shape[1]
        if sample_size >= n_features:
            cov = np.cov(X, rowvar=False)
            values, vectors = np.linalg.eigh(cov)
            index = n_features - self.n_components
        else:
            # Fewer (effective) samples than features: work in sample space.
            cov = np.cov(X)
            values, vectors = np.linalg.eigh(cov)
            vectors = (X.T @ vectors) / np.sqrt(sample_size * values)
            index = sample_size - self.n_components
        self.I = np.eye(self.n_components)
        if index == 0:
            self.var = 0
        else:
            # Noise variance = mean of the discarded eigenvalues.
            self.var = np.mean(values[:index])
        self.W = vectors[:, index:].dot(np.sqrt(np.diag(values[index:]) - self.var * self.I))
        self.__M = self.W.T @ self.W + self.var * self.I
        self.C = self.W @ self.W.T + self.var * np.eye(n_features)
        if index == 0:
            self.Cinv = np.linalg.inv(self.C)
        else:
            # Woodbury-style inverse of C = W W^T + var*I.
            self.Cinv = np.eye(n_features) / np.sqrt(self.var) - self.W @ np.linalg.inv(self.__M) @ self.W.T / self.var

    def fit(self, X, iter_max=100, initial="random", n_clusters=10, cluster_method="SS"):
        """
        Empirical Bayes estimation of PCA parameters on a reduced dataset.

        The input is first compressed into `n_clusters` weighted points
        (see `_clusterSS`, `_cluster`, `_random`), then EM is run on the
        compressed representation.

        Parameters
        ----------
        X : (sample_size, n_features) ndarray
            input data
        iter_max : int
            maximum number of em steps
        initial : str
            "random" or "eigen" initialisation of the projection matrix
        n_clusters : int
            number of weighted points kept after reduction
        cluster_method : str
            "SS" (cluster on [X, X**2]), "NoSS" (cluster on X) or "random"

        Sets mean, W, var, alpha, n_iter, C and Cinv on self.
        """
        if cluster_method== "SS":
            self._clusterSS(X,n_clusters)
        elif cluster_method== "NoSS":
            self._cluster(X,n_clusters)
        elif cluster_method == "random":
            self._random(X,n_clusters)
        initial_list = ["random", "eigen"]
        # Weighted mean of the reduced points.
        self.mean = np.sum(self.X_dr['W'][:,None]*self.X_dr['X'], axis=0)/sum(self.X_dr['W'])
        self.I = np.eye(self.n_components)
        if initial not in initial_list:
            print("availabel initializations are {}".format(initial_list))
        if initial == "random":
            self.W = np.eye(np.size(self.X_dr['X'], 1), self.n_components)
            self.var = 1.
        elif initial == "eigen":
            self.eigen(self.X_dr)
        self.alpha = len(self.mean) / np.sum(self.W ** 2, axis=0).clip(min=1e-10)
        for i in range(iter_max):
            W = np.copy(self.W)
            # E-step provided by the PCA base class; M-step is weighted.
            Ez, Ezz = self._expectation(self.X_dr['X']-self.mean)
            self._maximization(self.X_dr, Ez, Ezz)
            # NOTE: per-iteration re-estimation of alpha was left disabled
            # by the author.
            if np.allclose(W, self.W):
                break
        self.n_iter = i + 1
        self.C = self.W @ self.W.T + self.var * np.eye(np.size(self.X_dr['X'], 1))
        self.Cinv = np.linalg.inv(self.C)

    def _maximization(self, X_dr, Ez, Ezz):
        # Weighted M-step over the reduced representation; weights W enter
        # both the W-update and the variance update.
        X_mean = (X_dr['X']-self.mean)
        self.W = (X_mean*X_dr['W'][:,None]).T @ Ez @ np.linalg.inv(np.sum(Ezz*X_dr['W'][:,None,None], axis=0) + self.var * np.diag(self.alpha))
        # Variance update uses the stored second moments X_dr['XX'] so the
        # within-cluster spread is (approximately) accounted for.
        self.var = np.sum(
            (np.mean((X_dr['XX'] - 2*X_dr['X']*self.mean + self.mean ** 2), axis=-1)
            - 2 * np.mean(Ez @ self.W.T * X_mean, axis=-1)
            + np.trace((Ezz @ self.W.T @ self.W).T)/ len(self.mean))*X_dr['W'])/sum(X_dr['W'])
        # Floor the variance to keep C invertible.
        self.var=max(self.var,0.000001)

    def maximize(self, D, Ez, Ezz):
        # Unweighted M-step over raw (already centred) data D.
        # NOTE(review): `self.ndim` is not set in this class -- presumably
        # provided by the PCA base class; confirm.
        self.W = D.T.dot(Ez).dot(np.linalg.inv(np.sum(Ezz, axis=0) + self.var * np.diag(self.alpha)))
        self.var = np.mean(
            np.mean(D ** 2, axis=-1)
            - 2 * np.mean(Ez.dot(self.W.T) * D, axis=-1)
            + np.trace(Ezz.dot(self.W.T).dot(self.W).T) / self.ndim)
| 43.706667 | 143 | 0.575046 | import numpy as np
from prml.feature_extractions.pca import PCA
from sklearn.cluster import KMeans, MiniBatchKMeans
from sklearn.preprocessing import StandardScaler
class BayesianPCA_DR(PCA):
def _clusteringError(self, X, kmeans):
sum = 0
for i in range(0, kmeans.cluster_centers_.shape[0]):
a = X[kmeans.labels_ == i, :] - kmeans.cluster_centers_[i, :]
sum += np.sqrt((a * a).sum(axis=1)).sum(axis=0)
return sum
def _random(self, X, n_clusters):
centers_X = X[np.random.choice(X.shape[0], n_clusters, replace=False),:]
centers_XX = centers_X**2
weights = np.repeat(X.shape[0]/n_clusters,n_clusters)
self.X_dr = {'X': centers_X, 'XX': centers_XX,
'W': weights}
def _clusterSS(self, X, n_clusters):
XX = X ** 2
XJoin = np.concatenate((X, XX), axis=1)
self.kmeans = MiniBatchKMeans(n_clusters=n_clusters).fit(XJoin)
weights = np.asarray([sum(self.kmeans.labels_ == x) for x in range(0, n_clusters)])
D=X.shape[1]
self.X_dr = {'X': self.kmeans.cluster_centers_[:, 0:D], 'XX': self.kmeans.cluster_centers_[:, D:2 * D], 'W': weights}
self.clusterError = self._clusteringError(XJoin,self.kmeans)
def _cluster(self, X, n_clusters):
self.kmeans = MiniBatchKMeans(n_clusters=n_clusters).fit(X)
weights = np.asarray([sum(self.kmeans.labels_ == x) for x in range(0, n_clusters)])
self.X_dr = {'X': self.kmeans.cluster_centers_, 'XX': self.kmeans.cluster_centers_ ** 2, 'W': weights}
def eigen(self, X_dr, *arg):
sample_size = np.sum(X_dr['W'])
X = self.X_dr['W'][:,None]*self.X_dr['X']
n_features = X.shape[1]
if sample_size >= n_features:
cov = np.cov(X, rowvar=False)
values, vectors = np.linalg.eigh(cov)
index = n_features - self.n_components
else:
cov = np.cov(X)
values, vectors = np.linalg.eigh(cov)
vectors = (X.T @ vectors) / np.sqrt(sample_size * values)
index = sample_size - self.n_components
self.I = np.eye(self.n_components)
if index == 0:
self.var = 0
else:
self.var = np.mean(values[:index])
self.W = vectors[:, index:].dot(np.sqrt(np.diag(values[index:]) - self.var * self.I))
self.__M = self.W.T @ self.W + self.var * self.I
self.C = self.W @ self.W.T + self.var * np.eye(n_features)
if index == 0:
self.Cinv = np.linalg.inv(self.C)
else:
self.Cinv = np.eye(n_features) / np.sqrt(self.var) - self.W @ np.linalg.inv(self.__M) @ self.W.T / self.var
def fit(self, X, iter_max=100, initial="random", n_clusters=10, cluster_method="SS"):
if cluster_method== "SS":
self._clusterSS(X,n_clusters)
elif cluster_method== "NoSS":
self._cluster(X,n_clusters)
elif cluster_method == "random":
self._random(X,n_clusters)
initial_list = ["random", "eigen"]
self.mean = np.sum(self.X_dr['W'][:,None]*self.X_dr['X'], axis=0)/sum(self.X_dr['W'])
self.I = np.eye(self.n_components)
if initial not in initial_list:
print("availabel initializations are {}".format(initial_list))
if initial == "random":
self.W = np.eye(np.size(self.X_dr['X'], 1), self.n_components)
self.var = 1.
elif initial == "eigen":
self.eigen(self.X_dr)
self.alpha = len(self.mean) / np.sum(self.W ** 2, axis=0).clip(min=1e-10)
for i in range(iter_max):
W = np.copy(self.W)
Ez, Ezz = self._expectation(self.X_dr['X']-self.mean)
self._maximization(self.X_dr, Ez, Ezz)
if np.allclose(W, self.W):
break
self.n_iter = i + 1
self.C = self.W @ self.W.T + self.var * np.eye(np.size(self.X_dr['X'], 1))
self.Cinv = np.linalg.inv(self.C)
def _maximization(self, X_dr, Ez, Ezz):
X_mean = (X_dr['X']-self.mean)
self.W = (X_mean*X_dr['W'][:,None]).T @ Ez @ np.linalg.inv(np.sum(Ezz*X_dr['W'][:,None,None], axis=0) + self.var * np.diag(self.alpha))
self.var = np.sum(
(np.mean((X_dr['XX'] - 2*X_dr['X']*self.mean + self.mean ** 2), axis=-1)
- 2 * np.mean(Ez @ self.W.T * X_mean, axis=-1)
+ np.trace((Ezz @ self.W.T @ self.W).T)/ len(self.mean))*X_dr['W'])/sum(X_dr['W'])
self.var=max(self.var,0.000001)
def maximize(self, D, Ez, Ezz):
self.W = D.T.dot(Ez).dot(np.linalg.inv(np.sum(Ezz, axis=0) + self.var * np.diag(self.alpha)))
self.var = np.mean(
np.mean(D ** 2, axis=-1)
- 2 * np.mean(Ez.dot(self.W.T) * D, axis=-1)
+ np.trace(Ezz.dot(self.W.T).dot(self.W).T) / self.ndim)
| true | true |
f71e836c03860d0845b884fe67b551b2e44b4a7b | 2,858 | py | Python | pycfmodel/model/resources/properties/security_group_ingress_prop.py | donatoaz/pycfmodel | 1586e290b67d2347493dd4a77d2b0c8ee6c0936b | [
"Apache-2.0"
] | null | null | null | pycfmodel/model/resources/properties/security_group_ingress_prop.py | donatoaz/pycfmodel | 1586e290b67d2347493dd4a77d2b0c8ee6c0936b | [
"Apache-2.0"
] | null | null | null | pycfmodel/model/resources/properties/security_group_ingress_prop.py | donatoaz/pycfmodel | 1586e290b67d2347493dd4a77d2b0c8ee6c0936b | [
"Apache-2.0"
] | null | null | null | from ipaddress import IPv4Network, IPv6Network
from typing import Optional
from pydantic import validator
from pycfmodel.constants import IPV4_ZERO_VALUE, IPV6_ZERO_VALUE
from pycfmodel.model.resources.properties.property import Property
from pycfmodel.model.types import (
ResolvableInt,
ResolvableIntOrStr,
ResolvableIPv4Network,
ResolvableIPv6Network,
ResolvableStr,
)
class SecurityGroupIngressProp(Property):
    """
    An inbound rule permits instances to receive traffic from the specified IPv4 or IPv6 CIDR address range, or from the instances associated with the specified security group.

    Properties:

    - CidrIp: The IPv4 ranges.
    - CidrIpv6: The IPv6 ranges.
    - Description: The description of an egress (outbound) security group rule.
    - FromPort: The start of port range for the TCP and UDP protocols.
    - IpProtocol: The IP protocol name (tcp, udp, icmp, icmpv6) or number ([see Protocol Numbers](http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)).
    - SourcePrefixListId: The prefix list IDs for an AWS service.
    - SourceSecurityGroupId: The ID of the security group.
    - SourceSecurityGroupName: The name of the source security group.
    - SourceSecurityGroupOwnerId: The AWS account ID for the source security group.
    - ToPort: The end of port range for the TCP and UDP protocols.

    More info at [AWS Docs](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-security-group-ingress.html)
    """

    CidrIp: Optional[ResolvableIPv4Network] = None
    CidrIpv6: Optional[ResolvableIPv6Network] = None
    Description: Optional[ResolvableStr] = None
    FromPort: Optional[ResolvableInt] = None
    IpProtocol: ResolvableIntOrStr
    SourcePrefixListId: Optional[ResolvableStr] = None
    SourceSecurityGroupId: Optional[ResolvableStr] = None
    SourceSecurityGroupName: Optional[ResolvableStr] = None
    SourceSecurityGroupOwnerId: Optional[ResolvableStr] = None
    ToPort: Optional[ResolvableInt] = None

    @validator("CidrIp", pre=True)
    def set_CidrIp(cls, v):
        # pre=True: normalise the raw value before field validation.
        # strict=False accepts CIDRs with host bits set (e.g. 10.0.0.5/24).
        return IPv4Network(v, strict=False)

    @validator("CidrIpv6", pre=True)
    def set_CidrIpv6(cls, v):
        # Same normalisation as set_CidrIp, for IPv6 ranges.
        return IPv6Network(v, strict=False)

    def ipv4_slash_zero(self) -> bool:
        """Returns True if `CidrIp` matches `0.0.0.0/0`, otherwise False."""
        # Guard: unset (None) CidrIp is never "open to the world".
        # Remove after this is fixed https://bugs.python.org/issue38655
        if not self.CidrIp:
            return False
        return self.CidrIp == IPv4Network(IPV4_ZERO_VALUE)

    def ipv6_slash_zero(self) -> bool:
        """Returns True if `CidrIpv6` matches `::/0`, otherwise False."""
        # Guard: unset (None) CidrIpv6 is never "open to the world".
        # Remove after this is fixed https://bugs.python.org/issue38655
        if not self.CidrIpv6:
            return False
        return self.CidrIpv6 == IPv6Network(IPV6_ZERO_VALUE)
| 41.42029 | 176 | 0.728132 | from ipaddress import IPv4Network, IPv6Network
from typing import Optional
from pydantic import validator
from pycfmodel.constants import IPV4_ZERO_VALUE, IPV6_ZERO_VALUE
from pycfmodel.model.resources.properties.property import Property
from pycfmodel.model.types import (
ResolvableInt,
ResolvableIntOrStr,
ResolvableIPv4Network,
ResolvableIPv6Network,
ResolvableStr,
)
class SecurityGroupIngressProp(Property):
CidrIp: Optional[ResolvableIPv4Network] = None
CidrIpv6: Optional[ResolvableIPv6Network] = None
Description: Optional[ResolvableStr] = None
FromPort: Optional[ResolvableInt] = None
IpProtocol: ResolvableIntOrStr
SourcePrefixListId: Optional[ResolvableStr] = None
SourceSecurityGroupId: Optional[ResolvableStr] = None
SourceSecurityGroupName: Optional[ResolvableStr] = None
SourceSecurityGroupOwnerId: Optional[ResolvableStr] = None
ToPort: Optional[ResolvableInt] = None
@validator("CidrIp", pre=True)
def set_CidrIp(cls, v):
return IPv4Network(v, strict=False)
@validator("CidrIpv6", pre=True)
def set_CidrIpv6(cls, v):
return IPv6Network(v, strict=False)
def ipv4_slash_zero(self) -> bool:
if not self.CidrIp:
return False
return self.CidrIp == IPv4Network(IPV4_ZERO_VALUE)
def ipv6_slash_zero(self) -> bool:
if not self.CidrIpv6:
return False
return self.CidrIpv6 == IPv6Network(IPV6_ZERO_VALUE)
| true | true |
f71e85137e6e9b5019198c6010110150cbe49a78 | 34 | py | Python | pycl/Code/readfilec.py | dcavar/dcavar.github.io | bf96820f41563bab73ba35a98142da4ab5ad50a1 | [
"Apache-2.0"
] | 4 | 2018-01-11T22:14:11.000Z | 2019-06-13T09:56:18.000Z | pycl/Code/readfilec.py | dcavar/dcavar.github.io | bf96820f41563bab73ba35a98142da4ab5ad50a1 | [
"Apache-2.0"
] | null | null | null | pycl/Code/readfilec.py | dcavar/dcavar.github.io | bf96820f41563bab73ba35a98142da4ab5ad50a1 | [
"Apache-2.0"
] | 1 | 2020-01-25T02:16:38.000Z | 2020-01-25T02:16:38.000Z | print open("readfilec.py").read()
# Quine-style demo: print this script's own source code.
# Fixed: Python-2-only `print` statement (now a function call, valid on
# both Python 2.6+ and 3) and a leaked file handle (now closed via `with`).
with open("readfilec.py") as source:
    print(source.read())
| false | true |
f71e8601ba5f31a1c9d2c21ca11533def0e76aa3 | 1,022 | py | Python | nostradamus/apps/description_assessment/serializers.py | exactpro/nostradamus | 80df847a012374ad2b702cc9f9c9cb46c1153ee7 | [
"Apache-2.0"
] | 25 | 2019-12-18T05:32:41.000Z | 2022-03-23T12:16:49.000Z | nostradamus/apps/description_assessment/serializers.py | Exactpro/nostradamus | 80df847a012374ad2b702cc9f9c9cb46c1153ee7 | [
"Apache-2.0"
] | 12 | 2018-12-24T14:56:50.000Z | 2019-11-29T16:53:49.000Z | nostradamus/apps/description_assessment/serializers.py | exactpro/nostradamus | 80df847a012374ad2b702cc9f9c9cb46c1153ee7 | [
"Apache-2.0"
] | 7 | 2019-12-18T05:32:43.000Z | 2021-08-18T05:27:04.000Z | from rest_framework import serializers
class DescriptionAssessmentResponseSerializer(serializers.Serializer):
    """Response schema: available label values for each prediction metric."""

    priority = serializers.ListField(child=serializers.CharField())
    resolution = serializers.ListField(child=serializers.CharField())
    areas_of_testing = serializers.ListField(child=serializers.CharField())
class PredictorResponseSerializer(serializers.Serializer):
    """Response schema: per-metric prediction maps (label -> float score)."""

    priority = serializers.DictField(child=serializers.FloatField())
    resolution = serializers.DictField(child=serializers.FloatField())
    areas_of_testing = serializers.DictField(child=serializers.FloatField())
    time_to_resolve = serializers.DictField(child=serializers.FloatField())
class HighlightingResponseSerializer(serializers.ListSerializer):
    """Response schema: flat list of strings (terms to highlight)."""

    child = serializers.CharField()
class PredictorRequestSerializer(serializers.Serializer):
    """Request schema: free-text description to run predictions on."""

    description = serializers.CharField()
class HighlightingRequestSerializer(serializers.Serializer):
    """Request schema: metric name and its value to highlight terms for."""

    metric = serializers.CharField()
    value = serializers.CharField()
| 36.5 | 76 | 0.811155 | from rest_framework import serializers
class DescriptionAssessmentResponseSerializer(serializers.Serializer):
priority = serializers.ListField(child=serializers.CharField())
resolution = serializers.ListField(child=serializers.CharField())
areas_of_testing = serializers.ListField(child=serializers.CharField())
class PredictorResponseSerializer(serializers.Serializer):
priority = serializers.DictField(child=serializers.FloatField())
resolution = serializers.DictField(child=serializers.FloatField())
areas_of_testing = serializers.DictField(child=serializers.FloatField())
time_to_resolve = serializers.DictField(child=serializers.FloatField())
class HighlightingResponseSerializer(serializers.ListSerializer):
child = serializers.CharField()
class PredictorRequestSerializer(serializers.Serializer):
description = serializers.CharField()
class HighlightingRequestSerializer(serializers.Serializer):
metric = serializers.CharField()
value = serializers.CharField()
| true | true |
f71e8612627c373ae43be44f6304eef90f1f77b7 | 3,431 | py | Python | diarization/toys/gen_xvec_lbl.py | theScrabi/kaldi_voxceleb_pytorch | bce3f8c5506df0128dd87f6aff60f9924806f5b6 | [
"MIT"
] | 3 | 2020-04-06T06:33:19.000Z | 2020-04-08T06:24:15.000Z | diarization/toys/gen_xvec_lbl.py | theScrabi/kaldi_voxceleb_pytorch | bce3f8c5506df0128dd87f6aff60f9924806f5b6 | [
"MIT"
] | null | null | null | diarization/toys/gen_xvec_lbl.py | theScrabi/kaldi_voxceleb_pytorch | bce3f8c5506df0128dd87f6aff60f9924806f5b6 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import sys
import numpy as np
from math import floor
# --- command-line handling -------------------------------------------------
# Usage: gen_xvec_lbl.py <segments_file> <frame_size> <stride>
#   segments_file: file produced by generate_segments.py
#   frame_size:    x-vector frame length in 10 ms units (150 ms -> 15)
#   stride:        frame advance in 10 ms units; may be fractional
if len(sys.argv) <= 3:
    print("gen_xvec_lbl.py <segments_file> <frame_size> <stride>")
    print("You need to enter the segments file")
    print("generated by generate_segments.py")
    print("Second and third parameter need to be")
    # Fixed typos in the usage text ("strade", "enterd", "has needs").
    print("length and stride of the frame used")
    print("to generate xvectors over.")
    print("The length needs to be entered in 10ms units")
    print("A frame of 150ms therefore needs to be entered as 15.")
    print("Stride can be float values")
    sys.exit(1)  # sys.exit is safer than the site-provided exit() builtin

segments_file_name = sys.argv[1]
frame_size = int(sys.argv[2])  # frame length in 10 ms MFCC frames
half_frame = frame_size/2      # half window, used to centre each frame
stride = float(sys.argv[3])    # frame shift (may be fractional)
def read_segments(file_name):
    """Parse a segments file into per-conversation segment lists.

    Each input line has four space-separated fields:
        <conversation_id> <speaker_id> <start> <stop>
    where start/stop are frame indices (10 ms units).  Lines of the same
    conversation are assumed to be contiguous in the file.

    Returns a dict mapping conversation id to a list of dicts with keys:
        sid      -- speaker id
        mfcc_pos -- cumulative frame offset of the segment within the
                    conversation's concatenated speech
        time_pos -- original start frame of the segment
        dur      -- segment length in frames
    """
    convs = {}
    segments = []
    current_conv = ""
    mfcc_pos = 0
    # `with` guarantees the handle is closed (the original leaked it via
    # open(...).readlines()); iterating the file avoids reading it all at once.
    with open(file_name) as seg_file:
        for line in seg_file:
            fields = line.replace("\n", "").split(" ")
            cid = fields[0]
            sid = fields[1]
            start = int(fields[2])
            stop = int(fields[3])
            # Conversation id changed: flush the finished conversation.
            if not current_conv == cid and not current_conv == "":
                convs[current_conv] = segments
                segments = []
                mfcc_pos = 0
            current_conv = cid
            seg_dur = stop - start
            segments.append({"sid": sid, "mfcc_pos": mfcc_pos, "time_pos": start, "dur": seg_dur})
            mfcc_pos += seg_dur
    # Flush the last conversation (empty files yield an empty dict).
    if len(segments) > 0:
        convs[current_conv] = segments
    return convs
def get_mfcc_count_of_segments(segments):
    """Total number of MFCC frames covered by *segments* (sum of durations)."""
    return sum(seg["dur"] for seg in segments)
def get_count_of_frames(mfcc_count):
    # Number of sliding windows of length `frame_size` that fit into
    # `mfcc_count` frames when advancing by `stride` (both module globals,
    # set from the command line).  int() truncates toward zero, so very
    # short inputs (mfcc_count < frame_size) can yield 0 or a negative
    # count -- presumably callers never pass such inputs; TODO confirm.
    return int((mfcc_count - frame_size) / stride) + 1
def get_speaker_of_segments(segments):
speaker = {}
i = 1
for s in segments:
if not s["sid"] in speaker:
speaker[s["sid"]] = i
i += 1
return speaker
def get_touching_segments(segments, start, stop):
touching_segments = []
for seg in segments:
if seg["mfcc_pos"] < stop and seg["mfcc_pos"] + seg["dur"] >= start:
touch_start = seg["time_pos"] if seg["mfcc_pos"] >= start else (seg["time_pos"] - seg["mfcc_pos"] + start)
touch_end = (seg["time_pos"] + seg["dur"]) if seg["mfcc_pos"] + seg["dur"] <= stop else (seg["time_pos"] - seg["mfcc_pos"] + stop)
touching_segments.append((seg, touch_start, touch_end))
return touching_segments
def label_frames_by_segments(conv_id, segments):
frames = np.zeros(get_count_of_frames(get_mfcc_count_of_segments(segments)))
speaker = get_speaker_of_segments(segments)
for i in range(0, frames.shape[0]):
frame_center = i * stride + half_frame
sids_of_frame = []
touch_starts_of_frame = []
touch_ends_of_frame = []
for seg in get_touching_segments(segments, frame_center - half_frame, frame_center + half_frame):
sids_of_frame.append(seg[0]["sid"])
touch_starts_of_frame.append(str(int(seg[1])))
touch_ends_of_frame.append(str(int(seg[2])))
sids_of_frame = "-".join(sids_of_frame)
touch_starts_of_frame = "-".join(touch_starts_of_frame)
touch_ends_of_frame = "-".join(touch_ends_of_frame)
print(f"{conv_id} {i} {sids_of_frame} {touch_starts_of_frame} {touch_ends_of_frame}")
convs = read_segments(segments_file_name)
for i in convs:
label_frames_by_segments(i, convs[i])
| 31.477064 | 142 | 0.633926 |
import sys
import numpy as np
from math import floor
if len(sys.argv) <= 3:
print("gen_xvec_lbl.py <segments_file> <frame_size> <stride>")
print("You need to enter the segments file")
print("generated by generate_segments.py")
print("Second and third parameter need to be")
print("length and strade of the frame used")
print("to generate xvectors over.")
print("The length needs to be enterd in 10ms units")
print("A frame of 150ms therefore has needs to be entered as 15.")
print("Stride can be float values")
exit(1)
segments_file_name = sys.argv[1]
frame_size = int(sys.argv[2])
half_frame = frame_size/2
stride = float(sys.argv[3])
def read_segments(file_name):
convs = {}
segments = []
current_conv = ""
mfcc_pos = 0
for seg in open(file_name).readlines():
seg = seg.replace("\n", "").split(" ")
cid = seg[0]
sid = seg[1]
start = int(seg[2])
stop = int(seg[3])
if not current_conv == cid and not current_conv == "":
convs[current_conv] = segments
segments = []
mfcc_pos = 0
current_conv = cid
seg_dur = stop - start
segments.append({"sid":sid, "mfcc_pos":mfcc_pos, "time_pos":start, "dur":seg_dur})
mfcc_pos += seg_dur
if len(segments) > 0:
convs[current_conv] = segments
return convs
def get_mfcc_count_of_segments(segments):
count = 0
for seg in segments:
count += seg["dur"]
return count
def get_count_of_frames(mfcc_count):
return int((mfcc_count - frame_size) / stride) + 1
def get_speaker_of_segments(segments):
speaker = {}
i = 1
for s in segments:
if not s["sid"] in speaker:
speaker[s["sid"]] = i
i += 1
return speaker
def get_touching_segments(segments, start, stop):
touching_segments = []
for seg in segments:
if seg["mfcc_pos"] < stop and seg["mfcc_pos"] + seg["dur"] >= start:
touch_start = seg["time_pos"] if seg["mfcc_pos"] >= start else (seg["time_pos"] - seg["mfcc_pos"] + start)
touch_end = (seg["time_pos"] + seg["dur"]) if seg["mfcc_pos"] + seg["dur"] <= stop else (seg["time_pos"] - seg["mfcc_pos"] + stop)
touching_segments.append((seg, touch_start, touch_end))
return touching_segments
def label_frames_by_segments(conv_id, segments):
frames = np.zeros(get_count_of_frames(get_mfcc_count_of_segments(segments)))
speaker = get_speaker_of_segments(segments)
for i in range(0, frames.shape[0]):
frame_center = i * stride + half_frame
sids_of_frame = []
touch_starts_of_frame = []
touch_ends_of_frame = []
for seg in get_touching_segments(segments, frame_center - half_frame, frame_center + half_frame):
sids_of_frame.append(seg[0]["sid"])
touch_starts_of_frame.append(str(int(seg[1])))
touch_ends_of_frame.append(str(int(seg[2])))
sids_of_frame = "-".join(sids_of_frame)
touch_starts_of_frame = "-".join(touch_starts_of_frame)
touch_ends_of_frame = "-".join(touch_ends_of_frame)
print(f"{conv_id} {i} {sids_of_frame} {touch_starts_of_frame} {touch_ends_of_frame}")
convs = read_segments(segments_file_name)
for i in convs:
label_frames_by_segments(i, convs[i])
| true | true |
f71e8647524eb5aa1aee85b239a44b7b231aacd1 | 9,304 | py | Python | tests/test_data_collator.py | WERimagin/transformers | cc7d14511c647f8147494df72f8b0575015e37ab | [
"Apache-2.0"
] | 47 | 2021-04-16T22:29:25.000Z | 2022-02-11T08:19:13.000Z | tests/test_data_collator.py | WERimagin/transformers | cc7d14511c647f8147494df72f8b0575015e37ab | [
"Apache-2.0"
] | 12 | 2021-04-28T19:45:02.000Z | 2021-08-31T13:56:02.000Z | tests/test_data_collator.py | WERimagin/transformers | cc7d14511c647f8147494df72f8b0575015e37ab | [
"Apache-2.0"
] | 5 | 2021-04-28T21:54:15.000Z | 2022-02-11T07:48:17.000Z | import unittest
from transformers import AutoTokenizer, is_torch_available
from transformers.testing_utils import require_torch, slow
if is_torch_available():
import torch
from transformers import (
DataCollatorForLanguageModeling,
DataCollatorForNextSentencePrediction,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSOP,
GlueDataset,
GlueDataTrainingArguments,
LineByLineTextDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
default_data_collator,
)
PATH_SAMPLE_TEXT = "./tests/fixtures/sample_text.txt"
PATH_SAMPLE_TEXT_DIR = "./tests/fixtures/tests_samples/wiki_text"
@require_torch
class DataCollatorIntegrationTest(unittest.TestCase):
def test_default_with_dict(self):
features = [{"label": i, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features)
self.assertTrue(batch["labels"].equal(torch.tensor(list(range(8)))))
self.assertEqual(batch["labels"].dtype, torch.long)
self.assertEqual(batch["inputs"].shape, torch.Size([8, 6]))
# With label_ids
features = [{"label_ids": [0, 1, 2], "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features)
self.assertTrue(batch["labels"].equal(torch.tensor([[0, 1, 2]] * 8)))
self.assertEqual(batch["labels"].dtype, torch.long)
self.assertEqual(batch["inputs"].shape, torch.Size([8, 6]))
# Features can already be tensors
features = [{"label": i, "inputs": torch.randint(10, [10])} for i in range(8)]
batch = default_data_collator(features)
self.assertTrue(batch["labels"].equal(torch.tensor(list(range(8)))))
self.assertEqual(batch["labels"].dtype, torch.long)
self.assertEqual(batch["inputs"].shape, torch.Size([8, 10]))
# Labels can already be tensors
features = [{"label": torch.tensor(i), "inputs": torch.randint(10, [10])} for i in range(8)]
batch = default_data_collator(features)
self.assertEqual(batch["labels"].dtype, torch.long)
self.assertTrue(batch["labels"].equal(torch.tensor(list(range(8)))))
self.assertEqual(batch["labels"].dtype, torch.long)
self.assertEqual(batch["inputs"].shape, torch.Size([8, 10]))
def test_default_with_no_labels(self):
features = [{"label": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features)
self.assertTrue("labels" not in batch)
self.assertEqual(batch["inputs"].shape, torch.Size([8, 6]))
# With label_ids
features = [{"label_ids": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features)
self.assertTrue("labels" not in batch)
self.assertEqual(batch["inputs"].shape, torch.Size([8, 6]))
@slow
def test_default_classification(self):
MODEL_ID = "bert-base-cased-finetuned-mrpc"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
data_args = GlueDataTrainingArguments(
task_name="mrpc", data_dir="./tests/fixtures/tests_samples/MRPC", overwrite_cache=True
)
dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="dev")
data_collator = default_data_collator
batch = data_collator(dataset.features)
self.assertEqual(batch["labels"].dtype, torch.long)
@slow
def test_default_regression(self):
MODEL_ID = "distilroberta-base"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
data_args = GlueDataTrainingArguments(
task_name="sts-b", data_dir="./tests/fixtures/tests_samples/STS-B", overwrite_cache=True
)
dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="dev")
data_collator = default_data_collator
batch = data_collator(dataset.features)
self.assertEqual(batch["labels"].dtype, torch.float)
@slow
def test_lm_tokenizer_without_padding(self):
tokenizer = AutoTokenizer.from_pretrained("gpt2")
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False)
# ^ causal lm
dataset = LineByLineTextDataset(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512)
examples = [dataset[i] for i in range(len(dataset))]
with self.assertRaises(ValueError):
# Expect error due to padding token missing on gpt2:
data_collator(examples)
dataset = TextDataset(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512, overwrite_cache=True)
examples = [dataset[i] for i in range(len(dataset))]
batch = data_collator(examples)
self.assertIsInstance(batch, dict)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 512)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 512)))
@slow
def test_lm_tokenizer_with_padding(self):
tokenizer = AutoTokenizer.from_pretrained("distilroberta-base")
data_collator = DataCollatorForLanguageModeling(tokenizer)
# ^ masked lm
dataset = LineByLineTextDataset(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512)
examples = [dataset[i] for i in range(len(dataset))]
batch = data_collator(examples)
self.assertIsInstance(batch, dict)
self.assertEqual(batch["input_ids"].shape, torch.Size((31, 107)))
self.assertEqual(batch["labels"].shape, torch.Size((31, 107)))
dataset = TextDataset(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512, overwrite_cache=True)
examples = [dataset[i] for i in range(len(dataset))]
batch = data_collator(examples)
self.assertIsInstance(batch, dict)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 512)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 512)))
@slow
def test_plm(self):
tokenizer = AutoTokenizer.from_pretrained("xlnet-base-cased")
data_collator = DataCollatorForPermutationLanguageModeling(tokenizer)
# ^ permutation lm
dataset = LineByLineTextDataset(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512)
examples = [dataset[i] for i in range(len(dataset))]
batch = data_collator(examples)
self.assertIsInstance(batch, dict)
self.assertEqual(batch["input_ids"].shape, torch.Size((31, 112)))
self.assertEqual(batch["perm_mask"].shape, torch.Size((31, 112, 112)))
self.assertEqual(batch["target_mapping"].shape, torch.Size((31, 112, 112)))
self.assertEqual(batch["labels"].shape, torch.Size((31, 112)))
dataset = TextDataset(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512, overwrite_cache=True)
examples = [dataset[i] for i in range(len(dataset))]
batch = data_collator(examples)
self.assertIsInstance(batch, dict)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 512)))
self.assertEqual(batch["perm_mask"].shape, torch.Size((2, 512, 512)))
self.assertEqual(batch["target_mapping"].shape, torch.Size((2, 512, 512)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 512)))
example = [torch.randint(5, [5])]
with self.assertRaises(ValueError):
# Expect error due to odd sequence length
data_collator(example)
@slow
def test_nsp(self):
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
data_collator = DataCollatorForNextSentencePrediction(tokenizer)
dataset = TextDatasetForNextSentencePrediction(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512)
examples = [dataset[i] for i in range(len(dataset))]
batch = data_collator(examples)
self.assertIsInstance(batch, dict)
# Since there are randomly generated false samples, the total number of samples is not fixed.
total_samples = batch["input_ids"].shape[0]
self.assertEqual(batch["input_ids"].shape, torch.Size((total_samples, 512)))
self.assertEqual(batch["token_type_ids"].shape, torch.Size((total_samples, 512)))
self.assertEqual(batch["masked_lm_labels"].shape, torch.Size((total_samples, 512)))
self.assertEqual(batch["next_sentence_label"].shape, torch.Size((total_samples,)))
@slow
def test_sop(self):
tokenizer = AutoTokenizer.from_pretrained("albert-base-v2")
data_collator = DataCollatorForSOP(tokenizer)
dataset = LineByLineWithSOPTextDataset(tokenizer, file_dir=PATH_SAMPLE_TEXT_DIR, block_size=512)
examples = [dataset[i] for i in range(len(dataset))]
batch = data_collator(examples)
self.assertIsInstance(batch, dict)
# Since there are randomly generated false samples, the total number of samples is not fixed.
total_samples = batch["input_ids"].shape[0]
self.assertEqual(batch["input_ids"].shape, torch.Size((total_samples, 512)))
self.assertEqual(batch["token_type_ids"].shape, torch.Size((total_samples, 512)))
self.assertEqual(batch["labels"].shape, torch.Size((total_samples, 512)))
self.assertEqual(batch["sentence_order_label"].shape, torch.Size((total_samples,)))
| 47.228426 | 109 | 0.677343 | import unittest
from transformers import AutoTokenizer, is_torch_available
from transformers.testing_utils import require_torch, slow
if is_torch_available():
import torch
from transformers import (
DataCollatorForLanguageModeling,
DataCollatorForNextSentencePrediction,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSOP,
GlueDataset,
GlueDataTrainingArguments,
LineByLineTextDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
default_data_collator,
)
PATH_SAMPLE_TEXT = "./tests/fixtures/sample_text.txt"
PATH_SAMPLE_TEXT_DIR = "./tests/fixtures/tests_samples/wiki_text"
@require_torch
class DataCollatorIntegrationTest(unittest.TestCase):
def test_default_with_dict(self):
features = [{"label": i, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features)
self.assertTrue(batch["labels"].equal(torch.tensor(list(range(8)))))
self.assertEqual(batch["labels"].dtype, torch.long)
self.assertEqual(batch["inputs"].shape, torch.Size([8, 6]))
features = [{"label_ids": [0, 1, 2], "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features)
self.assertTrue(batch["labels"].equal(torch.tensor([[0, 1, 2]] * 8)))
self.assertEqual(batch["labels"].dtype, torch.long)
self.assertEqual(batch["inputs"].shape, torch.Size([8, 6]))
features = [{"label": i, "inputs": torch.randint(10, [10])} for i in range(8)]
batch = default_data_collator(features)
self.assertTrue(batch["labels"].equal(torch.tensor(list(range(8)))))
self.assertEqual(batch["labels"].dtype, torch.long)
self.assertEqual(batch["inputs"].shape, torch.Size([8, 10]))
features = [{"label": torch.tensor(i), "inputs": torch.randint(10, [10])} for i in range(8)]
batch = default_data_collator(features)
self.assertEqual(batch["labels"].dtype, torch.long)
self.assertTrue(batch["labels"].equal(torch.tensor(list(range(8)))))
self.assertEqual(batch["labels"].dtype, torch.long)
self.assertEqual(batch["inputs"].shape, torch.Size([8, 10]))
def test_default_with_no_labels(self):
features = [{"label": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features)
self.assertTrue("labels" not in batch)
self.assertEqual(batch["inputs"].shape, torch.Size([8, 6]))
features = [{"label_ids": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features)
self.assertTrue("labels" not in batch)
self.assertEqual(batch["inputs"].shape, torch.Size([8, 6]))
@slow
def test_default_classification(self):
MODEL_ID = "bert-base-cased-finetuned-mrpc"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
data_args = GlueDataTrainingArguments(
task_name="mrpc", data_dir="./tests/fixtures/tests_samples/MRPC", overwrite_cache=True
)
dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="dev")
data_collator = default_data_collator
batch = data_collator(dataset.features)
self.assertEqual(batch["labels"].dtype, torch.long)
@slow
def test_default_regression(self):
MODEL_ID = "distilroberta-base"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
data_args = GlueDataTrainingArguments(
task_name="sts-b", data_dir="./tests/fixtures/tests_samples/STS-B", overwrite_cache=True
)
dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="dev")
data_collator = default_data_collator
batch = data_collator(dataset.features)
self.assertEqual(batch["labels"].dtype, torch.float)
@slow
def test_lm_tokenizer_without_padding(self):
tokenizer = AutoTokenizer.from_pretrained("gpt2")
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False)
dataset = LineByLineTextDataset(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512)
examples = [dataset[i] for i in range(len(dataset))]
with self.assertRaises(ValueError):
data_collator(examples)
dataset = TextDataset(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512, overwrite_cache=True)
examples = [dataset[i] for i in range(len(dataset))]
batch = data_collator(examples)
self.assertIsInstance(batch, dict)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 512)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 512)))
@slow
def test_lm_tokenizer_with_padding(self):
tokenizer = AutoTokenizer.from_pretrained("distilroberta-base")
data_collator = DataCollatorForLanguageModeling(tokenizer)
dataset = LineByLineTextDataset(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512)
examples = [dataset[i] for i in range(len(dataset))]
batch = data_collator(examples)
self.assertIsInstance(batch, dict)
self.assertEqual(batch["input_ids"].shape, torch.Size((31, 107)))
self.assertEqual(batch["labels"].shape, torch.Size((31, 107)))
dataset = TextDataset(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512, overwrite_cache=True)
examples = [dataset[i] for i in range(len(dataset))]
batch = data_collator(examples)
self.assertIsInstance(batch, dict)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 512)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 512)))
@slow
def test_plm(self):
tokenizer = AutoTokenizer.from_pretrained("xlnet-base-cased")
data_collator = DataCollatorForPermutationLanguageModeling(tokenizer)
dataset = LineByLineTextDataset(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512)
examples = [dataset[i] for i in range(len(dataset))]
batch = data_collator(examples)
self.assertIsInstance(batch, dict)
self.assertEqual(batch["input_ids"].shape, torch.Size((31, 112)))
self.assertEqual(batch["perm_mask"].shape, torch.Size((31, 112, 112)))
self.assertEqual(batch["target_mapping"].shape, torch.Size((31, 112, 112)))
self.assertEqual(batch["labels"].shape, torch.Size((31, 112)))
dataset = TextDataset(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512, overwrite_cache=True)
examples = [dataset[i] for i in range(len(dataset))]
batch = data_collator(examples)
self.assertIsInstance(batch, dict)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 512)))
self.assertEqual(batch["perm_mask"].shape, torch.Size((2, 512, 512)))
self.assertEqual(batch["target_mapping"].shape, torch.Size((2, 512, 512)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 512)))
example = [torch.randint(5, [5])]
with self.assertRaises(ValueError):
data_collator(example)
@slow
def test_nsp(self):
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
data_collator = DataCollatorForNextSentencePrediction(tokenizer)
dataset = TextDatasetForNextSentencePrediction(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512)
examples = [dataset[i] for i in range(len(dataset))]
batch = data_collator(examples)
self.assertIsInstance(batch, dict)
total_samples = batch["input_ids"].shape[0]
self.assertEqual(batch["input_ids"].shape, torch.Size((total_samples, 512)))
self.assertEqual(batch["token_type_ids"].shape, torch.Size((total_samples, 512)))
self.assertEqual(batch["masked_lm_labels"].shape, torch.Size((total_samples, 512)))
self.assertEqual(batch["next_sentence_label"].shape, torch.Size((total_samples,)))
@slow
def test_sop(self):
tokenizer = AutoTokenizer.from_pretrained("albert-base-v2")
data_collator = DataCollatorForSOP(tokenizer)
dataset = LineByLineWithSOPTextDataset(tokenizer, file_dir=PATH_SAMPLE_TEXT_DIR, block_size=512)
examples = [dataset[i] for i in range(len(dataset))]
batch = data_collator(examples)
self.assertIsInstance(batch, dict)
total_samples = batch["input_ids"].shape[0]
self.assertEqual(batch["input_ids"].shape, torch.Size((total_samples, 512)))
self.assertEqual(batch["token_type_ids"].shape, torch.Size((total_samples, 512)))
self.assertEqual(batch["labels"].shape, torch.Size((total_samples, 512)))
self.assertEqual(batch["sentence_order_label"].shape, torch.Size((total_samples,)))
| true | true |
f71e87941ec4d26a71c4d0edea7028a110dd3419 | 1,040 | py | Python | examples/tensorflow_example.py | lucko515/cnn-raccoon | e1c46544372751d82cc0c0f9cb2218d881a21f70 | [
"Apache-2.0"
] | 30 | 2021-01-08T11:50:54.000Z | 2021-08-01T07:31:54.000Z | examples/tensorflow_example.py | lucko515/cnn-raccoon | e1c46544372751d82cc0c0f9cb2218d881a21f70 | [
"Apache-2.0"
] | 1 | 2021-01-24T23:10:38.000Z | 2021-01-24T23:10:38.000Z | examples/tensorflow_example.py | lucko515/cnn-raccoon | e1c46544372751d82cc0c0f9cb2218d881a21f70 | [
"Apache-2.0"
] | 4 | 2021-01-08T11:21:30.000Z | 2021-02-26T16:06:37.000Z | import tensorflow as tf
model = tf.keras.models.Sequential([
# YOUR CODE HERE
tf.keras.layers.BatchNormalization(input_shape=(32, 32, 3)),
tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation="relu"),
tf.keras.layers.MaxPool2D(2, 2),
tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation="relu"),
tf.keras.layers.MaxPool2D(2, 2),
tf.keras.layers.Conv2D(filters=256, kernel_size=(3, 3), activation="relu"),
tf.keras.layers.MaxPool2D(2, 2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(units=128, activation="relu"),
tf.keras.layers.Dense(10, activation="softmax")
])
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["acc"])
from tensorflow.keras.datasets import cifar10
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
from cnn_raccoon import inspector
inspector(model=model, images=X_train[:10], number_of_classes=10, engine="keras")
| 40 | 88 | 0.682692 | import tensorflow as tf
model = tf.keras.models.Sequential([
tf.keras.layers.BatchNormalization(input_shape=(32, 32, 3)),
tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation="relu"),
tf.keras.layers.MaxPool2D(2, 2),
tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation="relu"),
tf.keras.layers.MaxPool2D(2, 2),
tf.keras.layers.Conv2D(filters=256, kernel_size=(3, 3), activation="relu"),
tf.keras.layers.MaxPool2D(2, 2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(units=128, activation="relu"),
tf.keras.layers.Dense(10, activation="softmax")
])
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["acc"])
from tensorflow.keras.datasets import cifar10
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
from cnn_raccoon import inspector
inspector(model=model, images=X_train[:10], number_of_classes=10, engine="keras")
| true | true |
f71e879b585ccedee624c36d400debbe26d4a4ba | 2,235 | py | Python | chaospy/distributions/collection/f.py | krystophny/chaospy | e09f8e3f6dfc26145f15774edd5b03665140712f | [
"MIT"
] | 1 | 2019-12-20T00:32:44.000Z | 2019-12-20T00:32:44.000Z | chaospy/distributions/collection/f.py | QianWanghhu/chaospy | 18ff6c4fc56c632825e53fb24e17de51a7febd7d | [
"MIT"
] | null | null | null | chaospy/distributions/collection/f.py | QianWanghhu/chaospy | 18ff6c4fc56c632825e53fb24e17de51a7febd7d | [
"MIT"
] | null | null | null | """(Non-central) F distribution."""
import numpy
from scipy import special
from ..baseclass import Dist
from ..operators.addition import Add
class f(Dist):
"""F distribution."""
def __init__(self, dfn, dfd, nc):
Dist.__init__(self, dfn=dfn, dfd=dfd, nc=nc)
def _pdf(self, x, dfn, dfd, nc):
n1, n2 = dfn, dfd
term = -nc/2.+nc*n1*x/(2*(n2+n1*x)) + special.gammaln(n1/2.)+special.gammaln(1+n2/2.)
term -= special.gammaln((n1+n2)/2.)
Px = numpy.exp(term)
Px *= n1**(n1/2.) * n2**(n2/2.) * x**(n1/2.-1)
Px *= (n2+n1*x)**(-(n1+n2)/2.)
Px *= special.assoc_laguerre(-nc*n1*x/(2.*(n2+n1*x)), n2/2., n1/2.-1)
Px /= special.beta(n1/2., n2/2.)
return Px
def _cdf(self, x, dfn, dfd, nc):
return special.ncfdtr(dfn, dfd, nc, x)
def _ppf(self, q, dfn, dfd, nc):
return special.ncfdtri(dfn, dfd, nc, q)
def _bnd(self, x, dfn, dfd, nc):
return 0.0, self._ppf(1-1e-10, dfn, dfd, nc)
class F(Add):
"""
(Non-central) F or Fisher-Snedecor distribution.
Args:
n (float, Dist) : Degres of freedom for numerator
m (float, Dist) : Degres of freedom for denominator
scale (float, Dist) : Scaling parameter
shift (float, Dist) : Location parameter
nc (float, Dist) : Non-centrality parameter
Examples:
>>> distribution = chaospy.F(3, 3, 2, 1, 1)
>>> print(distribution)
F(m=3, n=3, nc=1, scale=2, shift=1)
>>> q = numpy.linspace(0, 1, 6)[1:-1]
>>> print(numpy.around(distribution.inv(q), 4))
[1.9336 2.9751 4.7028 8.8521]
>>> print(numpy.around(distribution.fwd(distribution.inv(q)), 4))
[0.2 0.4 0.6 0.8]
>>> print(numpy.around(distribution.pdf(distribution.inv(q)), 4))
[0.2277 0.1572 0.0837 0.027 ]
>>> print(numpy.around(distribution.sample(4), 4))
[ 5.4212 1.5739 25.7656 3.5586]
>>> print(distribution.mom(1) > 10**8) # undefined
True
"""
def __init__(self, n=1, m=1, scale=1, shift=0, nc=0):
self._repr = {"n": n, "m": m, "scale": scale, "shift": shift, "nc": nc}
Add.__init__(self, left=f(n, m, nc)*scale, right=shift)
| 33.358209 | 93 | 0.555257 | import numpy
from scipy import special
from ..baseclass import Dist
from ..operators.addition import Add
class f(Dist):
def __init__(self, dfn, dfd, nc):
Dist.__init__(self, dfn=dfn, dfd=dfd, nc=nc)
def _pdf(self, x, dfn, dfd, nc):
n1, n2 = dfn, dfd
term = -nc/2.+nc*n1*x/(2*(n2+n1*x)) + special.gammaln(n1/2.)+special.gammaln(1+n2/2.)
term -= special.gammaln((n1+n2)/2.)
Px = numpy.exp(term)
Px *= n1**(n1/2.) * n2**(n2/2.) * x**(n1/2.-1)
Px *= (n2+n1*x)**(-(n1+n2)/2.)
Px *= special.assoc_laguerre(-nc*n1*x/(2.*(n2+n1*x)), n2/2., n1/2.-1)
Px /= special.beta(n1/2., n2/2.)
return Px
def _cdf(self, x, dfn, dfd, nc):
return special.ncfdtr(dfn, dfd, nc, x)
def _ppf(self, q, dfn, dfd, nc):
return special.ncfdtri(dfn, dfd, nc, q)
def _bnd(self, x, dfn, dfd, nc):
return 0.0, self._ppf(1-1e-10, dfn, dfd, nc)
class F(Add):
def __init__(self, n=1, m=1, scale=1, shift=0, nc=0):
self._repr = {"n": n, "m": m, "scale": scale, "shift": shift, "nc": nc}
Add.__init__(self, left=f(n, m, nc)*scale, right=shift)
| true | true |
f71e88c0491436f3ee784fb6beab57efad7201e5 | 7,966 | py | Python | third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py | google-ar/chromium | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 777 | 2017-08-29T15:15:32.000Z | 2022-03-21T05:29:41.000Z | third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py | harrymarkovskiy/WebARonARCore | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 66 | 2017-08-30T18:31:18.000Z | 2021-08-02T10:59:35.000Z | third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py | harrymarkovskiy/WebARonARCore | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 123 | 2017-08-30T01:19:34.000Z | 2022-03-17T22:55:31.000Z | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for MockDRT."""
import io
import optparse
import unittest
from webkitpy.common.system.system_host_mock import MockSystemHost
from webkitpy.layout_tests.port import mock_drt
from webkitpy.layout_tests.port import port_testcase
from webkitpy.layout_tests.port import test
from webkitpy.layout_tests.port.factory import PortFactory
class MockDRTPortTest(port_testcase.PortTestCase):
def make_port(self, host=None, options=optparse.Values({'configuration': 'Release'})):
host = host or MockSystemHost()
test.add_unit_tests_to_mock_filesystem(host.filesystem)
return mock_drt.MockDRTPort(host, port_name='mock-mac', options=options)
def test_port_name_in_constructor(self):
self.assertTrue(mock_drt.MockDRTPort(MockSystemHost(), port_name='mock-test'))
def test_check_sys_deps(self):
pass
def test_default_max_locked_shards(self):
pass
def test_diff_image(self):
pass
def test_diff_image_crashed(self):
pass
def test_uses_apache(self):
pass
def test_get_crash_log(self):
pass
def test_check_build(self):
pass
def test_virtual_test_suites(self):
pass
def test_path_to_apache_config_file(self):
pass
class MockDRTTest(unittest.TestCase):
def input_line(self, port, test_name, pixel_tests, checksum=None):
url = port.create_driver(0).test_to_uri(test_name)
if url.startswith('file://'):
url = url[len('file://'):]
if pixel_tests:
url += "'--pixel-test"
if checksum:
url += "'" + checksum
return url + '\n'
def make_drt(self, options, args, host, stdin, stdout, stderr):
return mock_drt.MockDRT(options, args, host, stdin, stdout, stderr)
def make_input_output(self, port, test_name, pixel_tests,
expected_checksum, drt_output, drt_input=None, expected_text=None):
if pixel_tests:
if not expected_checksum:
expected_checksum = port.expected_checksum(test_name)
if not drt_input:
drt_input = self.input_line(port, test_name, pixel_tests, expected_checksum)
text_output = expected_text or port.expected_text(test_name) or ''
if not drt_output:
drt_output = self.expected_output(port, test_name, pixel_tests,
text_output, expected_checksum)
return (drt_input, drt_output)
def expected_output(self, port, test_name, pixel_tests, text_output, expected_checksum):
output = ['#READY\n', 'Content-Type: text/plain\n']
if text_output:
output.append(text_output)
output.append('#EOF\n')
if pixel_tests and expected_checksum:
output.extend(['\n',
'ActualHash: %s\n' % expected_checksum,
'ExpectedHash: %s\n' % expected_checksum])
output.append('#EOF\n')
return output
def assertTest(self, test_name, pixel_tests, expected_checksum=None, drt_output=None, host=None, expected_text=None):
port_name = 'test'
host = host or MockSystemHost()
test.add_unit_tests_to_mock_filesystem(host.filesystem)
port = PortFactory(host).get(port_name)
drt_input, drt_output = self.make_input_output(
port, test_name, pixel_tests, expected_checksum, drt_output, drt_input=None, expected_text=expected_text)
args = ['--run-layout-test', '--platform', port_name, '-']
stdin = io.BytesIO(drt_input)
stdout = io.BytesIO()
stderr = io.BytesIO()
options, args = mock_drt.parse_options(args)
drt = self.make_drt(options, args, host, stdin, stdout, stderr)
res = drt.run()
self.assertEqual(res, 0)
self.assertEqual(stdout.getvalue(), ''.join(drt_output))
self.assertEqual(stderr.getvalue(), '#EOF\n')
def test_main(self):
host = MockSystemHost()
test.add_unit_tests_to_mock_filesystem(host.filesystem)
stdin = io.BytesIO()
stdout = io.BytesIO()
stderr = io.BytesIO()
res = mock_drt.main(['--run-layout-test', '--platform', 'test', '-'],
host, stdin, stdout, stderr)
self.assertEqual(res, 0)
self.assertEqual(stdout.getvalue(), '#READY\n')
self.assertEqual(stderr.getvalue(), '')
self.assertEqual(host.filesystem.written_files, {})
def test_pixeltest_passes(self):
# This also tests that we handle HTTP: test URLs properly.
self.assertTest('http/tests/passes/text.html', True)
    def test_pixeltest__fails(self):
        """A failing pixel test still reports actual/expected hashes in output."""
        self.assertTest('failures/expected/image_checksum.html',
                        pixel_tests=True,
                        expected_checksum='image_checksum-checksum',
                        drt_output=[
                            '#READY\n',
                            'Content-Type: text/plain\n',
                            'image_checksum-txt',
                            '#EOF\n',
                            '\n',
                            'ActualHash: image_checksum-checksum\n',
                            'ExpectedHash: image_checksum-checksum\n',
                            '#EOF\n',
                        ])
    def test_textonly(self):
        """Text-only run (pixel tests disabled)."""
        self.assertTest('passes/image.html', False)

    def test_checksum_in_png(self):
        """Pixel test whose checksum is embedded in the image itself."""
        self.assertTest('passes/checksum_in_image.html', True)

    def test_reftest_match(self):
        """Reference test that matches its reference output."""
        self.assertTest('passes/reftest.html', True, expected_checksum='mock-checksum', expected_text='reference text\n')

    def test_reftest_mismatch(self):
        """Reference test expected to mismatch its reference output."""
        self.assertTest('passes/mismatch.html', True, expected_checksum='mock-checksum', expected_text='reference text\n')
    def test_audio(self):
        """Audio test output is emitted as base64-encoded audio/wav content."""
        self.assertTest('passes/audio.html',
                        pixel_tests=True,
                        drt_output=[
                            '#READY\n',
                            'Content-Type: audio/wav\n',
                            'Content-Transfer-Encoding: base64\n',
                            'YXVkaW8td2F2',
                            '\n',
                            '#EOF\n',
                            '#EOF\n',
                        ])

    def test_virtual(self):
        """A virtual test suite URL is handled like a normal passing test."""
        self.assertTest('virtual/passes/text.html', True)
| 39.435644 | 122 | 0.63407 |
import io
import optparse
import unittest
from webkitpy.common.system.system_host_mock import MockSystemHost
from webkitpy.layout_tests.port import mock_drt
from webkitpy.layout_tests.port import port_testcase
from webkitpy.layout_tests.port import test
from webkitpy.layout_tests.port.factory import PortFactory
class MockDRTPortTest(port_testcase.PortTestCase):
def make_port(self, host=None, options=optparse.Values({'configuration': 'Release'})):
host = host or MockSystemHost()
test.add_unit_tests_to_mock_filesystem(host.filesystem)
return mock_drt.MockDRTPort(host, port_name='mock-mac', options=options)
def test_port_name_in_constructor(self):
self.assertTrue(mock_drt.MockDRTPort(MockSystemHost(), port_name='mock-test'))
def test_check_sys_deps(self):
pass
def test_default_max_locked_shards(self):
pass
def test_diff_image(self):
pass
def test_diff_image_crashed(self):
pass
def test_uses_apache(self):
pass
def test_get_crash_log(self):
pass
def test_check_build(self):
pass
def test_virtual_test_suites(self):
pass
def test_path_to_apache_config_file(self):
pass
class MockDRTTest(unittest.TestCase):
def input_line(self, port, test_name, pixel_tests, checksum=None):
url = port.create_driver(0).test_to_uri(test_name)
if url.startswith('file://'):
url = url[len('file://'):]
if pixel_tests:
url += "'--pixel-test"
if checksum:
url += "'" + checksum
return url + '\n'
def make_drt(self, options, args, host, stdin, stdout, stderr):
return mock_drt.MockDRT(options, args, host, stdin, stdout, stderr)
def make_input_output(self, port, test_name, pixel_tests,
expected_checksum, drt_output, drt_input=None, expected_text=None):
if pixel_tests:
if not expected_checksum:
expected_checksum = port.expected_checksum(test_name)
if not drt_input:
drt_input = self.input_line(port, test_name, pixel_tests, expected_checksum)
text_output = expected_text or port.expected_text(test_name) or ''
if not drt_output:
drt_output = self.expected_output(port, test_name, pixel_tests,
text_output, expected_checksum)
return (drt_input, drt_output)
def expected_output(self, port, test_name, pixel_tests, text_output, expected_checksum):
output = ['#READY\n', 'Content-Type: text/plain\n']
if text_output:
output.append(text_output)
output.append('#EOF\n')
if pixel_tests and expected_checksum:
output.extend(['\n',
'ActualHash: %s\n' % expected_checksum,
'ExpectedHash: %s\n' % expected_checksum])
output.append('#EOF\n')
return output
def assertTest(self, test_name, pixel_tests, expected_checksum=None, drt_output=None, host=None, expected_text=None):
port_name = 'test'
host = host or MockSystemHost()
test.add_unit_tests_to_mock_filesystem(host.filesystem)
port = PortFactory(host).get(port_name)
drt_input, drt_output = self.make_input_output(
port, test_name, pixel_tests, expected_checksum, drt_output, drt_input=None, expected_text=expected_text)
args = ['--run-layout-test', '--platform', port_name, '-']
stdin = io.BytesIO(drt_input)
stdout = io.BytesIO()
stderr = io.BytesIO()
options, args = mock_drt.parse_options(args)
drt = self.make_drt(options, args, host, stdin, stdout, stderr)
res = drt.run()
self.assertEqual(res, 0)
self.assertEqual(stdout.getvalue(), ''.join(drt_output))
self.assertEqual(stderr.getvalue(), '#EOF\n')
def test_main(self):
host = MockSystemHost()
test.add_unit_tests_to_mock_filesystem(host.filesystem)
stdin = io.BytesIO()
stdout = io.BytesIO()
stderr = io.BytesIO()
res = mock_drt.main(['--run-layout-test', '--platform', 'test', '-'],
host, stdin, stdout, stderr)
self.assertEqual(res, 0)
self.assertEqual(stdout.getvalue(), '#READY\n')
self.assertEqual(stderr.getvalue(), '')
self.assertEqual(host.filesystem.written_files, {})
def test_pixeltest_passes(self):
self.assertTest('http/tests/passes/text.html', True)
def test_pixeltest__fails(self):
self.assertTest('failures/expected/image_checksum.html',
pixel_tests=True,
expected_checksum='image_checksum-checksum',
drt_output=[
'#READY\n',
'Content-Type: text/plain\n',
'image_checksum-txt',
'#EOF\n',
'\n',
'ActualHash: image_checksum-checksum\n',
'ExpectedHash: image_checksum-checksum\n',
'#EOF\n',
])
def test_textonly(self):
self.assertTest('passes/image.html', False)
def test_checksum_in_png(self):
self.assertTest('passes/checksum_in_image.html', True)
def test_reftest_match(self):
self.assertTest('passes/reftest.html', True, expected_checksum='mock-checksum', expected_text='reference text\n')
def test_reftest_mismatch(self):
self.assertTest('passes/mismatch.html', True, expected_checksum='mock-checksum', expected_text='reference text\n')
def test_audio(self):
self.assertTest('passes/audio.html',
pixel_tests=True,
drt_output=[
'#READY\n',
'Content-Type: audio/wav\n',
'Content-Transfer-Encoding: base64\n',
'YXVkaW8td2F2',
'\n',
'#EOF\n',
'#EOF\n',
])
def test_virtual(self):
self.assertTest('virtual/passes/text.html', True)
| true | true |
f71e88e8cd933bb1d1bfa245c1fa64a7035fb1eb | 32 | py | Python | helpers/__init__.py | Rensselaer-AI-Leage/GeneralizedGameServer | e6e97371ca5697bb4842911dbf0f961058f09b9e | [
"MIT"
] | null | null | null | helpers/__init__.py | Rensselaer-AI-Leage/GeneralizedGameServer | e6e97371ca5697bb4842911dbf0f961058f09b9e | [
"MIT"
] | 1 | 2016-03-29T22:49:44.000Z | 2016-03-29T22:58:04.000Z | helpers/__init__.py | Rensselaer-AI-League/GeneralizedGameServer | e6e97371ca5697bb4842911dbf0f961058f09b9e | [
"MIT"
] | null | null | null | __all__ = ["config", "message"]
| 16 | 31 | 0.625 | __all__ = ["config", "message"]
| true | true |
f71e89961dbf08ac30188dfd72f19f86d8c382f3 | 1,626 | py | Python | pydocmd/preprocessors/smart.py | vemel/pydoc-markdown | 7cd22c2ec8110df5a67205b7a641581914d0b45a | [
"MIT"
] | 1 | 2021-02-16T10:01:34.000Z | 2021-02-16T10:01:34.000Z | pydocmd/preprocessors/smart.py | vemel/pydoc-markdown | 7cd22c2ec8110df5a67205b7a641581914d0b45a | [
"MIT"
] | null | null | null | pydocmd/preprocessors/smart.py | vemel/pydoc-markdown | 7cd22c2ec8110df5a67205b7a641581914d0b45a | [
"MIT"
] | null | null | null | from pydocmd.preprocessors.rst import Preprocessor as RSTPreprocessor
from pydocmd.preprocessors.google import Preprocessor as GooglePreprocessor
class Preprocessor(object):
"""
This class implements the preprocessor for restructured text and google.
"""
def __init__(self, config=None):
self.config = config
self._google_preprocessor = GooglePreprocessor(config)
self._rst_preprocessor = RSTPreprocessor(config)
def is_google_format(self, docstring):
"""
Check if `docstring` is written in Google docstring format
https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html
"""
lines = [line.strip() for line in docstring.split('\n')]
google_section_names = self._google_preprocessor.get_section_names()
for section_name in google_section_names:
if section_name in lines:
return True
return False
def preprocess_section(self, section):
"""
Preprocessors a given section into it's components.
"""
if self.is_google_format(section.content):
return self._google_preprocessor.preprocess_section(section)
return self._rst_preprocessor.preprocess_section(section)
@staticmethod
def _append_section(lines, key, sections):
section = sections.get(key)
if not section:
return
if lines and lines[-1]:
lines.append('')
# add an extra line because of markdown syntax
lines.extend(['**{}**:'.format(key), ''])
lines.extend(section)
| 32.52 | 83 | 0.660517 | from pydocmd.preprocessors.rst import Preprocessor as RSTPreprocessor
from pydocmd.preprocessors.google import Preprocessor as GooglePreprocessor
class Preprocessor(object):
def __init__(self, config=None):
self.config = config
self._google_preprocessor = GooglePreprocessor(config)
self._rst_preprocessor = RSTPreprocessor(config)
def is_google_format(self, docstring):
lines = [line.strip() for line in docstring.split('\n')]
google_section_names = self._google_preprocessor.get_section_names()
for section_name in google_section_names:
if section_name in lines:
return True
return False
def preprocess_section(self, section):
if self.is_google_format(section.content):
return self._google_preprocessor.preprocess_section(section)
return self._rst_preprocessor.preprocess_section(section)
@staticmethod
def _append_section(lines, key, sections):
section = sections.get(key)
if not section:
return
if lines and lines[-1]:
lines.append('')
lines.extend(['**{}**:'.format(key), ''])
lines.extend(section)
| true | true |
f71e89bcea253798193ff85e6610a0a39c8656d1 | 21,687 | py | Python | ucsmsdk/mometa/sw/SwAccessDomain.py | Kego/ucsmsdk | 244f283a5c295cf746110bb96686d079b19927ce | [
"Apache-2.0"
] | 78 | 2015-11-30T14:10:05.000Z | 2022-02-13T00:29:08.000Z | ucsmsdk/mometa/sw/SwAccessDomain.py | Kego/ucsmsdk | 244f283a5c295cf746110bb96686d079b19927ce | [
"Apache-2.0"
] | 113 | 2015-11-20T09:42:46.000Z | 2022-03-16T16:53:29.000Z | ucsmsdk/mometa/sw/SwAccessDomain.py | Kego/ucsmsdk | 244f283a5c295cf746110bb96686d079b19927ce | [
"Apache-2.0"
] | 86 | 2015-12-12T08:22:18.000Z | 2022-01-23T03:56:34.000Z | """This module contains the general information for SwAccessDomain ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class SwAccessDomainConsts:
FSM_PREV_DEPLOY_BEGIN = "DeployBegin"
FSM_PREV_DEPLOY_FAIL = "DeployFail"
FSM_PREV_DEPLOY_SUCCESS = "DeploySuccess"
FSM_PREV_DEPLOY_UPDATE_CONNECTIVITY = "DeployUpdateConnectivity"
FSM_PREV_NOP = "nop"
FSM_RMT_INV_ERR_CODE_ERR_2FA_AUTH_RETRY = "ERR-2fa-auth-retry"
FSM_RMT_INV_ERR_CODE_ERR_ACTIVATE_FAILED = "ERR-ACTIVATE-failed"
FSM_RMT_INV_ERR_CODE_ERR_ACTIVATE_IN_PROGRESS = "ERR-ACTIVATE-in-progress"
FSM_RMT_INV_ERR_CODE_ERR_ACTIVATE_RETRY = "ERR-ACTIVATE-retry"
FSM_RMT_INV_ERR_CODE_ERR_BIOS_TOKENS_OLD_BIOS = "ERR-BIOS-TOKENS-OLD-BIOS"
FSM_RMT_INV_ERR_CODE_ERR_BIOS_TOKENS_OLD_CIMC = "ERR-BIOS-TOKENS-OLD-CIMC"
FSM_RMT_INV_ERR_CODE_ERR_BIOS_NETWORK_BOOT_ORDER_NOT_FOUND = "ERR-BIOS-network-boot-order-not-found"
FSM_RMT_INV_ERR_CODE_ERR_BOARDCTRLUPDATE_IGNORE = "ERR-BOARDCTRLUPDATE-ignore"
FSM_RMT_INV_ERR_CODE_ERR_DIAG_CANCELLED = "ERR-DIAG-cancelled"
FSM_RMT_INV_ERR_CODE_ERR_DIAG_FSM_RESTARTED = "ERR-DIAG-fsm-restarted"
FSM_RMT_INV_ERR_CODE_ERR_DIAG_TEST_FAILED = "ERR-DIAG-test-failed"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_AUTHENTICATION_FAILURE = "ERR-DNLD-authentication-failure"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_HOSTKEY_MISMATCH = "ERR-DNLD-hostkey-mismatch"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_INVALID_IMAGE = "ERR-DNLD-invalid-image"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_NO_FILE = "ERR-DNLD-no-file"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_NO_SPACE = "ERR-DNLD-no-space"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_USB_UNMOUNTED = "ERR-DNLD-usb-unmounted"
FSM_RMT_INV_ERR_CODE_ERR_DNS_DELETE_ERROR = "ERR-DNS-delete-error"
FSM_RMT_INV_ERR_CODE_ERR_DNS_GET_ERROR = "ERR-DNS-get-error"
FSM_RMT_INV_ERR_CODE_ERR_DNS_SET_ERROR = "ERR-DNS-set-error"
FSM_RMT_INV_ERR_CODE_ERR_DIAGNOSTICS_IN_PROGRESS = "ERR-Diagnostics-in-progress"
FSM_RMT_INV_ERR_CODE_ERR_DIAGNOSTICS_MEMTEST_IN_PROGRESS = "ERR-Diagnostics-memtest-in-progress"
FSM_RMT_INV_ERR_CODE_ERR_DIAGNOSTICS_NETWORK_IN_PROGRESS = "ERR-Diagnostics-network-in-progress"
FSM_RMT_INV_ERR_CODE_ERR_FILTER_ILLEGAL_FORMAT = "ERR-FILTER-illegal-format"
FSM_RMT_INV_ERR_CODE_ERR_FSM_NO_SUCH_STATE = "ERR-FSM-no-such-state"
FSM_RMT_INV_ERR_CODE_ERR_HOST_FRU_IDENTITY_MISMATCH = "ERR-HOST-fru-identity-mismatch"
FSM_RMT_INV_ERR_CODE_ERR_HTTP_SET_ERROR = "ERR-HTTP-set-error"
FSM_RMT_INV_ERR_CODE_ERR_HTTPS_SET_ERROR = "ERR-HTTPS-set-error"
FSM_RMT_INV_ERR_CODE_ERR_IBMC_ANALYZE_RESULTS = "ERR-IBMC-analyze-results"
FSM_RMT_INV_ERR_CODE_ERR_IBMC_CONNECT_ERROR = "ERR-IBMC-connect-error"
FSM_RMT_INV_ERR_CODE_ERR_IBMC_CONNECTOR_INFO_RETRIEVAL_ERROR = "ERR-IBMC-connector-info-retrieval-error"
FSM_RMT_INV_ERR_CODE_ERR_IBMC_FRU_RETRIEVAL_ERROR = "ERR-IBMC-fru-retrieval-error"
FSM_RMT_INV_ERR_CODE_ERR_IBMC_INVALID_END_POINT_CONFIG = "ERR-IBMC-invalid-end-point-config"
FSM_RMT_INV_ERR_CODE_ERR_IBMC_RESULTS_NOT_READY = "ERR-IBMC-results-not-ready"
FSM_RMT_INV_ERR_CODE_ERR_MAX_SUBSCRIPTIONS_ALLOWED_ERROR = "ERR-MAX-subscriptions-allowed-error"
FSM_RMT_INV_ERR_CODE_ERR_MO_CONFIG_CHILD_OBJECT_CANT_BE_CONFIGURED = "ERR-MO-CONFIG-child-object-cant-be-configured"
FSM_RMT_INV_ERR_CODE_ERR_MO_META_NO_SUCH_OBJECT_CLASS = "ERR-MO-META-no-such-object-class"
FSM_RMT_INV_ERR_CODE_ERR_MO_PROPERTY_NO_SUCH_PROPERTY = "ERR-MO-PROPERTY-no-such-property"
FSM_RMT_INV_ERR_CODE_ERR_MO_PROPERTY_VALUE_OUT_OF_RANGE = "ERR-MO-PROPERTY-value-out-of-range"
FSM_RMT_INV_ERR_CODE_ERR_MO_ACCESS_DENIED = "ERR-MO-access-denied"
FSM_RMT_INV_ERR_CODE_ERR_MO_DELETION_RULE_VIOLATION = "ERR-MO-deletion-rule-violation"
FSM_RMT_INV_ERR_CODE_ERR_MO_DUPLICATE_OBJECT = "ERR-MO-duplicate-object"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_CONTAINMENT = "ERR-MO-illegal-containment"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_CREATION = "ERR-MO-illegal-creation"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_ITERATOR_STATE = "ERR-MO-illegal-iterator-state"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_OBJECT_LIFECYCLE_TRANSITION = "ERR-MO-illegal-object-lifecycle-transition"
FSM_RMT_INV_ERR_CODE_ERR_MO_NAMING_RULE_VIOLATION = "ERR-MO-naming-rule-violation"
FSM_RMT_INV_ERR_CODE_ERR_MO_OBJECT_NOT_FOUND = "ERR-MO-object-not-found"
FSM_RMT_INV_ERR_CODE_ERR_MO_RESOURCE_ALLOCATION = "ERR-MO-resource-allocation"
FSM_RMT_INV_ERR_CODE_ERR_NTP_DELETE_ERROR = "ERR-NTP-delete-error"
FSM_RMT_INV_ERR_CODE_ERR_NTP_GET_ERROR = "ERR-NTP-get-error"
FSM_RMT_INV_ERR_CODE_ERR_NTP_SET_ERROR = "ERR-NTP-set-error"
FSM_RMT_INV_ERR_CODE_ERR_POWER_CAP_UNSUPPORTED = "ERR-POWER-CAP-UNSUPPORTED"
FSM_RMT_INV_ERR_CODE_ERR_POWER_PROFILE_IN_PROGRESS = "ERR-POWER-PROFILE-IN-PROGRESS"
FSM_RMT_INV_ERR_CODE_ERR_SERVER_MIS_CONNECT = "ERR-SERVER-mis-connect"
FSM_RMT_INV_ERR_CODE_ERR_SWITCH_INVALID_IF_CONFIG = "ERR-SWITCH-invalid-if-config"
FSM_RMT_INV_ERR_CODE_ERR_TOKEN_REQUEST_DENIED = "ERR-TOKEN-request-denied"
FSM_RMT_INV_ERR_CODE_ERR_UNABLE_TO_FETCH_BIOS_SETTINGS = "ERR-UNABLE-TO-FETCH-BIOS-SETTINGS"
FSM_RMT_INV_ERR_CODE_ERR_UPDATE_FAILED = "ERR-UPDATE-failed"
FSM_RMT_INV_ERR_CODE_ERR_UPDATE_IN_PROGRESS = "ERR-UPDATE-in-progress"
FSM_RMT_INV_ERR_CODE_ERR_UPDATE_RETRY = "ERR-UPDATE-retry"
FSM_RMT_INV_ERR_CODE_ERR_AAA_CONFIG_MODIFY_ERROR = "ERR-aaa-config-modify-error"
FSM_RMT_INV_ERR_CODE_ERR_ACCT_REALM_SET_ERROR = "ERR-acct-realm-set-error"
FSM_RMT_INV_ERR_CODE_ERR_ADMIN_PASSWD_SET = "ERR-admin-passwd-set"
FSM_RMT_INV_ERR_CODE_ERR_AUTH_ISSUE = "ERR-auth-issue"
FSM_RMT_INV_ERR_CODE_ERR_AUTH_REALM_GET_ERROR = "ERR-auth-realm-get-error"
FSM_RMT_INV_ERR_CODE_ERR_AUTH_REALM_SET_ERROR = "ERR-auth-realm-set-error"
FSM_RMT_INV_ERR_CODE_ERR_AUTHENTICATION = "ERR-authentication"
FSM_RMT_INV_ERR_CODE_ERR_AUTHORIZATION_REQUIRED = "ERR-authorization-required"
FSM_RMT_INV_ERR_CODE_ERR_CLI_SESSION_LIMIT_REACHED = "ERR-cli-session-limit-reached"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_KEYRING = "ERR-create-keyring"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_LOCALE = "ERR-create-locale"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_ROLE = "ERR-create-role"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_TP = "ERR-create-tp"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_USER = "ERR-create-user"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_LOCALE = "ERR-delete-locale"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_ROLE = "ERR-delete-role"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_SESSION = "ERR-delete-session"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_USER = "ERR-delete-user"
FSM_RMT_INV_ERR_CODE_ERR_DOWNGRADE_FAIL = "ERR-downgrade-fail"
FSM_RMT_INV_ERR_CODE_ERR_EFI_DIAGNOSTICS_IN_PROGRESS = "ERR-efi-Diagnostics--in-progress"
FSM_RMT_INV_ERR_CODE_ERR_ENABLE_MGMT_CONN = "ERR-enable-mgmt-conn"
FSM_RMT_INV_ERR_CODE_ERR_EP_SET_ERROR = "ERR-ep-set-error"
FSM_RMT_INV_ERR_CODE_ERR_GET_MAX_HTTP_USER_SESSIONS = "ERR-get-max-http-user-sessions"
FSM_RMT_INV_ERR_CODE_ERR_HTTP_INITIALIZING = "ERR-http-initializing"
FSM_RMT_INV_ERR_CODE_ERR_INSUFFICIENTLY_EQUIPPED = "ERR-insufficiently-equipped"
FSM_RMT_INV_ERR_CODE_ERR_INTERNAL_ERROR = "ERR-internal-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_DELETE_ERROR = "ERR-ldap-delete-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_GET_ERROR = "ERR-ldap-get-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_GROUP_MODIFY_ERROR = "ERR-ldap-group-modify-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_GROUP_SET_ERROR = "ERR-ldap-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_SET_ERROR = "ERR-ldap-set-error"
FSM_RMT_INV_ERR_CODE_ERR_LOCALE_SET_ERROR = "ERR-locale-set-error"
FSM_RMT_INV_ERR_CODE_ERR_MAX_USERID_SESSIONS_REACHED = "ERR-max-userid-sessions-reached"
FSM_RMT_INV_ERR_CODE_ERR_MISSING_METHOD = "ERR-missing-method"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_LOCALE = "ERR-modify-locale"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_ROLE = "ERR-modify-role"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER = "ERR-modify-user"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER_LOCALE = "ERR-modify-user-locale"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER_ROLE = "ERR-modify-user-role"
FSM_RMT_INV_ERR_CODE_ERR_PROVIDER_GROUP_MODIFY_ERROR = "ERR-provider-group-modify-error"
FSM_RMT_INV_ERR_CODE_ERR_PROVIDER_GROUP_SET_ERROR = "ERR-provider-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GET_ERROR = "ERR-radius-get-error"
FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GLOBAL_SET_ERROR = "ERR-radius-global-set-error"
FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GROUP_SET_ERROR = "ERR-radius-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_RADIUS_SET_ERROR = "ERR-radius-set-error"
FSM_RMT_INV_ERR_CODE_ERR_REQUEST_TIMEOUT = "ERR-request-timeout"
FSM_RMT_INV_ERR_CODE_ERR_RESET_ADAPTER = "ERR-reset-adapter"
FSM_RMT_INV_ERR_CODE_ERR_ROLE_SET_ERROR = "ERR-role-set-error"
FSM_RMT_INV_ERR_CODE_ERR_SECONDARY_NODE = "ERR-secondary-node"
FSM_RMT_INV_ERR_CODE_ERR_SERVICE_NOT_READY = "ERR-service-not-ready"
FSM_RMT_INV_ERR_CODE_ERR_SESSION_CACHE_FULL = "ERR-session-cache-full"
FSM_RMT_INV_ERR_CODE_ERR_SESSION_NOT_FOUND = "ERR-session-not-found"
FSM_RMT_INV_ERR_CODE_ERR_SET_KEY_CERT = "ERR-set-key-cert"
FSM_RMT_INV_ERR_CODE_ERR_SET_LOGIN_PROFILE = "ERR-set-login-profile"
FSM_RMT_INV_ERR_CODE_ERR_SET_MIN_PASSPHRASE_LENGTH = "ERR-set-min-passphrase-length"
FSM_RMT_INV_ERR_CODE_ERR_SET_NETWORK = "ERR-set-network"
FSM_RMT_INV_ERR_CODE_ERR_SET_PASSWORD_STRENGTH_CHECK = "ERR-set-password-strength-check"
FSM_RMT_INV_ERR_CODE_ERR_SET_PORT_CHANNEL = "ERR-set-port-channel"
FSM_RMT_INV_ERR_CODE_ERR_STORE_PRE_LOGIN_BANNER_MSG = "ERR-store-pre-login-banner-msg"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_ENABLE_ERROR = "ERR-tacacs-enable-error"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_GLOBAL_SET_ERROR = "ERR-tacacs-global-set-error"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_GROUP_SET_ERROR = "ERR-tacacs-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_PLUS_GET_ERROR = "ERR-tacacs-plus-get-error"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_SET_ERROR = "ERR-tacacs-set-error"
FSM_RMT_INV_ERR_CODE_ERR_TEST_ERROR_1 = "ERR-test-error-1"
FSM_RMT_INV_ERR_CODE_ERR_TEST_ERROR_2 = "ERR-test-error-2"
FSM_RMT_INV_ERR_CODE_ERR_TIMEZONE_SET_ERROR = "ERR-timezone-set-error"
FSM_RMT_INV_ERR_CODE_ERR_USER_ACCOUNT_EXPIRED = "ERR-user-account-expired"
FSM_RMT_INV_ERR_CODE_ERR_USER_PASSWD_EXPIRED = "ERR-user-passwd-expired"
FSM_RMT_INV_ERR_CODE_ERR_USER_SET_ERROR = "ERR-user-set-error"
FSM_RMT_INV_ERR_CODE_ERR_XML_PARSE_ERROR = "ERR-xml-parse-error"
FSM_RMT_INV_ERR_CODE_NONE = "none"
FSM_STAMP_NEVER = "never"
FSM_STATUS_DEPLOY_BEGIN = "DeployBegin"
FSM_STATUS_DEPLOY_FAIL = "DeployFail"
FSM_STATUS_DEPLOY_SUCCESS = "DeploySuccess"
FSM_STATUS_DEPLOY_UPDATE_CONNECTIVITY = "DeployUpdateConnectivity"
FSM_STATUS_NOP = "nop"
SWITCH_ID_A = "A"
SWITCH_ID_B = "B"
SWITCH_ID_NONE = "NONE"
class SwAccessDomain(ManagedObject):
"""This is SwAccessDomain class."""
consts = SwAccessDomainConsts()
naming_props = set([])
mo_meta = MoMeta("SwAccessDomain", "swAccessDomain", "access-eth", VersionMeta.Version101e, "InputOutput", 0x7f, [], ["read-only"], ['networkElement'], ['eventInst', 'faultInst', 'swAccessDomainFsm', 'swAccessDomainFsmTask', 'swAccessEp', 'swSubGroup'], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"fsm_descr": MoPropertyMeta("fsm_descr", "fsmDescr", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"fsm_prev": MoPropertyMeta("fsm_prev", "fsmPrev", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, None, ["DeployBegin", "DeployFail", "DeploySuccess", "DeployUpdateConnectivity", "nop"], []),
"fsm_progr": MoPropertyMeta("fsm_progr", "fsmProgr", "byte", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, None, [], ["0-100"]),
"fsm_rmt_inv_err_code": MoPropertyMeta("fsm_rmt_inv_err_code", "fsmRmtInvErrCode", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, None, ["ERR-2fa-auth-retry", "ERR-ACTIVATE-failed", "ERR-ACTIVATE-in-progress", "ERR-ACTIVATE-retry", "ERR-BIOS-TOKENS-OLD-BIOS", "ERR-BIOS-TOKENS-OLD-CIMC", "ERR-BIOS-network-boot-order-not-found", "ERR-BOARDCTRLUPDATE-ignore", "ERR-DIAG-cancelled", "ERR-DIAG-fsm-restarted", "ERR-DIAG-test-failed", "ERR-DNLD-authentication-failure", "ERR-DNLD-hostkey-mismatch", "ERR-DNLD-invalid-image", "ERR-DNLD-no-file", "ERR-DNLD-no-space", "ERR-DNLD-usb-unmounted", "ERR-DNS-delete-error", "ERR-DNS-get-error", "ERR-DNS-set-error", "ERR-Diagnostics-in-progress", "ERR-Diagnostics-memtest-in-progress", "ERR-Diagnostics-network-in-progress", "ERR-FILTER-illegal-format", "ERR-FSM-no-such-state", "ERR-HOST-fru-identity-mismatch", "ERR-HTTP-set-error", "ERR-HTTPS-set-error", "ERR-IBMC-analyze-results", "ERR-IBMC-connect-error", "ERR-IBMC-connector-info-retrieval-error", "ERR-IBMC-fru-retrieval-error", "ERR-IBMC-invalid-end-point-config", "ERR-IBMC-results-not-ready", "ERR-MAX-subscriptions-allowed-error", "ERR-MO-CONFIG-child-object-cant-be-configured", "ERR-MO-META-no-such-object-class", "ERR-MO-PROPERTY-no-such-property", "ERR-MO-PROPERTY-value-out-of-range", "ERR-MO-access-denied", "ERR-MO-deletion-rule-violation", "ERR-MO-duplicate-object", "ERR-MO-illegal-containment", "ERR-MO-illegal-creation", "ERR-MO-illegal-iterator-state", "ERR-MO-illegal-object-lifecycle-transition", "ERR-MO-naming-rule-violation", "ERR-MO-object-not-found", "ERR-MO-resource-allocation", "ERR-NTP-delete-error", "ERR-NTP-get-error", "ERR-NTP-set-error", "ERR-POWER-CAP-UNSUPPORTED", "ERR-POWER-PROFILE-IN-PROGRESS", "ERR-SERVER-mis-connect", "ERR-SWITCH-invalid-if-config", "ERR-TOKEN-request-denied", "ERR-UNABLE-TO-FETCH-BIOS-SETTINGS", "ERR-UPDATE-failed", "ERR-UPDATE-in-progress", "ERR-UPDATE-retry", "ERR-aaa-config-modify-error", 
"ERR-acct-realm-set-error", "ERR-admin-passwd-set", "ERR-auth-issue", "ERR-auth-realm-get-error", "ERR-auth-realm-set-error", "ERR-authentication", "ERR-authorization-required", "ERR-cli-session-limit-reached", "ERR-create-keyring", "ERR-create-locale", "ERR-create-role", "ERR-create-tp", "ERR-create-user", "ERR-delete-locale", "ERR-delete-role", "ERR-delete-session", "ERR-delete-user", "ERR-downgrade-fail", "ERR-efi-Diagnostics--in-progress", "ERR-enable-mgmt-conn", "ERR-ep-set-error", "ERR-get-max-http-user-sessions", "ERR-http-initializing", "ERR-insufficiently-equipped", "ERR-internal-error", "ERR-ldap-delete-error", "ERR-ldap-get-error", "ERR-ldap-group-modify-error", "ERR-ldap-group-set-error", "ERR-ldap-set-error", "ERR-locale-set-error", "ERR-max-userid-sessions-reached", "ERR-missing-method", "ERR-modify-locale", "ERR-modify-role", "ERR-modify-user", "ERR-modify-user-locale", "ERR-modify-user-role", "ERR-provider-group-modify-error", "ERR-provider-group-set-error", "ERR-radius-get-error", "ERR-radius-global-set-error", "ERR-radius-group-set-error", "ERR-radius-set-error", "ERR-request-timeout", "ERR-reset-adapter", "ERR-role-set-error", "ERR-secondary-node", "ERR-service-not-ready", "ERR-session-cache-full", "ERR-session-not-found", "ERR-set-key-cert", "ERR-set-login-profile", "ERR-set-min-passphrase-length", "ERR-set-network", "ERR-set-password-strength-check", "ERR-set-port-channel", "ERR-store-pre-login-banner-msg", "ERR-tacacs-enable-error", "ERR-tacacs-global-set-error", "ERR-tacacs-group-set-error", "ERR-tacacs-plus-get-error", "ERR-tacacs-set-error", "ERR-test-error-1", "ERR-test-error-2", "ERR-timezone-set-error", "ERR-user-account-expired", "ERR-user-passwd-expired", "ERR-user-set-error", "ERR-xml-parse-error", "none"], ["0-4294967295"]),
"fsm_rmt_inv_err_descr": MoPropertyMeta("fsm_rmt_inv_err_descr", "fsmRmtInvErrDescr", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, 0, 510, None, [], []),
"fsm_rmt_inv_rslt": MoPropertyMeta("fsm_rmt_inv_rslt", "fsmRmtInvRslt", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, r"""((defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout),){0,32}(defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout){0,1}""", [], []),
"fsm_stage_descr": MoPropertyMeta("fsm_stage_descr", "fsmStageDescr", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"fsm_stamp": MoPropertyMeta("fsm_stamp", "fsmStamp", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", ["never"], []),
"fsm_status": MoPropertyMeta("fsm_status", "fsmStatus", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, None, ["DeployBegin", "DeployFail", "DeploySuccess", "DeployUpdateConnectivity", "nop"], []),
"fsm_try": MoPropertyMeta("fsm_try", "fsmTry", "byte", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"locale": MoPropertyMeta("locale", "locale", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|server|chassis|internal|external),){0,5}(defaultValue|unknown|server|chassis|internal|external){0,1}""", [], []),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version101e, MoPropertyMeta.CREATE_ONLY, 0x8, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"switch_id": MoPropertyMeta("switch_id", "switchId", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, ["A", "B", "NONE"], []),
"transport": MoPropertyMeta("transport", "transport", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|ether|dce|fc),){0,4}(defaultValue|unknown|ether|dce|fc){0,1}""", [], []),
"type": MoPropertyMeta("type", "type", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|lan|san|ipc),){0,4}(defaultValue|unknown|lan|san|ipc){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"fsmDescr": "fsm_descr",
"fsmPrev": "fsm_prev",
"fsmProgr": "fsm_progr",
"fsmRmtInvErrCode": "fsm_rmt_inv_err_code",
"fsmRmtInvErrDescr": "fsm_rmt_inv_err_descr",
"fsmRmtInvRslt": "fsm_rmt_inv_rslt",
"fsmStageDescr": "fsm_stage_descr",
"fsmStamp": "fsm_stamp",
"fsmStatus": "fsm_status",
"fsmTry": "fsm_try",
"locale": "locale",
"name": "name",
"rn": "rn",
"sacl": "sacl",
"status": "status",
"switchId": "switch_id",
"transport": "transport",
"type": "type",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.fsm_descr = None
self.fsm_prev = None
self.fsm_progr = None
self.fsm_rmt_inv_err_code = None
self.fsm_rmt_inv_err_descr = None
self.fsm_rmt_inv_rslt = None
self.fsm_stage_descr = None
self.fsm_stamp = None
self.fsm_status = None
self.fsm_try = None
self.locale = None
self.name = None
self.sacl = None
self.status = None
self.switch_id = None
self.transport = None
self.type = None
ManagedObject.__init__(self, "SwAccessDomain", parent_mo_or_dn, **kwargs)
| 92.679487 | 3,774 | 0.763729 |
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class SwAccessDomainConsts:
    """Constant values for SwAccessDomain string properties.

    Groups the valid values for the FSM state properties (FSM_PREV_*,
    FSM_STATUS_*), the remote-invocation error codes
    (FSM_RMT_INV_ERR_CODE_*), and the switch identifiers (SWITCH_ID_*).
    """
    FSM_PREV_DEPLOY_BEGIN = "DeployBegin"
    FSM_PREV_DEPLOY_FAIL = "DeployFail"
    FSM_PREV_DEPLOY_SUCCESS = "DeploySuccess"
    FSM_PREV_DEPLOY_UPDATE_CONNECTIVITY = "DeployUpdateConnectivity"
    FSM_PREV_NOP = "nop"
    FSM_RMT_INV_ERR_CODE_ERR_2FA_AUTH_RETRY = "ERR-2fa-auth-retry"
    FSM_RMT_INV_ERR_CODE_ERR_ACTIVATE_FAILED = "ERR-ACTIVATE-failed"
    FSM_RMT_INV_ERR_CODE_ERR_ACTIVATE_IN_PROGRESS = "ERR-ACTIVATE-in-progress"
    FSM_RMT_INV_ERR_CODE_ERR_ACTIVATE_RETRY = "ERR-ACTIVATE-retry"
    FSM_RMT_INV_ERR_CODE_ERR_BIOS_TOKENS_OLD_BIOS = "ERR-BIOS-TOKENS-OLD-BIOS"
    FSM_RMT_INV_ERR_CODE_ERR_BIOS_TOKENS_OLD_CIMC = "ERR-BIOS-TOKENS-OLD-CIMC"
    FSM_RMT_INV_ERR_CODE_ERR_BIOS_NETWORK_BOOT_ORDER_NOT_FOUND = "ERR-BIOS-network-boot-order-not-found"
    FSM_RMT_INV_ERR_CODE_ERR_BOARDCTRLUPDATE_IGNORE = "ERR-BOARDCTRLUPDATE-ignore"
    FSM_RMT_INV_ERR_CODE_ERR_DIAG_CANCELLED = "ERR-DIAG-cancelled"
    FSM_RMT_INV_ERR_CODE_ERR_DIAG_FSM_RESTARTED = "ERR-DIAG-fsm-restarted"
    FSM_RMT_INV_ERR_CODE_ERR_DIAG_TEST_FAILED = "ERR-DIAG-test-failed"
    FSM_RMT_INV_ERR_CODE_ERR_DNLD_AUTHENTICATION_FAILURE = "ERR-DNLD-authentication-failure"
    FSM_RMT_INV_ERR_CODE_ERR_DNLD_HOSTKEY_MISMATCH = "ERR-DNLD-hostkey-mismatch"
    FSM_RMT_INV_ERR_CODE_ERR_DNLD_INVALID_IMAGE = "ERR-DNLD-invalid-image"
    FSM_RMT_INV_ERR_CODE_ERR_DNLD_NO_FILE = "ERR-DNLD-no-file"
    FSM_RMT_INV_ERR_CODE_ERR_DNLD_NO_SPACE = "ERR-DNLD-no-space"
    FSM_RMT_INV_ERR_CODE_ERR_DNLD_USB_UNMOUNTED = "ERR-DNLD-usb-unmounted"
    FSM_RMT_INV_ERR_CODE_ERR_DNS_DELETE_ERROR = "ERR-DNS-delete-error"
    FSM_RMT_INV_ERR_CODE_ERR_DNS_GET_ERROR = "ERR-DNS-get-error"
    FSM_RMT_INV_ERR_CODE_ERR_DNS_SET_ERROR = "ERR-DNS-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_DIAGNOSTICS_IN_PROGRESS = "ERR-Diagnostics-in-progress"
    FSM_RMT_INV_ERR_CODE_ERR_DIAGNOSTICS_MEMTEST_IN_PROGRESS = "ERR-Diagnostics-memtest-in-progress"
    FSM_RMT_INV_ERR_CODE_ERR_DIAGNOSTICS_NETWORK_IN_PROGRESS = "ERR-Diagnostics-network-in-progress"
    FSM_RMT_INV_ERR_CODE_ERR_FILTER_ILLEGAL_FORMAT = "ERR-FILTER-illegal-format"
    FSM_RMT_INV_ERR_CODE_ERR_FSM_NO_SUCH_STATE = "ERR-FSM-no-such-state"
    FSM_RMT_INV_ERR_CODE_ERR_HOST_FRU_IDENTITY_MISMATCH = "ERR-HOST-fru-identity-mismatch"
    FSM_RMT_INV_ERR_CODE_ERR_HTTP_SET_ERROR = "ERR-HTTP-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_HTTPS_SET_ERROR = "ERR-HTTPS-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_IBMC_ANALYZE_RESULTS = "ERR-IBMC-analyze-results"
    FSM_RMT_INV_ERR_CODE_ERR_IBMC_CONNECT_ERROR = "ERR-IBMC-connect-error"
    FSM_RMT_INV_ERR_CODE_ERR_IBMC_CONNECTOR_INFO_RETRIEVAL_ERROR = "ERR-IBMC-connector-info-retrieval-error"
    FSM_RMT_INV_ERR_CODE_ERR_IBMC_FRU_RETRIEVAL_ERROR = "ERR-IBMC-fru-retrieval-error"
    FSM_RMT_INV_ERR_CODE_ERR_IBMC_INVALID_END_POINT_CONFIG = "ERR-IBMC-invalid-end-point-config"
    FSM_RMT_INV_ERR_CODE_ERR_IBMC_RESULTS_NOT_READY = "ERR-IBMC-results-not-ready"
    FSM_RMT_INV_ERR_CODE_ERR_MAX_SUBSCRIPTIONS_ALLOWED_ERROR = "ERR-MAX-subscriptions-allowed-error"
    FSM_RMT_INV_ERR_CODE_ERR_MO_CONFIG_CHILD_OBJECT_CANT_BE_CONFIGURED = "ERR-MO-CONFIG-child-object-cant-be-configured"
    FSM_RMT_INV_ERR_CODE_ERR_MO_META_NO_SUCH_OBJECT_CLASS = "ERR-MO-META-no-such-object-class"
    FSM_RMT_INV_ERR_CODE_ERR_MO_PROPERTY_NO_SUCH_PROPERTY = "ERR-MO-PROPERTY-no-such-property"
    FSM_RMT_INV_ERR_CODE_ERR_MO_PROPERTY_VALUE_OUT_OF_RANGE = "ERR-MO-PROPERTY-value-out-of-range"
    FSM_RMT_INV_ERR_CODE_ERR_MO_ACCESS_DENIED = "ERR-MO-access-denied"
    FSM_RMT_INV_ERR_CODE_ERR_MO_DELETION_RULE_VIOLATION = "ERR-MO-deletion-rule-violation"
    FSM_RMT_INV_ERR_CODE_ERR_MO_DUPLICATE_OBJECT = "ERR-MO-duplicate-object"
    FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_CONTAINMENT = "ERR-MO-illegal-containment"
    FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_CREATION = "ERR-MO-illegal-creation"
    FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_ITERATOR_STATE = "ERR-MO-illegal-iterator-state"
    FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_OBJECT_LIFECYCLE_TRANSITION = "ERR-MO-illegal-object-lifecycle-transition"
    FSM_RMT_INV_ERR_CODE_ERR_MO_NAMING_RULE_VIOLATION = "ERR-MO-naming-rule-violation"
    FSM_RMT_INV_ERR_CODE_ERR_MO_OBJECT_NOT_FOUND = "ERR-MO-object-not-found"
    FSM_RMT_INV_ERR_CODE_ERR_MO_RESOURCE_ALLOCATION = "ERR-MO-resource-allocation"
    FSM_RMT_INV_ERR_CODE_ERR_NTP_DELETE_ERROR = "ERR-NTP-delete-error"
    FSM_RMT_INV_ERR_CODE_ERR_NTP_GET_ERROR = "ERR-NTP-get-error"
    FSM_RMT_INV_ERR_CODE_ERR_NTP_SET_ERROR = "ERR-NTP-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_POWER_CAP_UNSUPPORTED = "ERR-POWER-CAP-UNSUPPORTED"
    FSM_RMT_INV_ERR_CODE_ERR_POWER_PROFILE_IN_PROGRESS = "ERR-POWER-PROFILE-IN-PROGRESS"
    FSM_RMT_INV_ERR_CODE_ERR_SERVER_MIS_CONNECT = "ERR-SERVER-mis-connect"
    FSM_RMT_INV_ERR_CODE_ERR_SWITCH_INVALID_IF_CONFIG = "ERR-SWITCH-invalid-if-config"
    FSM_RMT_INV_ERR_CODE_ERR_TOKEN_REQUEST_DENIED = "ERR-TOKEN-request-denied"
    FSM_RMT_INV_ERR_CODE_ERR_UNABLE_TO_FETCH_BIOS_SETTINGS = "ERR-UNABLE-TO-FETCH-BIOS-SETTINGS"
    FSM_RMT_INV_ERR_CODE_ERR_UPDATE_FAILED = "ERR-UPDATE-failed"
    FSM_RMT_INV_ERR_CODE_ERR_UPDATE_IN_PROGRESS = "ERR-UPDATE-in-progress"
    FSM_RMT_INV_ERR_CODE_ERR_UPDATE_RETRY = "ERR-UPDATE-retry"
    FSM_RMT_INV_ERR_CODE_ERR_AAA_CONFIG_MODIFY_ERROR = "ERR-aaa-config-modify-error"
    FSM_RMT_INV_ERR_CODE_ERR_ACCT_REALM_SET_ERROR = "ERR-acct-realm-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_ADMIN_PASSWD_SET = "ERR-admin-passwd-set"
    FSM_RMT_INV_ERR_CODE_ERR_AUTH_ISSUE = "ERR-auth-issue"
    FSM_RMT_INV_ERR_CODE_ERR_AUTH_REALM_GET_ERROR = "ERR-auth-realm-get-error"
    FSM_RMT_INV_ERR_CODE_ERR_AUTH_REALM_SET_ERROR = "ERR-auth-realm-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_AUTHENTICATION = "ERR-authentication"
    FSM_RMT_INV_ERR_CODE_ERR_AUTHORIZATION_REQUIRED = "ERR-authorization-required"
    FSM_RMT_INV_ERR_CODE_ERR_CLI_SESSION_LIMIT_REACHED = "ERR-cli-session-limit-reached"
    FSM_RMT_INV_ERR_CODE_ERR_CREATE_KEYRING = "ERR-create-keyring"
    FSM_RMT_INV_ERR_CODE_ERR_CREATE_LOCALE = "ERR-create-locale"
    FSM_RMT_INV_ERR_CODE_ERR_CREATE_ROLE = "ERR-create-role"
    FSM_RMT_INV_ERR_CODE_ERR_CREATE_TP = "ERR-create-tp"
    FSM_RMT_INV_ERR_CODE_ERR_CREATE_USER = "ERR-create-user"
    FSM_RMT_INV_ERR_CODE_ERR_DELETE_LOCALE = "ERR-delete-locale"
    FSM_RMT_INV_ERR_CODE_ERR_DELETE_ROLE = "ERR-delete-role"
    FSM_RMT_INV_ERR_CODE_ERR_DELETE_SESSION = "ERR-delete-session"
    FSM_RMT_INV_ERR_CODE_ERR_DELETE_USER = "ERR-delete-user"
    FSM_RMT_INV_ERR_CODE_ERR_DOWNGRADE_FAIL = "ERR-downgrade-fail"
    FSM_RMT_INV_ERR_CODE_ERR_EFI_DIAGNOSTICS_IN_PROGRESS = "ERR-efi-Diagnostics--in-progress"
    FSM_RMT_INV_ERR_CODE_ERR_ENABLE_MGMT_CONN = "ERR-enable-mgmt-conn"
    FSM_RMT_INV_ERR_CODE_ERR_EP_SET_ERROR = "ERR-ep-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_GET_MAX_HTTP_USER_SESSIONS = "ERR-get-max-http-user-sessions"
    FSM_RMT_INV_ERR_CODE_ERR_HTTP_INITIALIZING = "ERR-http-initializing"
    FSM_RMT_INV_ERR_CODE_ERR_INSUFFICIENTLY_EQUIPPED = "ERR-insufficiently-equipped"
    FSM_RMT_INV_ERR_CODE_ERR_INTERNAL_ERROR = "ERR-internal-error"
    FSM_RMT_INV_ERR_CODE_ERR_LDAP_DELETE_ERROR = "ERR-ldap-delete-error"
    FSM_RMT_INV_ERR_CODE_ERR_LDAP_GET_ERROR = "ERR-ldap-get-error"
    FSM_RMT_INV_ERR_CODE_ERR_LDAP_GROUP_MODIFY_ERROR = "ERR-ldap-group-modify-error"
    FSM_RMT_INV_ERR_CODE_ERR_LDAP_GROUP_SET_ERROR = "ERR-ldap-group-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_LDAP_SET_ERROR = "ERR-ldap-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_LOCALE_SET_ERROR = "ERR-locale-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_MAX_USERID_SESSIONS_REACHED = "ERR-max-userid-sessions-reached"
    FSM_RMT_INV_ERR_CODE_ERR_MISSING_METHOD = "ERR-missing-method"
    FSM_RMT_INV_ERR_CODE_ERR_MODIFY_LOCALE = "ERR-modify-locale"
    FSM_RMT_INV_ERR_CODE_ERR_MODIFY_ROLE = "ERR-modify-role"
    FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER = "ERR-modify-user"
    FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER_LOCALE = "ERR-modify-user-locale"
    FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER_ROLE = "ERR-modify-user-role"
    FSM_RMT_INV_ERR_CODE_ERR_PROVIDER_GROUP_MODIFY_ERROR = "ERR-provider-group-modify-error"
    FSM_RMT_INV_ERR_CODE_ERR_PROVIDER_GROUP_SET_ERROR = "ERR-provider-group-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GET_ERROR = "ERR-radius-get-error"
    FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GLOBAL_SET_ERROR = "ERR-radius-global-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GROUP_SET_ERROR = "ERR-radius-group-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_RADIUS_SET_ERROR = "ERR-radius-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_REQUEST_TIMEOUT = "ERR-request-timeout"
    FSM_RMT_INV_ERR_CODE_ERR_RESET_ADAPTER = "ERR-reset-adapter"
    FSM_RMT_INV_ERR_CODE_ERR_ROLE_SET_ERROR = "ERR-role-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_SECONDARY_NODE = "ERR-secondary-node"
    FSM_RMT_INV_ERR_CODE_ERR_SERVICE_NOT_READY = "ERR-service-not-ready"
    FSM_RMT_INV_ERR_CODE_ERR_SESSION_CACHE_FULL = "ERR-session-cache-full"
    FSM_RMT_INV_ERR_CODE_ERR_SESSION_NOT_FOUND = "ERR-session-not-found"
    FSM_RMT_INV_ERR_CODE_ERR_SET_KEY_CERT = "ERR-set-key-cert"
    FSM_RMT_INV_ERR_CODE_ERR_SET_LOGIN_PROFILE = "ERR-set-login-profile"
    FSM_RMT_INV_ERR_CODE_ERR_SET_MIN_PASSPHRASE_LENGTH = "ERR-set-min-passphrase-length"
    FSM_RMT_INV_ERR_CODE_ERR_SET_NETWORK = "ERR-set-network"
    FSM_RMT_INV_ERR_CODE_ERR_SET_PASSWORD_STRENGTH_CHECK = "ERR-set-password-strength-check"
    FSM_RMT_INV_ERR_CODE_ERR_SET_PORT_CHANNEL = "ERR-set-port-channel"
    FSM_RMT_INV_ERR_CODE_ERR_STORE_PRE_LOGIN_BANNER_MSG = "ERR-store-pre-login-banner-msg"
    FSM_RMT_INV_ERR_CODE_ERR_TACACS_ENABLE_ERROR = "ERR-tacacs-enable-error"
    FSM_RMT_INV_ERR_CODE_ERR_TACACS_GLOBAL_SET_ERROR = "ERR-tacacs-global-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_TACACS_GROUP_SET_ERROR = "ERR-tacacs-group-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_TACACS_PLUS_GET_ERROR = "ERR-tacacs-plus-get-error"
    FSM_RMT_INV_ERR_CODE_ERR_TACACS_SET_ERROR = "ERR-tacacs-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_TEST_ERROR_1 = "ERR-test-error-1"
    FSM_RMT_INV_ERR_CODE_ERR_TEST_ERROR_2 = "ERR-test-error-2"
    FSM_RMT_INV_ERR_CODE_ERR_TIMEZONE_SET_ERROR = "ERR-timezone-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_USER_ACCOUNT_EXPIRED = "ERR-user-account-expired"
    FSM_RMT_INV_ERR_CODE_ERR_USER_PASSWD_EXPIRED = "ERR-user-passwd-expired"
    FSM_RMT_INV_ERR_CODE_ERR_USER_SET_ERROR = "ERR-user-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_XML_PARSE_ERROR = "ERR-xml-parse-error"
    FSM_RMT_INV_ERR_CODE_NONE = "none"
    FSM_STAMP_NEVER = "never"
    FSM_STATUS_DEPLOY_BEGIN = "DeployBegin"
    FSM_STATUS_DEPLOY_FAIL = "DeployFail"
    FSM_STATUS_DEPLOY_SUCCESS = "DeploySuccess"
    FSM_STATUS_DEPLOY_UPDATE_CONNECTIVITY = "DeployUpdateConnectivity"
    FSM_STATUS_NOP = "nop"
    SWITCH_ID_A = "A"
    SWITCH_ID_B = "B"
    SWITCH_ID_NONE = "NONE"
class SwAccessDomain(ManagedObject):
    """Models the UCS "swAccessDomain" managed object (rn "access-eth")."""
    consts = SwAccessDomainConsts()
    naming_props = set([])
    # Class-level metadata: MO name, XML tag, rn, introduced version, access
    # category, mask, parents, children and supported verbs.
    mo_meta = MoMeta("SwAccessDomain", "swAccessDomain", "access-eth", VersionMeta.Version101e, "InputOutput", 0x7f, [], ["read-only"], ['networkElement'], ['eventInst', 'faultInst', 'swAccessDomainFsm', 'swAccessDomainFsmTask', 'swAccessEp', 'swSubGroup'], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"fsm_descr": MoPropertyMeta("fsm_descr", "fsmDescr", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"fsm_prev": MoPropertyMeta("fsm_prev", "fsmPrev", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, None, ["DeployBegin", "DeployFail", "DeploySuccess", "DeployUpdateConnectivity", "nop"], []),
"fsm_progr": MoPropertyMeta("fsm_progr", "fsmProgr", "byte", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, None, [], ["0-100"]),
"fsm_rmt_inv_err_code": MoPropertyMeta("fsm_rmt_inv_err_code", "fsmRmtInvErrCode", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, None, ["ERR-2fa-auth-retry", "ERR-ACTIVATE-failed", "ERR-ACTIVATE-in-progress", "ERR-ACTIVATE-retry", "ERR-BIOS-TOKENS-OLD-BIOS", "ERR-BIOS-TOKENS-OLD-CIMC", "ERR-BIOS-network-boot-order-not-found", "ERR-BOARDCTRLUPDATE-ignore", "ERR-DIAG-cancelled", "ERR-DIAG-fsm-restarted", "ERR-DIAG-test-failed", "ERR-DNLD-authentication-failure", "ERR-DNLD-hostkey-mismatch", "ERR-DNLD-invalid-image", "ERR-DNLD-no-file", "ERR-DNLD-no-space", "ERR-DNLD-usb-unmounted", "ERR-DNS-delete-error", "ERR-DNS-get-error", "ERR-DNS-set-error", "ERR-Diagnostics-in-progress", "ERR-Diagnostics-memtest-in-progress", "ERR-Diagnostics-network-in-progress", "ERR-FILTER-illegal-format", "ERR-FSM-no-such-state", "ERR-HOST-fru-identity-mismatch", "ERR-HTTP-set-error", "ERR-HTTPS-set-error", "ERR-IBMC-analyze-results", "ERR-IBMC-connect-error", "ERR-IBMC-connector-info-retrieval-error", "ERR-IBMC-fru-retrieval-error", "ERR-IBMC-invalid-end-point-config", "ERR-IBMC-results-not-ready", "ERR-MAX-subscriptions-allowed-error", "ERR-MO-CONFIG-child-object-cant-be-configured", "ERR-MO-META-no-such-object-class", "ERR-MO-PROPERTY-no-such-property", "ERR-MO-PROPERTY-value-out-of-range", "ERR-MO-access-denied", "ERR-MO-deletion-rule-violation", "ERR-MO-duplicate-object", "ERR-MO-illegal-containment", "ERR-MO-illegal-creation", "ERR-MO-illegal-iterator-state", "ERR-MO-illegal-object-lifecycle-transition", "ERR-MO-naming-rule-violation", "ERR-MO-object-not-found", "ERR-MO-resource-allocation", "ERR-NTP-delete-error", "ERR-NTP-get-error", "ERR-NTP-set-error", "ERR-POWER-CAP-UNSUPPORTED", "ERR-POWER-PROFILE-IN-PROGRESS", "ERR-SERVER-mis-connect", "ERR-SWITCH-invalid-if-config", "ERR-TOKEN-request-denied", "ERR-UNABLE-TO-FETCH-BIOS-SETTINGS", "ERR-UPDATE-failed", "ERR-UPDATE-in-progress", "ERR-UPDATE-retry", "ERR-aaa-config-modify-error", 
"ERR-acct-realm-set-error", "ERR-admin-passwd-set", "ERR-auth-issue", "ERR-auth-realm-get-error", "ERR-auth-realm-set-error", "ERR-authentication", "ERR-authorization-required", "ERR-cli-session-limit-reached", "ERR-create-keyring", "ERR-create-locale", "ERR-create-role", "ERR-create-tp", "ERR-create-user", "ERR-delete-locale", "ERR-delete-role", "ERR-delete-session", "ERR-delete-user", "ERR-downgrade-fail", "ERR-efi-Diagnostics--in-progress", "ERR-enable-mgmt-conn", "ERR-ep-set-error", "ERR-get-max-http-user-sessions", "ERR-http-initializing", "ERR-insufficiently-equipped", "ERR-internal-error", "ERR-ldap-delete-error", "ERR-ldap-get-error", "ERR-ldap-group-modify-error", "ERR-ldap-group-set-error", "ERR-ldap-set-error", "ERR-locale-set-error", "ERR-max-userid-sessions-reached", "ERR-missing-method", "ERR-modify-locale", "ERR-modify-role", "ERR-modify-user", "ERR-modify-user-locale", "ERR-modify-user-role", "ERR-provider-group-modify-error", "ERR-provider-group-set-error", "ERR-radius-get-error", "ERR-radius-global-set-error", "ERR-radius-group-set-error", "ERR-radius-set-error", "ERR-request-timeout", "ERR-reset-adapter", "ERR-role-set-error", "ERR-secondary-node", "ERR-service-not-ready", "ERR-session-cache-full", "ERR-session-not-found", "ERR-set-key-cert", "ERR-set-login-profile", "ERR-set-min-passphrase-length", "ERR-set-network", "ERR-set-password-strength-check", "ERR-set-port-channel", "ERR-store-pre-login-banner-msg", "ERR-tacacs-enable-error", "ERR-tacacs-global-set-error", "ERR-tacacs-group-set-error", "ERR-tacacs-plus-get-error", "ERR-tacacs-set-error", "ERR-test-error-1", "ERR-test-error-2", "ERR-timezone-set-error", "ERR-user-account-expired", "ERR-user-passwd-expired", "ERR-user-set-error", "ERR-xml-parse-error", "none"], ["0-4294967295"]),
"fsm_rmt_inv_err_descr": MoPropertyMeta("fsm_rmt_inv_err_descr", "fsmRmtInvErrDescr", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, 0, 510, None, [], []),
"fsm_rmt_inv_rslt": MoPropertyMeta("fsm_rmt_inv_rslt", "fsmRmtInvRslt", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, r"""((defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout),){0,32}(defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout){0,1}""", [], []),
"fsm_stage_descr": MoPropertyMeta("fsm_stage_descr", "fsmStageDescr", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"fsm_stamp": MoPropertyMeta("fsm_stamp", "fsmStamp", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", ["never"], []),
"fsm_status": MoPropertyMeta("fsm_status", "fsmStatus", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, None, ["DeployBegin", "DeployFail", "DeploySuccess", "DeployUpdateConnectivity", "nop"], []),
"fsm_try": MoPropertyMeta("fsm_try", "fsmTry", "byte", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"locale": MoPropertyMeta("locale", "locale", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|server|chassis|internal|external),){0,5}(defaultValue|unknown|server|chassis|internal|external){0,1}""", [], []),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version101e, MoPropertyMeta.CREATE_ONLY, 0x8, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"switch_id": MoPropertyMeta("switch_id", "switchId", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, ["A", "B", "NONE"], []),
"transport": MoPropertyMeta("transport", "transport", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|ether|dce|fc),){0,4}(defaultValue|unknown|ether|dce|fc){0,1}""", [], []),
"type": MoPropertyMeta("type", "type", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|lan|san|ipc),){0,4}(defaultValue|unknown|lan|san|ipc){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"fsmDescr": "fsm_descr",
"fsmPrev": "fsm_prev",
"fsmProgr": "fsm_progr",
"fsmRmtInvErrCode": "fsm_rmt_inv_err_code",
"fsmRmtInvErrDescr": "fsm_rmt_inv_err_descr",
"fsmRmtInvRslt": "fsm_rmt_inv_rslt",
"fsmStageDescr": "fsm_stage_descr",
"fsmStamp": "fsm_stamp",
"fsmStatus": "fsm_status",
"fsmTry": "fsm_try",
"locale": "locale",
"name": "name",
"rn": "rn",
"sacl": "sacl",
"status": "status",
"switchId": "switch_id",
"transport": "transport",
"type": "type",
}
    def __init__(self, parent_mo_or_dn, **kwargs):
        """Initialize a SwAccessDomain managed object.

        :param parent_mo_or_dn: parent managed object, or its dn string.
        :param kwargs: initial values for the managed-object properties.
        """
        self._dirty_mask = 0
        # All known properties default to None; actual values (if any) are
        # applied by ManagedObject.__init__ from kwargs below.
        self.child_action = None
        self.fsm_descr = None
        self.fsm_prev = None
        self.fsm_progr = None
        self.fsm_rmt_inv_err_code = None
        self.fsm_rmt_inv_err_descr = None
        self.fsm_rmt_inv_rslt = None
        self.fsm_stage_descr = None
        self.fsm_stamp = None
        self.fsm_status = None
        self.fsm_try = None
        self.locale = None
        self.name = None
        self.sacl = None
        self.status = None
        self.switch_id = None
        self.transport = None
        self.type = None

        ManagedObject.__init__(self, "SwAccessDomain", parent_mo_or_dn, **kwargs)
| true | true |
f71e8a1667be90cf32f5b0d97e063dc0e58bd349 | 1,061 | py | Python | venv/lib/python3.8/site-packages/vsts/project_analysis/v4_0/models/code_change_trend_item.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/vsts/project_analysis/v4_0/models/code_change_trend_item.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/vsts/project_analysis/v4_0/models/code_change_trend_item.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class CodeChangeTrendItem(Model):
    """A single sample in a code-change trend series.

    :param time: timestamp of the sample (serialized as ISO-8601).
    :type time: datetime
    :param value: trend value recorded at *time*.
    :type value: int
    """

    _attribute_map = {
        'time': {'key': 'time', 'type': 'iso-8601'},
        'value': {'key': 'value', 'type': 'int'}
    }

    def __init__(self, time=None, value=None):
        super(CodeChangeTrendItem, self).__init__()
        self.time = time
        self.value = value
| 35.366667 | 95 | 0.462771 |
from msrest.serialization import Model
class CodeChangeTrendItem(Model):
    """A single sample in a code-change trend series.

    :param time: timestamp of the sample (serialized as ISO-8601).
    :type time: datetime
    :param value: trend value recorded at *time*.
    :type value: int
    """

    _attribute_map = {
        'time': {'key': 'time', 'type': 'iso-8601'},
        'value': {'key': 'value', 'type': 'int'}
    }

    def __init__(self, time=None, value=None):
        super(CodeChangeTrendItem, self).__init__()
        self.time = time
        self.value = value
| true | true |
f71e8a720251ce2de0a066d3b179ea50b2fd4a45 | 4,594 | py | Python | tencentcloud/ocr/v20181119/ocr_client.py | liangzhengkang/tencentcloud-sdk-python | c8f990b33f3701e04149a3d613538829a88269eb | [
"Apache-2.0"
] | null | null | null | tencentcloud/ocr/v20181119/ocr_client.py | liangzhengkang/tencentcloud-sdk-python | c8f990b33f3701e04149a3d613538829a88269eb | [
"Apache-2.0"
] | null | null | null | tencentcloud/ocr/v20181119/ocr_client.py | liangzhengkang/tencentcloud-sdk-python | c8f990b33f3701e04149a3d613538829a88269eb | [
"Apache-2.0"
] | 1 | 2019-03-25T02:21:47.000Z | 2019-03-25T02:21:47.000Z | # -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.common.abstract_client import AbstractClient
from tencentcloud.ocr.v20181119 import models
class OcrClient(AbstractClient):
    """Client for the Tencent Cloud OCR service, API version 2018-11-19."""
    _apiVersion = '2018-11-19'
    _endpoint = 'ocr.tencentcloudapi.com'

    def _invoke(self, action, request, response_type):
        """Serialize *request*, call *action*, and deserialize the response.

        Shared request pipeline for every API method on this client.

        :param action: name of the API action, e.g. ``"GeneralBasicOCR"``.
        :param request: request model exposing ``_serialize()``.
        :param response_type: response model class to deserialize into.
        :rtype: instance of *response_type*
        :raises TencentCloudSDKException: if the API reports an error or the
            call fails for any other reason.
        """
        try:
            params = request._serialize()
            body = self.call(action, params)
            response = json.loads(body)
            if "Error" not in response["Response"]:
                model = response_type()
                model._deserialize(response["Response"])
                return model
            # API-level error: surface code, message and request id.
            code = response["Response"]["Error"]["Code"]
            message = response["Response"]["Error"]["Message"]
            reqid = response["Response"]["RequestId"]
            raise TencentCloudSDKException(code, message, reqid)
        except TencentCloudSDKException:
            raise
        except Exception as e:
            # Python 3 exceptions have no ``message`` attribute, so the old
            # ``e.message`` access itself raised AttributeError; use str(e).
            raise TencentCloudSDKException(type(e).__name__, str(e))

    def GeneralBasicOCR(self, request):
        """General printed-text recognition. Detects and recognizes all text
        in an image, returning the position of each text box together with
        its content. Supports whole-image recognition for arbitrary layouts
        and many scenarios, covering Chinese, English, letters, digits,
        Japanese and Korean. Typical use cases: printed documents, web
        images, advertisements, street signs, menus, video titles, avatars.

        :param request: request for the GeneralBasicOCR call.
        :type request: :class:`tencentcloud.ocr.v20181119.models.GeneralBasicOCRRequest`
        :rtype: :class:`tencentcloud.ocr.v20181119.models.GeneralBasicOCRResponse`
        """
        return self._invoke("GeneralBasicOCR", request, models.GeneralBasicOCRResponse)

    def GeneralFastOCR(self, request):
        """General printed-text recognition (high-speed edition). Same text
        detection and recognition service as GeneralBasicOCR, with faster
        recognition and a higher supported QPS.

        :param request: request for the GeneralFastOCR call.
        :type request: :class:`tencentcloud.ocr.v20181119.models.GeneralFastOCRRequest`
        :rtype: :class:`tencentcloud.ocr.v20181119.models.GeneralFastOCRResponse`
        """
        return self._invoke("GeneralFastOCR", request, models.GeneralFastOCRResponse)

    def IDCardOCR(self, request):
        """Recognizes every field on both sides of a second-generation
        Chinese ID card: name, gender, ethnicity, date of birth, address,
        citizen ID number, issuing authority and validity period. Can crop
        the ID photo / portrait and warn when the input appears to be a
        re-shot or a photocopy. Typical use cases: bank account opening,
        user registration, face-based identity verification.

        :param request: request for the IDCardOCR call.
        :type request: :class:`tencentcloud.ocr.v20181119.models.IDCardOCRRequest`
        :rtype: :class:`tencentcloud.ocr.v20181119.models.IDCardOCRResponse`
        """
        return self._invoke("IDCardOCR", request, models.IDCardOCRResponse)
import json
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.common.abstract_client import AbstractClient
from tencentcloud.ocr.v20181119 import models
class OcrClient(AbstractClient):
    """Client for the Tencent Cloud OCR service, API version 2018-11-19."""
    _apiVersion = '2018-11-19'
    _endpoint = 'ocr.tencentcloudapi.com'

    def GeneralBasicOCR(self, request):
        """General printed-text recognition: returns text-box positions and
        contents for all text detected in an image.

        :param request: request for the GeneralBasicOCR call.
        :rtype: models.GeneralBasicOCRResponse
        :raises TencentCloudSDKException: on any API or transport error.
        """
        try:
            params = request._serialize()
            body = self.call("GeneralBasicOCR", params)
            response = json.loads(body)
            if "Error" not in response["Response"]:
                model = models.GeneralBasicOCRResponse()
                model._deserialize(response["Response"])
                return model
            else:
                # API-level error: surface code, message and request id.
                code = response["Response"]["Error"]["Code"]
                message = response["Response"]["Error"]["Message"]
                reqid = response["Response"]["RequestId"]
                raise TencentCloudSDKException(code, message, reqid)
        except Exception as e:
            if isinstance(e, TencentCloudSDKException):
                raise
            else:
                # NOTE(review): Python 3 exceptions have no ``message``
                # attribute — this line raises AttributeError on py3; confirm
                # py2-only usage or switch to str(e).
                raise TencentCloudSDKException(e.message, e.message)

    def GeneralFastOCR(self, request):
        """General printed-text recognition, high-speed edition (faster and
        higher QPS than GeneralBasicOCR).

        :param request: request for the GeneralFastOCR call.
        :rtype: models.GeneralFastOCRResponse
        :raises TencentCloudSDKException: on any API or transport error.
        """
        try:
            params = request._serialize()
            body = self.call("GeneralFastOCR", params)
            response = json.loads(body)
            if "Error" not in response["Response"]:
                model = models.GeneralFastOCRResponse()
                model._deserialize(response["Response"])
                return model
            else:
                # API-level error: surface code, message and request id.
                code = response["Response"]["Error"]["Code"]
                message = response["Response"]["Error"]["Message"]
                reqid = response["Response"]["RequestId"]
                raise TencentCloudSDKException(code, message, reqid)
        except Exception as e:
            if isinstance(e, TencentCloudSDKException):
                raise
            else:
                # NOTE(review): see GeneralBasicOCR — ``e.message`` is py2-only.
                raise TencentCloudSDKException(e.message, e.message)

    def IDCardOCR(self, request):
        """Second-generation Chinese ID-card recognition (both sides, all
        fields), with photo cropping and re-shot/photocopy warnings.

        :param request: request for the IDCardOCR call.
        :rtype: models.IDCardOCRResponse
        :raises TencentCloudSDKException: on any API or transport error.
        """
        try:
            params = request._serialize()
            body = self.call("IDCardOCR", params)
            response = json.loads(body)
            if "Error" not in response["Response"]:
                model = models.IDCardOCRResponse()
                model._deserialize(response["Response"])
                return model
            else:
                # API-level error: surface code, message and request id.
                code = response["Response"]["Error"]["Code"]
                message = response["Response"]["Error"]["Message"]
                reqid = response["Response"]["RequestId"]
                raise TencentCloudSDKException(code, message, reqid)
        except Exception as e:
            if isinstance(e, TencentCloudSDKException):
                raise
            else:
                # NOTE(review): see GeneralBasicOCR — ``e.message`` is py2-only.
                raise TencentCloudSDKException(e.message, e.message)
f71e8ab3c51338680d4b08a028ea1ec34d4be34f | 2,675 | py | Python | wxviews/core/tests/pipes_tests.py | eumis/wxviews | 7b3adffb119e480807276ea3149d878c3879baaf | [
"MIT"
] | 6 | 2018-04-12T20:30:57.000Z | 2020-03-25T12:57:00.000Z | wxviews/core/tests/pipes_tests.py | eumis/wxviews | 7b3adffb119e480807276ea3149d878c3879baaf | [
"MIT"
] | null | null | null | wxviews/core/tests/pipes_tests.py | eumis/wxviews | 7b3adffb119e480807276ea3149d878c3879baaf | [
"MIT"
] | null | null | null | from unittest.mock import Mock, call, patch
from pytest import fixture, mark, fail
from pyviews.core import XmlAttr
from wxviews.core import pipes, WxRenderingContext
from wxviews.core.pipes import apply_attributes, add_to_sizer
from wxviews.widgets import WxNode
class TestControl:
    """Stub control exposing writable ``node_key`` and ``instance_key``."""

    def __init__(self):
        self.node_key = None
        self.instance_key = None
class TestNode(WxNode):
    """WxNode stub whose second constructor argument is replaced by a mock."""

    def __init__(self, widget):
        super().__init__(widget, Mock())
        self.node_key = None
@fixture
def apply_attribute_fixture(request):
    """Patch ``pipes.apply_attribute`` and expose the mock on the test class."""
    with patch(pipes.__name__ + '.apply_attribute') as apply_attribute_mock:
        # Stored on the class so tests can reach it via self.apply_attribute.
        request.cls.apply_attribute = apply_attribute_mock
        yield apply_attribute_mock
@mark.usefixtures('apply_attribute_fixture')
class ApplyAttributesTests:
    """apply_attributes() step tests"""

    @mark.parametrize('attr', [
        XmlAttr('key', 'value', 'init')
    ])
    def test_skip_special_attributes(self, attr):
        """should skip attributes with "init" and "sizer" namespaces"""
        # Mock is class-level (set by the fixture); clear earlier calls.
        self.apply_attribute.reset_mock()
        node = Mock(xml_node=Mock(attrs=[attr]))

        apply_attributes(node, WxRenderingContext())

        assert not self.apply_attribute.called

    @mark.parametrize('attrs', [
        [XmlAttr('key', 'value')],
        [XmlAttr('key', 'value', ''), XmlAttr('other_key', 'key', 'some namespace')]
    ])
    def test_apply_attributes(self, attrs):
        """should apply passed attributes"""
        # Mock is class-level (set by the fixture); clear earlier calls.
        self.apply_attribute.reset_mock()
        node = Mock(xml_node=Mock(attrs=attrs))

        apply_attributes(node, WxRenderingContext())

        assert self.apply_attribute.call_args_list == [call(node, attr) for attr in attrs]
class AddToSizerTests:
    """add_to_sizer() step tests"""

    @staticmethod
    def _get_mocks(sizer_args=None, node_globals=None):
        """Build a (node, sizer) pair of mocks for add_to_sizer() tests."""
        sizer_args = sizer_args if sizer_args else {}
        # Fixed typo: "instace" -> "instance". Previously the explicit mock
        # was never attached and Mock silently auto-created node.instance.
        node = Mock(sizer_args=sizer_args, node_globals=node_globals, instance=Mock())
        return node, Mock()

    @mark.parametrize('sizer_args', [
        {},
        {'key': 'value'},
        {'key': 'value', 'one': 1}
    ])
    def test_passes_attr_args(self, sizer_args):
        """should call sizer.Add with node.sizer_args"""
        node, sizer = self._get_mocks(sizer_args)

        add_to_sizer(node, WxRenderingContext({'sizer': sizer}))

        assert sizer.Add.call_args == call(node.instance, **sizer_args)

    def test_skips_if_sizer_missed(self):
        """should not raise when the rendering context has no sizer"""
        node = self._get_mocks()[0]
        try:
            add_to_sizer(node, WxRenderingContext())
        except BaseException:
            fail()
| 29.722222 | 90 | 0.662804 | from unittest.mock import Mock, call, patch
from pytest import fixture, mark, fail
from pyviews.core import XmlAttr
from wxviews.core import pipes, WxRenderingContext
from wxviews.core.pipes import apply_attributes, add_to_sizer
from wxviews.widgets import WxNode
class TestControl:
def __init__(self):
self.node_key = None
self.instance_key = None
class TestNode(WxNode):
def __init__(self, widget):
super().__init__(widget, Mock())
self.node_key = None
@fixture
def apply_attribute_fixture(request):
with patch(pipes.__name__ + '.apply_attribute') as apply_attribute_mock:
request.cls.apply_attribute = apply_attribute_mock
yield apply_attribute_mock
@mark.usefixtures('apply_attribute_fixture')
class ApplyAttributesTests:
@mark.parametrize('attr', [
XmlAttr('key', 'value', 'init')
])
def test_skip_special_attributes(self, attr):
self.apply_attribute.reset_mock()
node = Mock(xml_node=Mock(attrs=[attr]))
apply_attributes(node, WxRenderingContext())
assert not self.apply_attribute.called
@mark.parametrize('attrs', [
[XmlAttr('key', 'value')],
[XmlAttr('key', 'value', ''), XmlAttr('other_key', 'key', 'some namespace')]
])
def test_apply_attributes(self, attrs):
self.apply_attribute.reset_mock()
node = Mock(xml_node=Mock(attrs=attrs))
apply_attributes(node, WxRenderingContext())
assert self.apply_attribute.call_args_list == [call(node, attr) for attr in attrs]
class AddToSizerTests:
@staticmethod
def _get_mocks(sizer_args=None, node_globals=None):
sizer_args = sizer_args if sizer_args else {}
node = Mock(sizer_args=sizer_args, node_globals=node_globals, instace=Mock())
return node, Mock()
@mark.parametrize('sizer_args', [
{},
{'key': 'value'},
{'key': 'value', 'one': 1}
])
def test_passes_attr_args(self, sizer_args):
node, sizer = self._get_mocks(sizer_args)
add_to_sizer(node, WxRenderingContext({'sizer': sizer}))
assert sizer.Add.call_args == call(node.instance, **sizer_args)
def test_skips_if_sizer_missed(self):
node = self._get_mocks()[0]
try:
add_to_sizer(node, WxRenderingContext())
except BaseException:
fail()
| true | true |
f71e8b07ffd796c578661e541401ebb3b60cb3f3 | 4,489 | py | Python | tests/test_estimators.py | amgrigoriev/daal4py | 97fbe7a9181410dac348dc724178e8605492e3c4 | [
"Apache-2.0"
] | null | null | null | tests/test_estimators.py | amgrigoriev/daal4py | 97fbe7a9181410dac348dc724178e8605492e3c4 | [
"Apache-2.0"
] | null | null | null | tests/test_estimators.py | amgrigoriev/daal4py | 97fbe7a9181410dac348dc724178e8605492e3c4 | [
"Apache-2.0"
] | null | null | null | #*******************************************************************************
# Copyright 2014-2020 Intel Corporation
# All Rights Reserved.
#
# This software is licensed under the Apache License, Version 2.0 (the
# "License"), the following terms apply:
#
# You may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#*******************************************************************************
import unittest
from sklearn.utils.estimator_checks import check_estimator
import sklearn.utils.estimator_checks
from daal4py import __daal_run_version__
daal_run_version = tuple(map(int, (__daal_run_version__[0:4], __daal_run_version__[4:8])))
from daal4py.sklearn.neighbors import KNeighborsClassifier
from daal4py.sklearn.ensemble import RandomForestClassifier
from daal4py.sklearn.ensemble import RandomForestRegressor
from daal4py.sklearn.ensemble import GBTDAALClassifier
from daal4py.sklearn.ensemble import GBTDAALRegressor
from daal4py.sklearn.ensemble import AdaBoostClassifier
from daal4py import __daal_link_version__ as dv
daal_version = tuple(map(int, (dv[0:4], dv[4:8])))
def check_version(rule, target):
if not isinstance(rule[0], type(target)):
if rule > target:
return False
else:
for rule_item in range(len(rule)):
if rule[rule_item] > target:
return False
else:
if rule[rule_item][0]==target[0]:
break
return True
def _replace_and_save(md, fns, replacing_fn):
"""
Replaces functions in `fns` list in `md` module with `replacing_fn`.
Returns the dictionary with functions that were replaced.
"""
saved = dict()
for check_f in fns:
try:
fn = getattr(md, check_f)
setattr(md, check_f, replacing_fn)
saved[check_f] = fn
except:
pass
return saved
def _restore_from_saved(md, saved_dict):
"""
Restores functions in `md` that were replaced in the function above.
"""
for check_f in saved_dict:
setattr(md, check_f, saved_dict[check_f])
class Test(unittest.TestCase):
def test_KNeighborsClassifier(self):
check_estimator(KNeighborsClassifier)
@unittest.skipUnless(check_version(((2019,0),(2021, 107)), daal_version), "not supported in this library version")
def test_RandomForestClassifier(self):
# check_methods_subset_invariance fails.
# Issue is created:
# https://github.com/IntelPython/daal4py/issues/129
# Skip the test
def dummy(*args, **kwargs):
pass
md = sklearn.utils.estimator_checks
saved = _replace_and_save(md, ['check_methods_subset_invariance', 'check_dict_unchanged'], dummy)
check_estimator(RandomForestClassifier)
_restore_from_saved(md, saved)
def test_RandomForestRegressor(self):
# check_fit_idempotent is known to fail with DAAL's decision
# forest regressor, due to different partitioning of data
# between threads from run to run.
# Hence skip that test
def dummy(*args, **kwargs):
pass
md = sklearn.utils.estimator_checks
saved = _replace_and_save(md, ['check_methods_subset_invariance', 'check_dict_unchanged'], dummy)
check_estimator(RandomForestRegressor)
_restore_from_saved(md, saved)
def test_GBTDAALClassifier(self):
check_estimator(GBTDAALClassifier)
def test_GBTDAALRegressor(self):
def dummy(*args, **kwargs):
pass
md = sklearn.utils.estimator_checks
# got unexpected slightly different prediction result between two same calls in this test
saved = _replace_and_save(md, ['check_estimators_data_not_an_array'], dummy)
check_estimator(GBTDAALRegressor)
_restore_from_saved(md, saved)
@unittest.skipIf(daal_run_version < (2020, 0), "not supported in this library version")
def test_AdaBoostClassifier(self):
check_estimator(AdaBoostClassifier)
if __name__ == '__main__':
unittest.main()
| 35.626984 | 118 | 0.676543 |
import unittest
from sklearn.utils.estimator_checks import check_estimator
import sklearn.utils.estimator_checks
from daal4py import __daal_run_version__
daal_run_version = tuple(map(int, (__daal_run_version__[0:4], __daal_run_version__[4:8])))
from daal4py.sklearn.neighbors import KNeighborsClassifier
from daal4py.sklearn.ensemble import RandomForestClassifier
from daal4py.sklearn.ensemble import RandomForestRegressor
from daal4py.sklearn.ensemble import GBTDAALClassifier
from daal4py.sklearn.ensemble import GBTDAALRegressor
from daal4py.sklearn.ensemble import AdaBoostClassifier
from daal4py import __daal_link_version__ as dv
daal_version = tuple(map(int, (dv[0:4], dv[4:8])))
def check_version(rule, target):
if not isinstance(rule[0], type(target)):
if rule > target:
return False
else:
for rule_item in range(len(rule)):
if rule[rule_item] > target:
return False
else:
if rule[rule_item][0]==target[0]:
break
return True
def _replace_and_save(md, fns, replacing_fn):
saved = dict()
for check_f in fns:
try:
fn = getattr(md, check_f)
setattr(md, check_f, replacing_fn)
saved[check_f] = fn
except:
pass
return saved
def _restore_from_saved(md, saved_dict):
for check_f in saved_dict:
setattr(md, check_f, saved_dict[check_f])
class Test(unittest.TestCase):
def test_KNeighborsClassifier(self):
check_estimator(KNeighborsClassifier)
@unittest.skipUnless(check_version(((2019,0),(2021, 107)), daal_version), "not supported in this library version")
def test_RandomForestClassifier(self):
def dummy(*args, **kwargs):
pass
md = sklearn.utils.estimator_checks
saved = _replace_and_save(md, ['check_methods_subset_invariance', 'check_dict_unchanged'], dummy)
check_estimator(RandomForestClassifier)
_restore_from_saved(md, saved)
def test_RandomForestRegressor(self):
# forest regressor, due to different partitioning of data
# between threads from run to run.
# Hence skip that test
def dummy(*args, **kwargs):
pass
md = sklearn.utils.estimator_checks
saved = _replace_and_save(md, ['check_methods_subset_invariance', 'check_dict_unchanged'], dummy)
check_estimator(RandomForestRegressor)
_restore_from_saved(md, saved)
def test_GBTDAALClassifier(self):
check_estimator(GBTDAALClassifier)
def test_GBTDAALRegressor(self):
def dummy(*args, **kwargs):
pass
md = sklearn.utils.estimator_checks
# got unexpected slightly different prediction result between two same calls in this test
saved = _replace_and_save(md, ['check_estimators_data_not_an_array'], dummy)
check_estimator(GBTDAALRegressor)
_restore_from_saved(md, saved)
@unittest.skipIf(daal_run_version < (2020, 0), "not supported in this library version")
def test_AdaBoostClassifier(self):
check_estimator(AdaBoostClassifier)
if __name__ == '__main__':
unittest.main()
| true | true |
f71e8b1a591ecfd26ed606bf4d5a7a7fd8179642 | 783 | py | Python | glue_vispy_viewers/extern/vispy/io/__init__.py | jzuhone/glue-vispy-viewers | d940705f4ba95f8d7a9a74d37fb68c71080b490a | [
"BSD-2-Clause"
] | 3 | 2018-05-09T17:55:53.000Z | 2019-07-22T09:14:41.000Z | glue_vispy_viewers/extern/vispy/io/__init__.py | jzuhone/glue-vispy-viewers | d940705f4ba95f8d7a9a74d37fb68c71080b490a | [
"BSD-2-Clause"
] | 9 | 2017-04-07T01:44:15.000Z | 2018-12-16T20:47:08.000Z | graphViz/vispy/io/__init__.py | onecklam/ethereum-graphviz | 6993accf0cb85e23013bf7ae6b04145724a6dbd2 | [
"Apache-2.0"
] | 1 | 2021-09-15T08:52:26.000Z | 2021-09-15T08:52:26.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Utilities related to data reading, writing, fetching, and generation.
"""
from os import path as _op
from .datasets import (load_iris, load_crate, load_data_file, # noqa
load_spatial_filters) # noqa
from .mesh import read_mesh, write_mesh # noqa
from .image import (read_png, write_png, imread, imsave, _make_png, # noqa
_check_img_lib) # noqa
_data_dir = _op.join(_op.dirname(__file__), '_data')
__all__ = ['imread', 'imsave', 'load_iris', 'load_crate',
'load_spatial_filters', 'load_data_file',
'read_mesh', 'read_png', 'write_mesh',
'write_png']
| 34.043478 | 75 | 0.659004 |
from os import path as _op
from .datasets import (load_iris, load_crate, load_data_file,
load_spatial_filters)
from .mesh import read_mesh, write_mesh
from .image import (read_png, write_png, imread, imsave, _make_png,
_check_img_lib)
_data_dir = _op.join(_op.dirname(__file__), '_data')
__all__ = ['imread', 'imsave', 'load_iris', 'load_crate',
'load_spatial_filters', 'load_data_file',
'read_mesh', 'read_png', 'write_mesh',
'write_png']
| true | true |
f71e8b9167da6ce9a96bc811cf78357b138536ca | 2,654 | py | Python | dfp/create_creatives.py | togetter/dfp-prebid-setup | a9d0b2c60558c9b561de430a4f0b191996c98da0 | [
"MIT"
] | null | null | null | dfp/create_creatives.py | togetter/dfp-prebid-setup | a9d0b2c60558c9b561de430a4f0b191996c98da0 | [
"MIT"
] | null | null | null | dfp/create_creatives.py | togetter/dfp-prebid-setup | a9d0b2c60558c9b561de430a4f0b191996c98da0 | [
"MIT"
] | null | null | null |
import logging
import os
import pprint
from googleads import ad_manager
from dfp.client import get_client
logger = logging.getLogger(__name__)
def create_creatives(creatives):
"""
Creates creatives in DFP.
Args:
creatives (arr): an array of objects, each a creative configuration
Returns:
an array: an array of created creative IDs
"""
dfp_client = get_client()
creative_service = dfp_client.GetService('CreativeService',
version='v201811')
creatives = creative_service.createCreatives(creatives)
# Return IDs of created line items.
created_creative_ids = []
for creative in creatives:
created_creative_ids.append(creative['id'])
logger.info(u'Created creative with name "{name}".'.format(name=creative['name']))
return created_creative_ids
def create_creative_config(name, advertiser_id):
"""
Creates a creative config object.
Args:
name (str): the name of the creative
advertiser_id (int): the ID of the advertiser in DFP
Returns:
an object: the line item config
"""
snippet_file_path = os.path.join(os.path.dirname(__file__),
'creative_snippet.html')
with open(snippet_file_path, 'r') as snippet_file:
snippet = snippet_file.read()
# https://developers.google.com/doubleclick-publishers/docs/reference/v201802/CreativeService.Creative
config = {
'xsi_type': 'ThirdPartyCreative',
'name': name,
'advertiserId': advertiser_id,
'size': {
'width': '1',
'height': '1'
},
'snippet': snippet,
'isSafeFrameCompatible': True,
}
return config
def build_creative_name(order_name, creative_num):
"""
Returns a name for a creative.
Args:
order_name (int): the name of the order in DFP
creative_num (int): the num_creatives distinguising this creative from any
duplicates
Returns:
a string
"""
return 'HB {order_name}, #{num}'.format(
order_name=order_name, num=creative_num)
def create_duplicate_creative_configs(order_name, advertiser_id,
num_creatives=1):
"""
Returns an array of creative config object.
Args:
order_name (int): the name of the order in DFP
advertiser_id (int): the ID of the advertiser in DFP
num_creatives (int): how many creative configs to generate
Returns:
an array: an array of length `num_creatives`, each item a line item config
"""
creative_configs = []
for creative_num in range(1, num_creatives + 1):
config = create_creative_config(
name=build_creative_name(order_name, creative_num),
advertiser_id=advertiser_id,
)
creative_configs.append(config)
return creative_configs
| 26.54 | 104 | 0.708365 |
import logging
import os
import pprint
from googleads import ad_manager
from dfp.client import get_client
logger = logging.getLogger(__name__)
def create_creatives(creatives):
dfp_client = get_client()
creative_service = dfp_client.GetService('CreativeService',
version='v201811')
creatives = creative_service.createCreatives(creatives)
created_creative_ids = []
for creative in creatives:
created_creative_ids.append(creative['id'])
logger.info(u'Created creative with name "{name}".'.format(name=creative['name']))
return created_creative_ids
def create_creative_config(name, advertiser_id):
snippet_file_path = os.path.join(os.path.dirname(__file__),
'creative_snippet.html')
with open(snippet_file_path, 'r') as snippet_file:
snippet = snippet_file.read()
config = {
'xsi_type': 'ThirdPartyCreative',
'name': name,
'advertiserId': advertiser_id,
'size': {
'width': '1',
'height': '1'
},
'snippet': snippet,
'isSafeFrameCompatible': True,
}
return config
def build_creative_name(order_name, creative_num):
return 'HB {order_name}, #{num}'.format(
order_name=order_name, num=creative_num)
def create_duplicate_creative_configs(order_name, advertiser_id,
num_creatives=1):
creative_configs = []
for creative_num in range(1, num_creatives + 1):
config = create_creative_config(
name=build_creative_name(order_name, creative_num),
advertiser_id=advertiser_id,
)
creative_configs.append(config)
return creative_configs
| true | true |
f71e8d1c9a2ded59f56da7cb9494713a1bd65190 | 354 | py | Python | pwgen.py | anokata/pythonPetProjects | 245c3ff11ae560b17830970061d8d60013948fd7 | [
"MIT"
] | 3 | 2017-04-30T17:44:53.000Z | 2018-02-03T06:02:11.000Z | pwgen.py | anokata/pythonPetProjects | 245c3ff11ae560b17830970061d8d60013948fd7 | [
"MIT"
] | 10 | 2021-03-18T20:17:19.000Z | 2022-03-11T23:14:19.000Z | pwgen.py | anokata/pythonPetProjects | 245c3ff11ae560b17830970061d8d60013948fd7 | [
"MIT"
] | null | null | null | for a in range(10):
for b in range(10):
for c in range(10):
for d in range(10):
for e in range(10):
for f in range(10):
for g in range(10):
for h in range(10):
print("{}{}{}{}{}{}{}{}".format(a,b,c,d,e,f,g,h))
| 35.4 | 81 | 0.347458 | for a in range(10):
for b in range(10):
for c in range(10):
for d in range(10):
for e in range(10):
for f in range(10):
for g in range(10):
for h in range(10):
print("{}{}{}{}{}{}{}{}".format(a,b,c,d,e,f,g,h))
| true | true |
f71e8e3cc6255da0daeb05d09e65a982f946b838 | 521 | py | Python | elements/python/11/1/soln.py | mmcloughlin/problems | 6095842ffe007a12ec8c2093850515aa4e046616 | [
"MIT"
] | 11 | 2019-02-08T06:54:34.000Z | 2021-08-07T18:57:39.000Z | elements/python/11/1/soln.py | mmcloughlin/problems | 6095842ffe007a12ec8c2093850515aa4e046616 | [
"MIT"
] | 1 | 2019-05-21T08:14:10.000Z | 2019-05-21T08:14:10.000Z | elements/python/11/1/soln.py | mmcloughlin/problems | 6095842ffe007a12ec8c2093850515aa4e046616 | [
"MIT"
] | null | null | null | import heapq
import random
def merge(lists):
heapq.heapify(lists)
m = []
while len(lists) > 0:
l = heapq.heappop(lists)
if len(l) == 0:
continue
m.append(l.pop(0))
heapq.heappush(lists, l)
return m
def test(n, k):
lists = [[] for _ in xrange(k)]
for i in xrange(n):
lists[random.randrange(k)].append(i)
m = merge(lists)
assert m == range(n)
print 'pass'
def main():
test(100000, 50)
if __name__ == '__main__':
main()
| 16.28125 | 44 | 0.539347 | import heapq
import random
def merge(lists):
heapq.heapify(lists)
m = []
while len(lists) > 0:
l = heapq.heappop(lists)
if len(l) == 0:
continue
m.append(l.pop(0))
heapq.heappush(lists, l)
return m
def test(n, k):
lists = [[] for _ in xrange(k)]
for i in xrange(n):
lists[random.randrange(k)].append(i)
m = merge(lists)
assert m == range(n)
print 'pass'
def main():
test(100000, 50)
if __name__ == '__main__':
main()
| false | true |
f71e8ea85b6b54c6670609e8e6c0a91688ec4952 | 87 | py | Python | pyjswidgets/pyjamas/XMLDoc.py | takipsizad/pyjs | 54db0ba6747aca744f9f3c3e985a17e913dfb951 | [
"ECL-2.0",
"Apache-2.0"
] | 739 | 2015-01-01T02:05:11.000Z | 2022-03-30T15:26:16.000Z | pyjswidgets/pyjamas/XMLDoc.py | takipsizad/pyjs | 54db0ba6747aca744f9f3c3e985a17e913dfb951 | [
"ECL-2.0",
"Apache-2.0"
] | 33 | 2015-03-25T23:17:04.000Z | 2021-08-19T08:25:22.000Z | pyjswidgets/pyjamas/XMLDoc.py | takipsizad/pyjs | 54db0ba6747aca744f9f3c3e985a17e913dfb951 | [
"ECL-2.0",
"Apache-2.0"
] | 167 | 2015-01-01T22:27:47.000Z | 2022-03-17T13:29:19.000Z | from __pyjamas__ import get_main_frame, JS
def create_xml_doc(text):
return None
| 14.5 | 42 | 0.781609 | from __pyjamas__ import get_main_frame, JS
def create_xml_doc(text):
return None
| true | true |
f71e8f3a720d0140da369b16a9db389ed78c68db | 1,016 | py | Python | mak/libs/ircc/ir_grammar/ir_opcodes/ir_vector.py | bugengine/BugEngine | 1b3831d494ee06b0bd74a8227c939dd774b91226 | [
"BSD-3-Clause"
] | 4 | 2015-05-13T16:28:36.000Z | 2017-05-24T15:34:14.000Z | mak/libs/ircc/ir_grammar/ir_opcodes/ir_vector.py | bugengine/BugEngine | 1b3831d494ee06b0bd74a8227c939dd774b91226 | [
"BSD-3-Clause"
] | null | null | null | mak/libs/ircc/ir_grammar/ir_opcodes/ir_vector.py | bugengine/BugEngine | 1b3831d494ee06b0bd74a8227c939dd774b91226 | [
"BSD-3-Clause"
] | 1 | 2017-03-21T08:28:07.000Z | 2017-03-21T08:28:07.000Z | from ...ir_ast.instructions import IrInstExtractElement, IrInstInsertElement, IrInstShuffleVector
from be_typing import TYPE_CHECKING
def p_ir_opcode_vector_extract(p):
# type: (YaccProduction) -> None
"""
ir-opcode : ir-instruction-assignment EXTRACTELEMENT ir-value COMMA ir-value ir-instruction-attachment-list
"""
p[0] = IrInstExtractElement(p[1], p[3], p[5], p[6])
def p_ir_opcode_vector_insert(p):
# type: (YaccProduction) -> None
"""
ir-opcode : ir-instruction-assignment INSERTELEMENT ir-value COMMA ir-value COMMA ir-value ir-instruction-attachment-list
"""
p[0] = IrInstInsertElement(p[1], p[3], p[5], p[7], p[8])
def p_ir_opcode_vector_shuffle(p):
# type: (YaccProduction) -> None
"""
ir-opcode : ir-instruction-assignment SHUFFLEVECTOR ir-value COMMA ir-value COMMA ir-value ir-instruction-attachment-list
"""
p[0] = IrInstShuffleVector(p[1], p[3], p[5], p[7], p[8])
if TYPE_CHECKING:
from ply.yacc import YaccProduction | 33.866667 | 129 | 0.694882 | from ...ir_ast.instructions import IrInstExtractElement, IrInstInsertElement, IrInstShuffleVector
from be_typing import TYPE_CHECKING
def p_ir_opcode_vector_extract(p):
p[0] = IrInstExtractElement(p[1], p[3], p[5], p[6])
def p_ir_opcode_vector_insert(p):
p[0] = IrInstInsertElement(p[1], p[3], p[5], p[7], p[8])
def p_ir_opcode_vector_shuffle(p):
p[0] = IrInstShuffleVector(p[1], p[3], p[5], p[7], p[8])
if TYPE_CHECKING:
from ply.yacc import YaccProduction | true | true |
f71e8f9b1530c92926a1479f24db743c1cf1dcfc | 3,414 | py | Python | modules/boost/simd/predicates/script/is_eqz.py | timblechmann/nt2 | 6c71f7063ca4e5975c9c019877e6b2fe07c9e4ce | [
"BSL-1.0"
] | 2 | 2016-09-14T00:23:53.000Z | 2018-01-14T12:51:18.000Z | modules/boost/simd/predicates/script/is_eqz.py | timblechmann/nt2 | 6c71f7063ca4e5975c9c019877e6b2fe07c9e4ce | [
"BSL-1.0"
] | null | null | null | modules/boost/simd/predicates/script/is_eqz.py | timblechmann/nt2 | 6c71f7063ca4e5975c9c019877e6b2fe07c9e4ce | [
"BSL-1.0"
] | null | null | null | [ ## this file was manually modified by jt
{
'functor' : {
'description' : ['Returns True<result_type>() or False<result_type>() according a0 is zero or not.'],
'module' : 'boost',
'arity' : '1',
'call_types' : [],
'ret_arity' : '0',
'rturn' : {
'default' : 'typename boost::simd::meta::as_logical<T>::type',
},
'simd_types' : ['real_'],
'special' : ['predicate'],
'type_defs' : [],
'types' : ['real_', 'signed_int_', 'unsigned_int_'],
},
'info' : 'manually modified',
'unit' : {
'global_header' : {
'first_stamp' : 'created by jt the 21/02/2011',
'included' : ['#include <boost/simd/sdk/simd/logical.hpp>'],
'no_ulp' : 'True',
'notes' : [],
'stamp' : 'modified by jt the 21/02/2011',
},
'ranges' : {
'default' : [['T(-10000)', 'T(10000)']],
},
'specific_values' : {
'default' : {
'boost::simd::One<T>()' : {'result' : 'boost::simd::False<r_t>()','ulp_thresh' : '0.5',},
'boost::simd::Two<T>()' : {'result' : 'boost::simd::False<r_t>()','ulp_thresh' : '0.5',},
'boost::simd::Zero<T>()' : {'result' : 'boost::simd::True<r_t>()','ulp_thresh' : '0.5',},
},
'real_' : {
'boost::simd::Mzero<T>()' : {'result' : 'boost::simd::True<r_t>()','ulp_thresh' : '0.5',},
'boost::simd::Half<T>()' : {'result' : 'boost::simd::False<r_t>()','ulp_thresh' : '0.5',},
'boost::simd::Inf<T>()' : {'result' : 'boost::simd::False<r_t>()','ulp_thresh' : '0.5',},
'boost::simd::Minf<T>()' : {'result' : 'boost::simd::False<r_t>()','ulp_thresh' : '0.5',},
'boost::simd::Mone<T>()' : {'result' : 'boost::simd::False<r_t>()','ulp_thresh' : '0.5',},
'boost::simd::Nan<T>()' : {'result' : 'boost::simd::False<r_t>()','ulp_thresh' : '0.5',},
'boost::simd::One<T>()' : {'result' : 'boost::simd::False<r_t>()','ulp_thresh' : '0.5',},
'boost::simd::Quarter<T>()' : {'result' : 'boost::simd::False<r_t>()','ulp_thresh' : '0.5',},
'boost::simd::Two<T>()' : {'result' : 'boost::simd::False<r_t>()','ulp_thresh' : '0.5',},
'boost::simd::Zero<T>()' : {'result' : 'boost::simd::True<r_t>()','ulp_thresh' : '0.5',},
},
'signed_int_' : {
'boost::simd::Mone<T>()' : {'result' : 'boost::simd::False<r_t>()','ulp_thresh' : '0.5',},
'boost::simd::One<T>()' : {'result' : 'boost::simd::False<r_t>()','ulp_thresh' : '0.5',},
'boost::simd::Two<T>()' : {'result' : 'boost::simd::False<r_t>()','ulp_thresh' : '0.5',},
'boost::simd::Zero<T>()' : {'result' : 'boost::simd::True<r_t>()','ulp_thresh' : '0.5',},
},
},
'verif_test' : {
'property_call' : {
'default' : ['boost::simd::is_eqz(a0)'],
},
'property_value' : {
'default' : ['a0==0'],
},
'simd' : {
},
'ulp_thresh' : {
'default' : ['0'],
},
},
},
},
]
| 48.771429 | 110 | 0.411541 | [ cription' : ['Returns True<result_type>() or False<result_type>() according a0 is zero or not.'],
'module' : 'boost',
'arity' : '1',
'call_types' : [],
'ret_arity' : '0',
'rturn' : {
'default' : 'typename boost::simd::meta::as_logical<T>::type',
},
'simd_types' : ['real_'],
'special' : ['predicate'],
'type_defs' : [],
'types' : ['real_', 'signed_int_', 'unsigned_int_'],
},
'info' : 'manually modified',
'unit' : {
'global_header' : {
'first_stamp' : 'created by jt the 21/02/2011',
'included' : ['#include <boost/simd/sdk/simd/logical.hpp>'],
'no_ulp' : 'True',
'notes' : [],
'stamp' : 'modified by jt the 21/02/2011',
},
'ranges' : {
'default' : [['T(-10000)', 'T(10000)']],
},
'specific_values' : {
'default' : {
'boost::simd::One<T>()' : {'result' : 'boost::simd::False<r_t>()','ulp_thresh' : '0.5',},
'boost::simd::Two<T>()' : {'result' : 'boost::simd::False<r_t>()','ulp_thresh' : '0.5',},
'boost::simd::Zero<T>()' : {'result' : 'boost::simd::True<r_t>()','ulp_thresh' : '0.5',},
},
'real_' : {
'boost::simd::Mzero<T>()' : {'result' : 'boost::simd::True<r_t>()','ulp_thresh' : '0.5',},
'boost::simd::Half<T>()' : {'result' : 'boost::simd::False<r_t>()','ulp_thresh' : '0.5',},
'boost::simd::Inf<T>()' : {'result' : 'boost::simd::False<r_t>()','ulp_thresh' : '0.5',},
'boost::simd::Minf<T>()' : {'result' : 'boost::simd::False<r_t>()','ulp_thresh' : '0.5',},
'boost::simd::Mone<T>()' : {'result' : 'boost::simd::False<r_t>()','ulp_thresh' : '0.5',},
'boost::simd::Nan<T>()' : {'result' : 'boost::simd::False<r_t>()','ulp_thresh' : '0.5',},
'boost::simd::One<T>()' : {'result' : 'boost::simd::False<r_t>()','ulp_thresh' : '0.5',},
'boost::simd::Quarter<T>()' : {'result' : 'boost::simd::False<r_t>()','ulp_thresh' : '0.5',},
'boost::simd::Two<T>()' : {'result' : 'boost::simd::False<r_t>()','ulp_thresh' : '0.5',},
'boost::simd::Zero<T>()' : {'result' : 'boost::simd::True<r_t>()','ulp_thresh' : '0.5',},
},
'signed_int_' : {
'boost::simd::Mone<T>()' : {'result' : 'boost::simd::False<r_t>()','ulp_thresh' : '0.5',},
'boost::simd::One<T>()' : {'result' : 'boost::simd::False<r_t>()','ulp_thresh' : '0.5',},
'boost::simd::Two<T>()' : {'result' : 'boost::simd::False<r_t>()','ulp_thresh' : '0.5',},
'boost::simd::Zero<T>()' : {'result' : 'boost::simd::True<r_t>()','ulp_thresh' : '0.5',},
},
},
'verif_test' : {
'property_call' : {
'default' : ['boost::simd::is_eqz(a0)'],
},
'property_value' : {
'default' : ['a0==0'],
},
'simd' : {
},
'ulp_thresh' : {
'default' : ['0'],
},
},
},
},
]
| true | true |
f71e91cc662cdd088a323256c7ea4e2c01a5e589 | 4,175 | py | Python | st2common/st2common/transport/utils.py | benmcbenben/st2 | f067176640d86924b99bc035c2eb9aabe3b3a734 | [
"Apache-2.0"
] | null | null | null | st2common/st2common/transport/utils.py | benmcbenben/st2 | f067176640d86924b99bc035c2eb9aabe3b3a734 | [
"Apache-2.0"
] | null | null | null | st2common/st2common/transport/utils.py | benmcbenben/st2 | f067176640d86924b99bc035c2eb9aabe3b3a734 | [
"Apache-2.0"
] | null | null | null | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import ssl as ssl_lib
from oslo_config import cfg
from kombu import Connection
from st2common import log as logging
__all__ = [
'get_connection',
'get_messaging_urls'
]
LOG = logging.getLogger(__name__)
def get_messaging_urls():
'''
Determines the right messaging urls to supply. In case the `cluster_urls` config is
specified then that is used. Else the single `url` property is used.
:rtype: ``list``
'''
if cfg.CONF.messaging.cluster_urls:
return cfg.CONF.messaging.cluster_urls
return [cfg.CONF.messaging.url]
def get_connection(urls=None, connection_kwargs=None):
"""
Retrieve kombu "Conection" class instance configured with all the correct
options using values from the config and provided values.
:param connection_kwargs: Any additional connection keyword arguments passed directly to the
Connection class constructor.
:type connection_kwargs: ``dict``
"""
urls = urls or get_messaging_urls()
connection_kwargs = connection_kwargs or {}
kwargs = {}
ssl_kwargs = _get_ssl_kwargs(ssl=cfg.CONF.messaging.ssl,
ssl_keyfile=cfg.CONF.messaging.ssl_keyfile,
ssl_certfile=cfg.CONF.messaging.ssl_certfile,
ssl_cert_reqs=cfg.CONF.messaging.ssl_cert_reqs,
ssl_ca_certs=cfg.CONF.messaging.ssl_ca_certs,
login_method=cfg.CONF.messaging.login_method)
# NOTE: "connection_kwargs" argument passed to this function has precedence over config values
if len(ssl_kwargs) == 1 and ssl_kwargs['ssl'] is True:
kwargs.update({'ssl': True})
elif len(ssl_kwargs) >= 2:
ssl_kwargs.pop('ssl')
kwargs.update({'ssl': ssl_kwargs})
kwargs['login_method'] = cfg.CONF.messaging.login_method
kwargs.update(connection_kwargs)
# NOTE: This line contains no secret values so it's OK to log it
LOG.debug('Using SSL context for RabbitMQ connection: %s' % (ssl_kwargs))
connection = Connection(urls, **kwargs)
return connection
def _get_ssl_kwargs(ssl=False, ssl_keyfile=None, ssl_certfile=None, ssl_cert_reqs=None,
ssl_ca_certs=None, login_method=None):
"""
Return SSL keyword arguments to be used with the kombu.Connection class.
"""
ssl_kwargs = {}
# NOTE: If "ssl" is not set to True we don't pass "ssl=False" argument to the constructor
# because user could still specify to use SSL by including "?ssl=true" query param at the
# end of the connection URL string
if ssl is True:
ssl_kwargs['ssl'] = True
if ssl_keyfile:
ssl_kwargs['ssl'] = True
ssl_kwargs['keyfile'] = ssl_keyfile
if ssl_certfile:
ssl_kwargs['ssl'] = True
ssl_kwargs['certfile'] = ssl_certfile
if ssl_cert_reqs:
if ssl_cert_reqs == 'none':
ssl_cert_reqs = ssl_lib.CERT_NONE
elif ssl_cert_reqs == 'optional':
ssl_cert_reqs = ssl_lib.CERT_OPTIONAL
elif ssl_cert_reqs == 'required':
ssl_cert_reqs = ssl_lib.CERT_REQUIRED
ssl_kwargs['cert_reqs'] = ssl_cert_reqs
if ssl_ca_certs:
ssl_kwargs['ssl'] = True
ssl_kwargs['ca_certs'] = ssl_ca_certs
return ssl_kwargs
| 34.791667 | 98 | 0.679281 |
from __future__ import absolute_import
import ssl as ssl_lib
from oslo_config import cfg
from kombu import Connection
from st2common import log as logging
__all__ = [
'get_connection',
'get_messaging_urls'
]
LOG = logging.getLogger(__name__)
def get_messaging_urls():
if cfg.CONF.messaging.cluster_urls:
return cfg.CONF.messaging.cluster_urls
return [cfg.CONF.messaging.url]
def get_connection(urls=None, connection_kwargs=None):
urls = urls or get_messaging_urls()
connection_kwargs = connection_kwargs or {}
kwargs = {}
ssl_kwargs = _get_ssl_kwargs(ssl=cfg.CONF.messaging.ssl,
ssl_keyfile=cfg.CONF.messaging.ssl_keyfile,
ssl_certfile=cfg.CONF.messaging.ssl_certfile,
ssl_cert_reqs=cfg.CONF.messaging.ssl_cert_reqs,
ssl_ca_certs=cfg.CONF.messaging.ssl_ca_certs,
login_method=cfg.CONF.messaging.login_method)
if len(ssl_kwargs) == 1 and ssl_kwargs['ssl'] is True:
kwargs.update({'ssl': True})
elif len(ssl_kwargs) >= 2:
ssl_kwargs.pop('ssl')
kwargs.update({'ssl': ssl_kwargs})
kwargs['login_method'] = cfg.CONF.messaging.login_method
kwargs.update(connection_kwargs)
LOG.debug('Using SSL context for RabbitMQ connection: %s' % (ssl_kwargs))
connection = Connection(urls, **kwargs)
return connection
def _get_ssl_kwargs(ssl=False, ssl_keyfile=None, ssl_certfile=None, ssl_cert_reqs=None,
ssl_ca_certs=None, login_method=None):
ssl_kwargs = {}
# NOTE: If "ssl" is not set to True we don't pass "ssl=False" argument to the constructor
if ssl is True:
ssl_kwargs['ssl'] = True
if ssl_keyfile:
ssl_kwargs['ssl'] = True
ssl_kwargs['keyfile'] = ssl_keyfile
if ssl_certfile:
ssl_kwargs['ssl'] = True
ssl_kwargs['certfile'] = ssl_certfile
if ssl_cert_reqs:
if ssl_cert_reqs == 'none':
ssl_cert_reqs = ssl_lib.CERT_NONE
elif ssl_cert_reqs == 'optional':
ssl_cert_reqs = ssl_lib.CERT_OPTIONAL
elif ssl_cert_reqs == 'required':
ssl_cert_reqs = ssl_lib.CERT_REQUIRED
ssl_kwargs['cert_reqs'] = ssl_cert_reqs
if ssl_ca_certs:
ssl_kwargs['ssl'] = True
ssl_kwargs['ca_certs'] = ssl_ca_certs
return ssl_kwargs
| true | true |
f71e9293b1b22dfe1dd09dc3be56a2df0b029670 | 2,578 | py | Python | info/Modules/index/views.py | xihuaxone/NewsWeb | d6f0b7f854a9b21619c81d6c736c5f084c572dc2 | [
"MIT"
] | null | null | null | info/Modules/index/views.py | xihuaxone/NewsWeb | d6f0b7f854a9b21619c81d6c736c5f084c572dc2 | [
"MIT"
] | null | null | null | info/Modules/index/views.py | xihuaxone/NewsWeb | d6f0b7f854a9b21619c81d6c736c5f084c572dc2 | [
"MIT"
] | null | null | null | # encoding=utf-8
from flask import Blueprint, make_response, render_template, current_app, session, jsonify, g
from flask import session, request
import math
from info.utils.response_code import RET
from info.models import *
from info import constants
from info.utils.common import LoginUser, RankList
blue = Blueprint('blue1', __name__)
@blue.route('/favicon.ico')
def Favicon():
    """Serve the site favicon from the static news assets."""
    icon_path = 'news/favicon.ico'
    return current_app.send_static_file(icon_path)
@blue.route('/')
@LoginUser
@RankList
def index():
    """Render the home page with the logged-in user, the news click ranking
    and the list of news categories.

    ``g.login_user`` and ``g.news_click_rank`` are populated by the
    ``LoginUser`` / ``RankList`` decorators above.
    """
    # Fix: removed the unused "from info import db" import and the unused
    # current_user_id local.
    current_user = g.login_user

    # Load every news category for the navigation bar.
    try:
        category_list = [item.to_dict() for item in Category.query.all()]
    except Exception as err:
        current_app.logger.error(err)
        return jsonify(errno=RET.DBERR, errmsg='数据库操作失败')

    datas = {
        'login_user': current_user.to_dict() if current_user else None,
        'click_rank': g.news_click_rank,
        'category_list': category_list
    }
    return make_response(render_template('news/html/index.html', datas=datas))
@blue.route('/news_list')
def newsList():
    """AJAX endpoint: return one JSON page of news items for a category.

    Query parameters: cid (category id, 1 == no category filter),
    page (page number), per_page (items per page).
    """
    list_infos = request.args
    # Requested news category id.
    cid = list_infos.get('cid',1)
    # Requested page number.
    page = list_infos.get('page',1)
    # Number of items per page.
    per_page = list_infos.get('per_page', constants.HOME_PAGE_MAX_NEWS)
    news_list = list()
    request_infos = list()
    # Ensure all request parameters are present.
    if not all([cid, page, per_page]):
        return jsonify(errno=RET.NODATA, errmsg='请求数据不全')
    # Validate parameter formats to guard against malformed input.
    try:
        cid = int(cid)
        page = int(page)
        per_page = int(per_page)
    except Exception as err:
        current_app.logger.error(err)
        return jsonify(errno=RET.DATAERR, errmsg='html请求格式错误')
    # Query the database for the requested page; status == 0 rows only
    # (presumably "published" — confirm against the News model).
    try:
        filter_op = []
        if not cid == 1:
            filter_op.append(News.category_id == cid)
        request_infos = News.query.filter(News.status == 0, *filter_op).order_by(News.create_time.desc()).paginate(page = page, per_page = per_page)
        page_count = request_infos.pages
    except Exception as err:
        current_app.logger.error(err)
        return jsonify(errno=RET.DBERR, errmsg='数据库查询失败')
    # Serialize the results in the shape the front end expects.
    if request_infos:
        for news in request_infos.items:
            news_list.append(news.to_basic_dict())
    return jsonify(errno = RET.OK, errmsg = 'OK', cid = cid, currentPage = page, newsList = news_list, totalPage = page_count)
from flask import Blueprint, make_response, render_template, current_app, session, jsonify, g
from flask import session, request
import math
from info.utils.response_code import RET
from info.models import *
from info import constants
from info.utils.common import LoginUser, RankList
blue = Blueprint('blue1', __name__)
@blue.route('/favicon.ico')
def Favicon():
return current_app.send_static_file('news/favicon.ico')
@blue.route('/')
@LoginUser
@RankList
def index():
from info import db
current_user_id = None
current_user = g.login_user
try:
category_list = Category.query.all()
category_list = [item.to_dict() for item in category_list]
except Exception as err:
current_app.logger.error(err)
return jsonify(errno=RET.DBERR, errmsg='数据库操作失败')
datas = {
'login_user': current_user.to_dict() if current_user else None,
'click_rank': g.news_click_rank,
'category_list': category_list
}
ret = make_response(render_template('news/html/index.html', datas = datas))
return ret
@blue.route('/news_list')
def newsList():
list_infos = request.args
cid = list_infos.get('cid',1)
page = list_infos.get('page',1)
per_page = list_infos.get('per_page', constants.HOME_PAGE_MAX_NEWS)
news_list = list()
request_infos = list()
if not all([cid, page, per_page]):
return jsonify(errno=RET.NODATA, errmsg='请求数据不全')
try:
cid = int(cid)
page = int(page)
per_page = int(per_page)
except Exception as err:
current_app.logger.error(err)
return jsonify(errno=RET.DATAERR, errmsg='html请求格式错误')
try:
filter_op = []
if not cid == 1:
filter_op.append(News.category_id == cid)
request_infos = News.query.filter(News.status == 0, *filter_op).order_by(News.create_time.desc()).paginate(page = page, per_page = per_page)
page_count = request_infos.pages
except Exception as err:
current_app.logger.error(err)
return jsonify(errno=RET.DBERR, errmsg='数据库查询失败')
if request_infos:
for news in request_infos.items:
news_list.append(news.to_basic_dict())
return jsonify(errno = RET.OK, errmsg = 'OK', cid = cid, currentPage = page, newsList = news_list, totalPage = page_count) | true | true |
f71e9396e107a5d1eba85299cfd4f19d90e0a5d8 | 3,987 | py | Python | tests/unit/test_gff3_parser.py | dcolligan/ga4gh-server | dd0b00a52de9684609b7f04a9d70946c36afa8a5 | [
"Apache-2.0"
] | 83 | 2015-01-05T22:21:11.000Z | 2017-02-20T01:25:28.000Z | tests/unit/test_gff3_parser.py | dcolligan/ga4gh-server | dd0b00a52de9684609b7f04a9d70946c36afa8a5 | [
"Apache-2.0"
] | 1,508 | 2015-01-02T14:06:12.000Z | 2017-03-08T19:49:18.000Z | tests/unit/test_gff3_parser.py | dcolligan/ga4gh-server | dd0b00a52de9684609b7f04a9d70946c36afa8a5 | [
"Apache-2.0"
] | 99 | 2015-01-14T20:48:56.000Z | 2017-03-08T18:35:06.000Z | """
GFF3 parser unit tests.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import ga4gh.server.gff3 as gff3
import unittest
_testDataDir = "tests/data/datasets/dataset1/sequenceAnnotations/"
class TestGff3ParserOnTypicalFile(unittest.TestCase):
    """
    Data driven unit tests for the GFF3 parser
    """
    def setUp(self):
        testDataFile = _testDataDir + "gencodeV21Set1.gff3"
        self.gff3Parser = gff3.Gff3Parser(testDataFile)
        self.gff3Data = self.gff3Parser.parse()

    def testFileParsedHasSomeRootFeatures(self):
        self.assertIsNotNone(self.gff3Data.roots, "No root features")
        self.assertNotEqual(len(self.gff3Data.roots), 0, "No root features")

    def testSomeFeatureIsWellFormed(self):
        # dict.keys() is a non-indexable view on Python 3; materialize it so
        # the test runs under both Python 2 and 3 (the file's __future__
        # imports indicate dual support is intended).
        featId = list(self.gff3Data.byFeatureName.keys())[0]
        feat = self.gff3Data.byFeatureName[featId][0]
        self.assertEqual(featId, feat.featureName, "featureName mismatch")
        self.assertIsNotNone(feat.seqname, "sequence name is not populated")
        self.assertGreaterEqual(feat.end, feat.start, "end less than start")
        self.assertIn(feat.strand, u"+-", "strand is neither + nor -")
        self.assertIsNotNone(feat.source, "source is unspecified")
        self.assertIsNotNone(feat.type, "feature type is unspecified")
        self.assertIsInstance(feat.parents, set, "parents not a set")
        self.assertIsInstance(feat.children, set, "children not a set")

    def testRootFeaturesHaveNoParents(self):
        for root in self.gff3Data.roots:
            self.assertEqual(
                len(root.parents), 0, "root feature has a parent")

    def testAllFeaturesContainAllRootFeatures(self):
        for root in self.gff3Data.roots:
            feat = self.gff3Data.byFeatureName[root.featureName]
            self.assertGreaterEqual(
                len(feat), 1,
                "root feature not in list of all features")

    def testInvalidFeatureNameKeyQueryFails(self):
        # byFeatureName behaves like a defaultdict: unknown keys yield an
        # empty collection rather than raising.
        badFeatureName = "987654"
        badFeat = self.gff3Data.byFeatureName[badFeatureName]
        self.assertEqual(
            len(badFeat), 0,
            "invalid feature ID returned valid object")

    def testAllChildrenFeaturesArePresentInSet(self):
        for featList in self.gff3Data.byFeatureName.values():
            for feat in featList:
                for child in feat.children:
                    childLookup = self.gff3Data.byFeatureName[
                        child.featureName]
                    self.assertGreaterEqual(
                        len(childLookup), 1,
                        "child feature not in set")
class TestGff3ParserOnDiscontinuousFeatureFile(TestGff3ParserOnTypicalFile):
    """
    Data driven parser test on file with discontinuous features.

    Re-runs every assertion inherited from TestGff3ParserOnTypicalFile
    against the discontinuous-feature fixture, plus one fixture-specific
    check. The tests here rely on specific data in the file being parsed.
    """
    def setUp(self):
        # Same parser pipeline as the base class, different fixture file.
        testDataFile = _testDataDir + "discontinuous.gff3"
        self.gff3Parser = gff3.Gff3Parser(testDataFile)
        self.gff3Data = self.gff3Parser.parse()

    def testDiscontinuousFeature(self):
        # This CDS is split into 10 discontinuous parts in the fixture.
        feat = self.gff3Data.byFeatureName['apidb|cds_MAL13P1.103-1']
        self.assertEqual(
            len(feat), 10,
            "not all parts of discontinuous feature parsed")
class TestGff3ParserOnSacCerFile(TestGff3ParserOnTypicalFile):
    """
    Data driven parser test on file from Saccharomyces cerevisiae S288C genome.

    Inherits all assertions from TestGff3ParserOnTypicalFile; only the
    fixture file differs.
    """
    def setUp(self):
        testDataFile = _testDataDir + "sacCerTest.gff3"
        self.gff3Parser = gff3.Gff3Parser(testDataFile)
        self.gff3Data = self.gff3Parser.parse()
class TestGff3ParserOnSpecialCasesFile(TestGff3ParserOnTypicalFile):
    """
    Data driven parser test on a GFF3 file representing edge cases.

    Inherits all assertions from TestGff3ParserOnTypicalFile; only the
    fixture file differs.
    """
    def setUp(self):
        testDataFile = _testDataDir + "specialCasesTest.gff3"
        self.gff3Parser = gff3.Gff3Parser(testDataFile)
        self.gff3Data = self.gff3Parser.parse()
| 38.336538 | 79 | 0.676198 | from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import ga4gh.server.gff3 as gff3
import unittest
_testDataDir = "tests/data/datasets/dataset1/sequenceAnnotations/"
class TestGff3ParserOnTypicalFile(unittest.TestCase):
def setUp(self):
testDataFile = _testDataDir + "gencodeV21Set1.gff3"
self.gff3Parser = gff3.Gff3Parser(testDataFile)
self.gff3Data = self.gff3Parser.parse()
def testFileParsedHasSomeRootFeatures(self):
self.assertIsNotNone(self.gff3Data.roots, "No root features")
self.assertNotEqual(len(self.gff3Data.roots), 0, "No root features")
def testSomeFeatureIsWellFormed(self):
featId = self.gff3Data.byFeatureName.keys()[0]
feat = self.gff3Data.byFeatureName[featId][0]
self.assertEqual(featId, feat.featureName, "featureName mismatch")
self.assertIsNotNone(feat.seqname, "sequence name is not populated")
self.assertGreaterEqual(feat.end, feat.start, "end less than start")
self.assertIn(feat.strand, u"+-", "strand is neither + nor -")
self.assertIsNotNone(feat.source, "source is unspecified")
self.assertIsNotNone(feat.type, "feature type is unspecified")
self.assertIsInstance(feat.parents, set, "parents not a set")
self.assertIsInstance(feat.children, set, "children not a set")
def testRootFeaturesHaveNoParents(self):
for root in self.gff3Data.roots:
self.assertEqual(
len(root.parents), 0, "root feature has a parent")
def testAllFeaturesContainAllRootFeatures(self):
for root in self.gff3Data.roots:
feat = self.gff3Data.byFeatureName[root.featureName]
self.assertGreaterEqual(
len(feat), 1,
"root feature not in list of all features")
def testInvalidFeatureNameKeyQueryFails(self):
badFeatureName = "987654"
badFeat = self.gff3Data.byFeatureName[badFeatureName]
self.assertEqual(
len(badFeat), 0,
"invalid feature ID returned valid object")
def testAllChildrenFeaturesArePresentInSet(self):
for featList in self.gff3Data.byFeatureName.values():
for feat in featList:
for child in feat.children:
childLookup = self.gff3Data.byFeatureName[
child.featureName]
self.assertGreaterEqual(
len(childLookup), 1,
"child feature not in set")
class TestGff3ParserOnDiscontinuousFeatureFile(TestGff3ParserOnTypicalFile):
def setUp(self):
testDataFile = _testDataDir + "discontinuous.gff3"
self.gff3Parser = gff3.Gff3Parser(testDataFile)
self.gff3Data = self.gff3Parser.parse()
def testDiscontinuousFeature(self):
feat = self.gff3Data.byFeatureName['apidb|cds_MAL13P1.103-1']
self.assertEqual(
len(feat), 10,
"not all parts of discontinuous feature parsed")
class TestGff3ParserOnSacCerFile(TestGff3ParserOnTypicalFile):
def setUp(self):
testDataFile = _testDataDir + "sacCerTest.gff3"
self.gff3Parser = gff3.Gff3Parser(testDataFile)
self.gff3Data = self.gff3Parser.parse()
class TestGff3ParserOnSpecialCasesFile(TestGff3ParserOnTypicalFile):
def setUp(self):
testDataFile = _testDataDir + "specialCasesTest.gff3"
self.gff3Parser = gff3.Gff3Parser(testDataFile)
self.gff3Data = self.gff3Parser.parse()
| true | true |
f71e939873aac156dae8e715c3fca52635645354 | 13,575 | py | Python | dlex/datasets/nlp/utils.py | dvtrung/dl-torch | b49e57d10d32bb223e2d7643f2579ccc32c63a9a | [
"MIT"
] | null | null | null | dlex/datasets/nlp/utils.py | dvtrung/dl-torch | b49e57d10d32bb223e2d7643f2579ccc32c63a9a | [
"MIT"
] | null | null | null | dlex/datasets/nlp/utils.py | dvtrung/dl-torch | b49e57d10d32bb223e2d7643f2579ccc32c63a9a | [
"MIT"
] | null | null | null | """NLP Dataset"""
import os
import re
from typing import List, Union, Dict, Tuple
import nltk
import unicodedata
import numpy as np
from dlex.configs import ModuleConfigs
from dlex.utils.logging import logger
# nltk.download('punkt')
# Turn a Unicode string to plain ASCII, thanks to
# https://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
    """Strip diacritics: decompose to NFD and drop combining marks (category Mn)."""
    decomposed = unicodedata.normalize('NFD', s)
    return ''.join(ch for ch in decomposed if unicodedata.category(ch) != 'Mn')
def load_tkn_to_idx(filename):
    """Load a vocabulary file (one token per line) into a token -> index dict.

    Blank lines are skipped; indices follow file order.

    :param filename: path to a UTF-8 vocabulary file
    :return: dict mapping each token to its line-order index
    """
    tkn_to_idx = {}
    # "with" guarantees the handle is closed even if reading raises
    # (the previous open()/close() pair leaked the handle on error).
    with open(filename, encoding='utf-8') as fo:
        for line in fo:
            line = line.strip()
            if line == "":
                continue
            tkn_to_idx[line] = len(tkn_to_idx)
    return tkn_to_idx
def normalize_lower(sentence: str):
    """Return *sentence* with surrounding whitespace removed and lowercased."""
    trimmed = sentence.strip()
    return trimmed.lower()
def normalize_lower_alphanumeric(sentence: str):
    """Lowercase and keep only [a-z0-9] plus Hangul syllables; every other
    run of characters collapses into a single space."""
    lowered = sentence.strip().lower()
    return re.sub("[^a-z0-9\uAC00-\uD7A3]+", " ", lowered)
def normalize_string_ascii(sentence):
    """
    Normalize a sentence to lowercase ASCII: strip accents, keep only letters
    and the punctuation . ! ? , (each split into its own token), then map
    every token through normalize_word.

    :param str sentence:
    :return: normalized sentence, separated by space
    :rtype str
    """
    # x = re.sub("[^ a-zA-Z0-9\uAC00-\uD7A3]+", " ", x)
    # x = re.sub("[\u3040-\u30FF]+", "\u3042", x)  # convert Hiragana and Katakana to あ
    # x = re.sub("[\u4E00-\u9FFF]+", "\u6F22", x)  # convert CJK unified ideographs to 漢
    sent = unicodeToAscii(sentence.lower().strip())
    # Detach kept punctuation with a leading space so it becomes its own token.
    sent = re.sub(r"([.!?,])", r" \1", sent)
    # Collapse everything that is not a letter or kept punctuation into a space.
    sent = re.sub(r"[^a-zA-Z.!?,]+", r" ", sent)
    # Squeeze whitespace runs and trim a single leading/trailing space.
    sent = re.sub(r"\s+", " ", sent)
    sent = re.sub("^ | $", "", sent)
    words = sent.split(' ')
    ret = []
    for word in words:
        ret.append(normalize_word(word))
    return ' '.join(ret)
def normalize_string(sentence):
    """
    Normalize whitespace in a sentence and map every token through
    normalize_word.

    :param str sentence:
    :return: normalized sentence, separated by space
    :rtype str
    """
    # x = re.sub("[^ a-zA-Z0-9\uAC00-\uD7A3]+", " ", x)
    # x = re.sub("[\u3040-\u30FF]+", "\u3042", x)  # convert Hiragana and Katakana to あ
    # x = re.sub("[\u4E00-\u9FFF]+", "\u6F22", x)  # convert CJK unified ideographs to 漢
    # Detach punctuation that is immediately followed by an apostrophe; the
    # apostrophe itself is dropped (presumably targets tokenizer artifacts —
    # TODO confirm the intent).
    sentence = re.sub(r"([\.!?,\";\(\)])\'", r" \1", sentence)
    # sent = re.sub(r"[^a-zA-Z.!?,]+", r" ", sent)
    # Squeeze whitespace runs and trim a single leading/trailing space.
    sentence = re.sub(r"\s+", " ", sentence)
    sentence = re.sub("^ | $", "", sentence)
    words = sentence.split(' ')
    ret = []
    for word in words:
        ret.append(normalize_word(word))
    return ' '.join(ret)
def normalize_word(word):
    """Map a token to its normalized form: , . ! ? pass through, other
    punctuation becomes <punc>, digit-bearing tokens become <non-word>,
    everything else is lowercased."""
    punct_tokens = (',', '.', '-', '"', ':', '!', '(', ')', '...', '?')
    if word in ',.!?':
        return word
    if word in punct_tokens:
        return '<punc>'
    if any('0' <= ch <= '9' for ch in word):
        return '<non-word>'
    return word.lower()
def normalize_none(s):
    """Identity normalizer: return the input unchanged."""
    return s
def nltk_tokenize(s):
    """Word-tokenize *s* with NLTK's default English word tokenizer."""
    return nltk.word_tokenize(s)
class Tokenizer:
    """Composes a normalization function with a tokenization function."""

    def __init__(self, normalize_fn=None, tokenize_fn=None):
        """
        :param normalize_fn: callable str -> str applied first
        :param tokenize_fn: callable str -> list of tokens applied second
        """
        self.normalize_fn = normalize_fn
        self.tokenize_fn = tokenize_fn

    def process(self, s):
        """Normalize *s*, then split it into tokens."""
        normalized = self.normalize_fn(s)
        return self.tokenize_fn(normalized)
# Module-level cache so the spaCy pipeline is only loaded once per process.
spacy_nlp = None


def spacy_tokenize(s):
    """Tokenize *s* with spaCy's English tokenizer, lazily loading the model.

    <eos>/<bos>/<unk> are registered as special cases so they survive as
    single tokens instead of being split on the angle brackets.
    """
    import spacy
    from spacy.symbols import ORTH
    global spacy_nlp
    if spacy_nlp is None:
        # sputnik.install('spacy', spacy.about.__version__, 'en_default', data_path=ModuleConfigs.get_tmp_path())
        spacy_nlp = spacy.load('en_core_web_sm', via=ModuleConfigs.get_tmp_path())
        spacy_nlp.tokenizer.add_special_case('<eos>', [{ORTH: '<eos>'}])
        spacy_nlp.tokenizer.add_special_case('<bos>', [{ORTH: '<bos>'}])
        spacy_nlp.tokenizer.add_special_case('<unk>', [{ORTH: '<unk>'}])
    return [_s.text for _s in spacy_nlp.tokenizer(s)]
def normalize_char(char):
    """Lowercase a character and map spaces to underscores."""
    return char.replace(' ', '_').lower()
def space_tokenize(s):
    """Split on single spaces; runs of spaces yield empty-string tokens."""
    return s.split(sep=' ')
def char_tokenize(s: str):
    """Character-level tokens; spaces are first rewritten to '_'."""
    return [ch for ch in s.replace(" ", "_")]
def mecab_tokenize(s):
    """Tokenize Japanese text with MeCab in wakati (space-separated) mode."""
    import MeCab
    wakati = MeCab.Tagger("-Owakati")
    return wakati.parse(s).split()
def write_vocab(
        text: Union[str, List[str], List[List[str]]],
        output_path: str,
        tokenizer: Tokenizer = None,
        min_freq=0,
        specials=None):
    """
    Count token frequencies in *text* and write a vocabulary file: the
    special tokens first, then the remaining tokens sorted by descending
    frequency, one per line.

    :param text: a single string, a list of sentence strings, or a list of
        pre-tokenized sentences (lists of tokens)
    :param output_path: destination file path (UTF-8)
    :param tokenizer: applied to plain-string sentences; if tokenizer is
        None, tokens are separated by space with no normalization
    :param min_freq: keep only tokens occurring strictly more than this
    :param specials: special tokens written first; defaults to
        ['<pad>', '<sos>', '<eos>', '<oov>']
    :return: None
    """
    if tokenizer is None:
        tokenizer = Tokenizer(normalize_none, space_tokenize)
    if specials is None:
        specials = ['<pad>', '<sos>', '<eos>', '<oov>']
    word_freqs = {}
    if isinstance(text, str):
        text = [text]
    for sent in text:
        # Pre-tokenized sentences (lists) are counted as-is; only plain
        # strings go through the tokenizer.
        if isinstance(sent, str):
            # if normalize_fn is not None:
            #     s = normalize_fn(sent.replace('_', ' '))
            # else:
            #     s = sent
            # ls = char_tokenize(s) if token == 'char' else space_tokenize(s)
            sent = tokenizer.process(sent)
        for word in sent:
            if word.strip() == '':
                continue
            if word in word_freqs:
                word_freqs[word] += 1
            else:
                word_freqs[word] = 1
    words = list([word for word in word_freqs if word_freqs[word] > min_freq])
    # Stable sort: ties keep first-seen order.
    words.sort(key=lambda word: word_freqs[word], reverse=True)
    with open(output_path, "w", encoding='utf-8') as fo:
        fo.write('\n'.join(specials) + '\n')
        fo.write("\n".join(words))
    logger.info("Vocab written to %s (%d tokens)", output_path, len(specials) + len(words))
def get_token_id(vocab, word):
    """Look up *word* in *vocab*, falling back to the <oov>/<unk> entry.

    :param vocab: mapping supporting ``in`` and ``[]`` (e.g. a dict)
    :param word: token to look up
    :rtype: int
    :raises Exception: if *word* is absent and no <oov>/<unk> entry exists
    """
    if word in vocab:
        return vocab[word]
    for fallback in ('<oov>', '<unk>'):
        if fallback in vocab:
            return vocab[fallback]
    raise Exception("No out-of-vocabulary token found.")
class Vocab:
    """Bidirectional token <-> index mapping with optional pre-trained embeddings."""

    def __init__(self, index2token: List[str] = None, token2index: Dict[str, int] = None):
        """
        :param index2token: tokens in index order; empty vocab if None
        :param token2index: explicit token -> index map; derived from
            index2token when omitted
        """
        if index2token is None:
            self._token2index = {}
            self._index2token = []
        else:
            self._index2token = index2token
            if token2index:
                self._token2index = token2index
            else:
                self._token2index = {token: idx for idx, token in enumerate(index2token)}
        # Populated by init_pretrained_embeddings().
        self.embeddings = None
        self.embedding_dim = None

    @classmethod
    def from_file(cls, file_name):
        """Build a Vocab from a file with one token per line (blank lines skipped)."""
        index2token = []
        # "with" closes the handle even if a read fails (the old code leaked on error).
        with open(file_name, encoding='utf-8') as fo:
            for line in fo:
                line = line.strip()
                if line == "":
                    continue
                index2token.append(line)
        return cls(index2token)

    def __getitem__(self, token: str) -> int:
        """Index of *token*, or the OOV index when the token is unknown."""
        return self._token2index[token] if token in self._token2index else self.oov_token_idx

    def tolist(self) -> List[str]:
        """All tokens in index order."""
        return self._index2token

    def get_token_id(self, token) -> int:
        # Bug fix: the previous "self[token] or self.oov_token_idx" treated a
        # valid index of 0 as missing (0 is falsy) and returned the OOV index.
        return self[token]

    def add_token(self, token: str):
        """Append *token* with the next free index (no-op if already present)."""
        if token not in self._token2index:
            self._token2index[token] = len(self._token2index)
            self._index2token.append(token)

    def __len__(self):
        return len(self._token2index)

    def get_token(self, idx: int) -> str:
        return self._index2token[idx]

    def decode_idx_list(self, ls: List[int], ignore: List[int] = None, stop_at: int = None) -> List[str]:
        """Map indices back to tokens, dropping *ignore* ids and truncating at *stop_at*."""
        ret = []
        for idx in ls:
            # Compare against None so a stop index of 0 is honored (old code
            # used truthiness and silently ignored stop_at=0).
            if stop_at is not None and idx == stop_at:
                break
            elif ignore and idx in ignore:
                continue
            else:
                ret.append(self.get_token(idx))
        return ret

    def encode_token_list(self, ls: List[str]) -> List[int]:
        return [self.get_token_id(token) for token in ls]

    def _find_special_idx(self, candidates) -> int:
        """Index of the first candidate token present in the vocabulary."""
        # Membership tests instead of "idx or fallback": index 0 is valid, and
        # the old "self['<sos>'] or self['<s>']" never reached the fallback
        # (a missing token already resolved to the OOV index or raised).
        for token in candidates:
            if token in self._token2index:
                return self._token2index[token]
        raise Exception("None of %s found in vocabulary." % (candidates,))

    @property
    def sos_token_idx(self) -> int:
        return self._find_special_idx(('<sos>', '<s>'))

    @property
    def eos_token_idx(self) -> int:
        return self._find_special_idx(('<eos>', '</s>'))

    @property
    def blank_token_idx(self) -> int:
        return self._find_special_idx(('<blank>', '<pad>'))

    @property
    def oov_token_idx(self) -> int:
        if '<oov>' in self._token2index:
            return self._token2index['<oov>']
        elif '<unk>' in self._token2index:
            return self._token2index['<unk>']
        else:
            raise Exception("<oov> token not found.")

    def get_specials(self):
        """Tokens that look like special markers (start with '<')."""
        return [token for token in self._index2token if token.startswith('<')]

    def init_pretrained_embeddings(
            self,
            pretrained: str,
            emb_name: str = None,
            dim: int = None) -> np.ndarray:
        """Fill self.embeddings with GloVe/fastText vectors for this vocabulary.

        Tokens missing from the pre-trained vocabulary keep zero vectors and
        are logged; multi-word tokens get the sum of their word vectors.
        """
        if pretrained == 'glove':
            from torchtext.vocab import GloVe
            dim = dim or 300
            vocab = GloVe(
                name=emb_name or '840B', dim=dim,
                cache=os.path.join(ModuleConfigs.get_tmp_path(), "torchtext"))
        elif pretrained == 'fasttext':
            from torchtext.vocab import FastText
            vocab = FastText()
        else:
            raise ValueError("Pre-trained embeddings not found.")
        vectors = vocab.vectors
        if dim is None:
            # Bug fix: the fasttext path left dim=None, which made
            # np.zeros([len(self), dim]) fail below.
            dim = vectors.shape[1]
        oovs = []
        embeddings = np.zeros([len(self), dim])
        for idx, t in enumerate(self._index2token):
            _t = t.lower()
            if _t in vocab.stoi:
                embeddings[idx, :] = vectors[vocab.stoi[_t]].cpu().numpy()
            elif all(token in vocab.stoi for token in _t.split(' ')):
                # Bug fix: this branch was a second "if" that clobbered the
                # single-word result above, and np.sum without axis=0
                # collapsed the word vectors into a single scalar.
                embeddings[idx, :] = np.sum(
                    [vectors[vocab.stoi[token]].cpu().numpy() for token in _t.split(' ')],
                    axis=0)
            else:
                oovs.append(_t)
        if oovs:
            logger.warning(f"{len(oovs)} tokens not found in pre-trained embeddings: {', '.join(oovs)}")
        logger.debug(f"Load embeddings: {pretrained} (no. embeddings: {len(self) - len(oovs):,})")
        self.embedding_dim = dim
        self.embeddings = embeddings

    def get_token_embedding(self, token: str) -> np.ndarray:
        if self.embeddings is None:
            raise ValueError('Embeddings are not initialized')
        return self.embeddings[self.get_token_id(token)]

    def embed_token_list(self, ls):
        """Sum of the embeddings of all tokens in *ls*."""
        emb = np.zeros(self.embedding_dim)
        for token in ls:
            emb += self.get_token_embedding(token)
        return emb
def load_embeddings(
        pretrained: str,
        emb_name: str = None,
        dim: int = None,
        vocab_size: int = None,
        tokens: List[str] = None,
        specials: List[str] = None) -> Tuple[np.ndarray, Vocab]:
    """
    Load pre-trained embedding defined in dataset.embeddings.

    :param pretrained: 'glove', 'fasttext', or falsy for random embeddings
    :param emb_name: pre-trained collection name (e.g. '840B' for GloVe)
    :param dim: embedding dimension (required when *pretrained* is falsy)
    :param vocab_size: number of rows for random embeddings
    :param tokens: if specified, only load embeddings of these tokens
    :param specials: special tokens, appended with random vectors
    :return: (embedding matrix, Vocab) — Vocab is None for random embeddings
    """
    if not pretrained:
        assert dim is not None
        assert vocab_size is not None
        return np.random.rand(vocab_size, dim), None
    elif pretrained.lower() in ["glove", "fasttext"]:
        if pretrained.lower() == 'glove':
            from torchtext.vocab import GloVe
            vocab = GloVe(
                name=emb_name, dim=dim,
                cache=os.path.join(ModuleConfigs.get_tmp_path(), "torchtext"))
        elif pretrained.lower() == 'fasttext':
            from torchtext.vocab import FastText
            vocab = FastText()
        else:
            raise ValueError("Pre-trained embeddings not found.")
        vectors = vocab.vectors
        index2token = vocab.itos
        # Bug fix: keep an explicit map so the specials block below can append
        # entries even when no token filtering happens (the old code left this
        # as None and crashed on token2index[s]).
        token2index = {token: idx for idx, token in enumerate(index2token)}
        if tokens:  # limit vocabulary to list of tokens
            num_oovs = 0
            keep = []
            index2token = []
            token2index = {}
            for t in tokens:
                _t = t.lower()
                if _t in token2index:
                    # Lowercased form already kept: alias the original casing.
                    if t not in token2index:
                        token2index[t] = token2index[_t]
                elif _t in vocab.stoi:
                    keep.append(vocab.stoi[_t.lower()])
                    token2index[_t] = len(index2token)
                    token2index[t] = len(index2token)
                    index2token.append(_t)
                else:
                    num_oovs += 1
            vectors = vectors[keep]
            if num_oovs:
                logger.warning(f"{num_oovs} tokens not found in pre-trained embeddings")
        logger.debug(f"Load embeddings: {pretrained} (no. embeddings: {len(index2token):,})")
        if specials is not None:
            # Local import: torch is only needed on this path (the old code
            # used torch.cat without any visible import).
            import torch
            for s in specials:
                token2index[s] = len(index2token)
                index2token.append(s)
            # Bug fix: removed a second "index2token += specials" that
            # duplicated the specials and desynchronized index2token from
            # token2index and the vector matrix.
            vectors = torch.cat([vectors, torch.rand(len(specials), len(vectors[0]))])
        # return nn.Embedding.from_pretrained(vectors, freeze=emb.freeze or True), Vocab(index2token, token2index)
        return vectors, Vocab(index2token, token2index)
    else:
        raise ValueError(f"{pretrained} is not supported.")
| 31.49652 | 115 | 0.550571 | import os
import re
from typing import List, Union, Dict, Tuple
import nltk
import unicodedata
import numpy as np
from dlex.configs import ModuleConfigs
from dlex.utils.logging import logger
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
def load_tkn_to_idx(filename):
tkn_to_idx = {}
fo = open(filename, encoding='utf-8')
for line in fo:
line = line.strip()
if line == "":
continue
tkn_to_idx[line] = len(tkn_to_idx)
fo.close()
return tkn_to_idx
def normalize_lower(sentence: str):
return sentence.strip().lower()
def normalize_lower_alphanumeric(sentence: str):
s = sentence.strip().lower()
s = re.sub("[^a-z0-9\uAC00-\uD7A3]+", " ", s)
return s
def normalize_string_ascii(sentence):
b(r"([.!?,])", r" \1", sent)
sent = re.sub(r"[^a-zA-Z.!?,]+", r" ", sent)
sent = re.sub(r"\s+", " ", sent)
sent = re.sub("^ | $", "", sent)
words = sent.split(' ')
ret = []
for word in words:
ret.append(normalize_word(word))
return ' '.join(ret)
def normalize_string(sentence):
sent = re.sub(r"[^a-zA-Z.!?,]+", r" ", sent)
sentence = re.sub(r"\s+", " ", sentence)
sentence = re.sub("^ | $", "", sentence)
words = sentence.split(' ')
ret = []
for word in words:
ret.append(normalize_word(word))
return ' '.join(ret)
def normalize_word(word):
punctuations = [',', '.', '-', '"', ':', '!', '(', ')', '...', '?']
if word in ',.!?':
return word
elif word in punctuations:
return '<punc>'
elif any('0' <= c <= '9' for c in word):
return '<non-word>'
else:
return word.lower()
def normalize_none(s):
return s
def nltk_tokenize(s):
return nltk.word_tokenize(s)
class Tokenizer:
def __init__(self, normalize_fn=None, tokenize_fn=None):
self.normalize_fn = normalize_fn
self.tokenize_fn = tokenize_fn
def process(self, s):
s = self.normalize_fn(s)
s = self.tokenize_fn(s)
return s
spacy_nlp = None
def spacy_tokenize(s):
import spacy
from spacy.symbols import ORTH
global spacy_nlp
if spacy_nlp is None:
# sputnik.install('spacy', spacy.about.__version__, 'en_default', data_path=ModuleConfigs.get_tmp_path())
spacy_nlp = spacy.load('en_core_web_sm', via=ModuleConfigs.get_tmp_path())
spacy_nlp.tokenizer.add_special_case('<eos>', [{ORTH: '<eos>'}])
spacy_nlp.tokenizer.add_special_case('<bos>', [{ORTH: '<bos>'}])
spacy_nlp.tokenizer.add_special_case('<unk>', [{ORTH: '<unk>'}])
return [_s.text for _s in spacy_nlp.tokenizer(s)]
def normalize_char(char):
return char.lower().replace(' ', '_')
def space_tokenize(s):
return s.split(' ')
def char_tokenize(s: str):
s = s.replace(" ", "_")
return list(s)
def mecab_tokenize(s):
import MeCab
wakati = MeCab.Tagger("-Owakati")
return wakati.parse(s).split()
def write_vocab(
text: Union[str, List[str], List[List[str]]],
output_path: str,
tokenizer: Tokenizer = None,
min_freq=0,
specials=None):
if tokenizer is None:
tokenizer = Tokenizer(normalize_none, space_tokenize)
if specials is None:
specials = ['<pad>', '<sos>', '<eos>', '<oov>']
word_freqs = {}
if isinstance(text, str):
text = [text]
for sent in text:
if isinstance(sent, str):
# if normalize_fn is not None:
# s = normalize_fn(sent.replace('_', ' '))
# else:
# s = sent
# ls = char_tokenize(s) if token == 'char' else space_tokenize(s)
sent = tokenizer.process(sent)
for word in sent:
if word.strip() == '':
continue
if word in word_freqs:
word_freqs[word] += 1
else:
word_freqs[word] = 1
words = list([word for word in word_freqs if word_freqs[word] > min_freq])
words.sort(key=lambda word: word_freqs[word], reverse=True)
with open(output_path, "w", encoding='utf-8') as fo:
fo.write('\n'.join(specials) + '\n')
fo.write("\n".join(words))
logger.info("Vocab written to %s (%d tokens)", output_path, len(specials) + len(words))
def get_token_id(vocab, word):
if word in vocab:
return vocab[word]
else:
if '<oov>' in vocab:
return vocab['<oov>']
elif '<unk>' in vocab:
return vocab['<unk>']
else:
raise Exception("No out-of-vocabulary token found.")
class Vocab:
def __init__(self, index2token: List[str] = None, token2index: Dict[str, int] = None):
if index2token is None:
self._token2index = {}
self._index2token = []
else:
self._index2token = index2token
if token2index:
self._token2index = token2index
else:
self._token2index = {token: idx for idx, token in enumerate(index2token)}
self.embeddings = None
self.embedding_dim = None
@classmethod
def from_file(cls, file_name):
index2token = []
fo = open(file_name, encoding='utf-8')
for line in fo:
line = line.strip()
if line == "":
continue
index2token.append(line)
fo.close()
return cls(index2token)
def __getitem__(self, token: str) -> int:
return self._token2index[token] if token in self._token2index else self.oov_token_idx
def tolist(self) -> List[str]:
return self._index2token
def get_token_id(self, token):
return self[token] or self.oov_token_idx
def add_token(self, token: str):
if token not in self._token2index:
self._token2index[token] = len(self._token2index)
self._index2token.append(token)
def __len__(self):
return len(self._token2index)
def get_token(self, idx: int) -> str:
return self._index2token[idx]
def decode_idx_list(self, ls: List[int], ignore: List[int] = None, stop_at: int = None) -> List[str]:
ret = []
for idx in ls:
if stop_at and idx == stop_at:
break
elif ignore and idx in ignore:
continue
else:
ret.append(self.get_token(idx))
return ret
def encode_token_list(self, ls: List[str]) -> List[int]:
return [self.get_token_id(token) for token in ls]
@property
def sos_token_idx(self) -> int:
idx = self['<sos>'] or self['<s>']
assert idx is not None
return idx
@property
def eos_token_idx(self) -> int:
idx = self['<eos>'] or self['</s>']
assert idx is not None
return idx
@property
def blank_token_idx(self):
idx = self['<blank>'] or self['<pad>']
assert idx is not None
return idx
@property
def oov_token_idx(self) -> int:
if '<oov>' in self._token2index:
return self._token2index['<oov>']
elif '<unk>' in self._token2index:
return self._token2index['<unk>']
else:
raise Exception("<oov> token not found.")
def get_specials(self):
return [token for token in self._index2token if token.startswith('<')]
def init_pretrained_embeddings(
self,
pretrained: str,
emb_name: str = None,
dim: int = None) -> np.ndarray:
if pretrained == 'glove':
from torchtext.vocab import GloVe
dim = dim or 300
vocab = GloVe(
name=emb_name or '840B', dim=dim,
cache=os.path.join(ModuleConfigs.get_tmp_path(), "torchtext"))
elif pretrained == 'fasttext':
from torchtext.vocab import FastText
vocab = FastText()
else:
raise ValueError("Pre-trained embeddings not found.")
vectors = vocab.vectors
oovs = []
embeddings = np.zeros([len(self), dim])
for idx, t in enumerate(self._index2token):
_t = t.lower()
if _t in vocab.stoi:
embeddings[idx, :] = vectors[vocab.stoi[_t]].cpu().numpy()
if all(token in vocab.stoi for token in _t.split(' ')):
embeddings[idx, :] = np.sum([vectors[vocab.stoi[token]].cpu().numpy() for token in _t.split(' ')])
else:
oovs.append(_t)
if oovs:
logger.warning(f"{len(oovs)} tokens not found in pre-trained embeddings: {', '.join(oovs)}")
logger.debug(f"Load embeddings: {pretrained} (no. embeddings: {len(self) - len(oovs):,})")
self.embedding_dim = dim
self.embeddings = embeddings
def get_token_embedding(self, token: str) -> np.ndarray:
if self.embeddings is None:
raise ValueError('Embeddings are not initialized')
return self.embeddings[self.get_token_id(token)]
def embed_token_list(self, ls):
emb = np.zeros(self.embedding_dim)
for token in ls:
emb += self.get_token_embedding(token)
return emb
def load_embeddings(
pretrained: str,
emb_name: str = None,
dim: int = None,
vocab_size: int = None,
tokens: List[str] = None,
specials: List[str] = None) -> Tuple[np.ndarray, Vocab]:
if not pretrained:
assert dim is not None
assert vocab_size is not None
return np.random.rand(vocab_size, dim), None
elif pretrained.lower() in ["glove", "fasttext"]:
if pretrained.lower() == 'glove':
from torchtext.vocab import GloVe
vocab = GloVe(
name=emb_name, dim=dim,
cache=os.path.join(ModuleConfigs.get_tmp_path(), "torchtext"))
elif pretrained.lower() == 'fasttext':
from torchtext.vocab import FastText
vocab = FastText()
else:
raise ValueError("Pre-trained embeddings not found.")
vectors = vocab.vectors
index2token = vocab.itos
token2index = None
if tokens: # limit vocabulary to list of tokens
num_oovs = 0
keep = []
index2token = []
token2index = {}
for t in tokens:
_t = t.lower()
if _t in token2index:
if t not in token2index:
token2index[t] = token2index[_t]
elif _t in vocab.stoi:
keep.append(vocab.stoi[_t.lower()])
token2index[_t] = len(index2token)
token2index[t] = len(index2token)
index2token.append(_t)
else:
num_oovs += 1
vectors = vectors[keep]
if num_oovs:
logger.warning(f"{num_oovs} tokens not found in pre-trained embeddings")
logger.debug(f"Load embeddings: {pretrained} (no. embeddings: {len(index2token):,})")
if specials is not None:
for s in specials:
token2index[s] = len(index2token)
index2token.append(s)
index2token += specials
vectors = torch.cat([vectors, torch.rand(len(specials), len(vectors[0]))])
# return nn.Embedding.from_pretrained(vectors, freeze=emb.freeze or True), Vocab(index2token, token2index)
return vectors, Vocab(index2token, token2index)
else:
raise ValueError(f"{pretrained} is not supported.")
| true | true |
f71e93dc1e8f76e4a04e77f4cc3875792895275e | 2,360 | py | Python | src/processData.py | mabelzunce/PETAtlases | 438276ff06a8f2f61eb506e5f0e28a257c85d9aa | [
"MIT"
] | null | null | null | src/processData.py | mabelzunce/PETAtlases | 438276ff06a8f2f61eb506e5f0e28a257c85d9aa | [
"MIT"
] | null | null | null | src/processData.py | mabelzunce/PETAtlases | 438276ff06a8f2f61eb506e5f0e28a257c85d9aa | [
"MIT"
] | null | null | null | #! python3
from __future__ import print_function
import SimpleITK as sitk
import ImageRegistration as reg
import numpy as np
import sys
import os
outputPath = "D:\\Martin\\Personal\\UNSAM\\CursoNeuroimagenes\\TrabajosFinales\\NicolasFuentes\\ADNI\\002_S_5018\\RegisteredData\\"
if not os.path.exists(outputPath):
os.mkdir(outputPath)
petImageFilename = "D:\\Martin\\Personal\\UNSAM\\CursoNeuroimagenes\\TrabajosFinales\\NicolasFuentes\\ADNI\\002_S_5018\\ADNI_Brain_PET__Raw_AV45\\2012-11-15_16_29_51.0\\I347148\\ADNI_002_S_5018_PT_ADNI_Brain_PET__Raw_AV45_br_raw_20121119110623877_305_S174962_I347148.nii"
mriImageFilename = "D:\\Martin\\Personal\\UNSAM\\CursoNeuroimagenes\\TrabajosFinales\\NicolasFuentes\\ADNI\\002_S_5018\\ADNI_002_S_5018_MR_MPRAGE_br_raw_20121112145218294_127_S174291_I346242.nii"
mni152Filename = "D:\\Martin\\Personal\\UNSAM\\CursoNeuroimagenes\\TrabajosFinales\\NicolasFuentes\\Atlas\\icbm_avg_152_t1_tal_nlin_symmetric_VI.mnc"
petImage = sitk.Cast(sitk.ReadImage(petImageFilename), sitk.sitkFloat32)
mriImage = sitk.Cast(sitk.ReadImage(mriImageFilename), sitk.sitkFloat32)
mriMni152Image = sitk.Cast(sitk.ReadImage(mni152Filename), sitk.sitkFloat32)
sitk.WriteImage(petImage, outputPath + "PET.nii")
sitk.WriteImage(mriImage, outputPath + "MRI.nii")
sitk.WriteImage(mriMni152Image, outputPath + "MNI152.nii")
# Registration
resultsRegistration = reg.RigidImageRegistration(petImage, sitk.Cast(mriImage, sitk.sitkFloat32), printLog = True)
sitk.WriteImage(resultsRegistration["image"], outputPath + "regPET.nii")
# Normalize MRI into MNI152.
# Create a mask for MNI 152:
otsuSegmentation = sitk.OtsuMultipleThresholds(mriMni152Image, 3, 0, 128, False)
maskMNI152 = otsuSegmentation > 0
sitk.WriteImage(maskMNI152, outputPath + "maskMNI152.nii")
# Two steps, first affine transform, then nonlinear:
resultsFirstStepNormalization = reg.AffineImageRegistration(mriImage, mriMni152Image, printLog = True, fixedMask = maskMNI152)
sitk.WriteImage(resultsFirstStepNormalization["image"], outputPath + "normalizedAffineMRI.nii")
# Now the nonlinear registration:
resultsNonlinearRegistration = reg.NonlinearImageRegistration(resultsFirstStepNormalization["image"], mriMni152Image, printLog = True)
sitk.WriteImage(resultsNonlinearRegistration["image"], outputPath + "normalizedMRI.nii") | 53.636364 | 272 | 0.806356 |
from __future__ import print_function
import SimpleITK as sitk
import ImageRegistration as reg
import numpy as np
import sys
import os
outputPath = "D:\\Martin\\Personal\\UNSAM\\CursoNeuroimagenes\\TrabajosFinales\\NicolasFuentes\\ADNI\\002_S_5018\\RegisteredData\\"
if not os.path.exists(outputPath):
os.mkdir(outputPath)
petImageFilename = "D:\\Martin\\Personal\\UNSAM\\CursoNeuroimagenes\\TrabajosFinales\\NicolasFuentes\\ADNI\\002_S_5018\\ADNI_Brain_PET__Raw_AV45\\2012-11-15_16_29_51.0\\I347148\\ADNI_002_S_5018_PT_ADNI_Brain_PET__Raw_AV45_br_raw_20121119110623877_305_S174962_I347148.nii"
mriImageFilename = "D:\\Martin\\Personal\\UNSAM\\CursoNeuroimagenes\\TrabajosFinales\\NicolasFuentes\\ADNI\\002_S_5018\\ADNI_002_S_5018_MR_MPRAGE_br_raw_20121112145218294_127_S174291_I346242.nii"
mni152Filename = "D:\\Martin\\Personal\\UNSAM\\CursoNeuroimagenes\\TrabajosFinales\\NicolasFuentes\\Atlas\\icbm_avg_152_t1_tal_nlin_symmetric_VI.mnc"
petImage = sitk.Cast(sitk.ReadImage(petImageFilename), sitk.sitkFloat32)
mriImage = sitk.Cast(sitk.ReadImage(mriImageFilename), sitk.sitkFloat32)
mriMni152Image = sitk.Cast(sitk.ReadImage(mni152Filename), sitk.sitkFloat32)
sitk.WriteImage(petImage, outputPath + "PET.nii")
sitk.WriteImage(mriImage, outputPath + "MRI.nii")
sitk.WriteImage(mriMni152Image, outputPath + "MNI152.nii")
resultsRegistration = reg.RigidImageRegistration(petImage, sitk.Cast(mriImage, sitk.sitkFloat32), printLog = True)
sitk.WriteImage(resultsRegistration["image"], outputPath + "regPET.nii")
otsuSegmentation = sitk.OtsuMultipleThresholds(mriMni152Image, 3, 0, 128, False)
maskMNI152 = otsuSegmentation > 0
sitk.WriteImage(maskMNI152, outputPath + "maskMNI152.nii")
resultsFirstStepNormalization = reg.AffineImageRegistration(mriImage, mriMni152Image, printLog = True, fixedMask = maskMNI152)
sitk.WriteImage(resultsFirstStepNormalization["image"], outputPath + "normalizedAffineMRI.nii")
resultsNonlinearRegistration = reg.NonlinearImageRegistration(resultsFirstStepNormalization["image"], mriMni152Image, printLog = True)
sitk.WriteImage(resultsNonlinearRegistration["image"], outputPath + "normalizedMRI.nii") | true | true |
f71e943cd279890286dde5c70ab1018c6adc2ce4 | 4,541 | py | Python | allennlp/tests/data/fields/sequence_label_field_test.py | annaproxy/udify-metalearning | 55206a3aac0aba74a3615a36192d03b6467cfd6f | [
"MIT"
] | 65 | 2020-11-13T05:36:29.000Z | 2022-03-26T22:45:46.000Z | allennlp/tests/data/fields/sequence_label_field_test.py | annaproxy/udify-metalearning | 55206a3aac0aba74a3615a36192d03b6467cfd6f | [
"MIT"
] | 11 | 2021-05-26T16:22:17.000Z | 2022-03-02T04:03:18.000Z | allennlp/tests/data/fields/sequence_label_field_test.py | annaproxy/udify-metalearning | 55206a3aac0aba74a3615a36192d03b6467cfd6f | [
"MIT"
] | 10 | 2019-12-06T11:32:37.000Z | 2022-01-06T15:39:09.000Z | # pylint: disable=no-self-use,invalid-name
from collections import defaultdict
import pytest
import numpy
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Token, Vocabulary
from allennlp.data.fields import TextField, SequenceLabelField
from allennlp.data.token_indexers import SingleIdTokenIndexer
class TestSequenceLabelField(AllenNlpTestCase):
def setUp(self):
super(TestSequenceLabelField, self).setUp()
self.text = TextField([Token(t) for t in ["here", "are", "some", "words", "."]],
{"words": SingleIdTokenIndexer("words")})
def test_tag_length_mismatch_raises(self):
with pytest.raises(ConfigurationError):
wrong_tags = ["B", "O", "O"]
_ = SequenceLabelField(wrong_tags, self.text)
def test_count_vocab_items_correctly_indexes_tags(self):
tags = ["B", "I", "O", "O", "O"]
sequence_label_field = SequenceLabelField(tags, self.text, label_namespace="labels")
counter = defaultdict(lambda: defaultdict(int))
sequence_label_field.count_vocab_items(counter)
assert counter["labels"]["B"] == 1
assert counter["labels"]["I"] == 1
assert counter["labels"]["O"] == 3
assert set(counter.keys()) == {"labels"}
def test_index_converts_field_correctly(self):
vocab = Vocabulary()
b_index = vocab.add_token_to_namespace("B", namespace='*labels')
i_index = vocab.add_token_to_namespace("I", namespace='*labels')
o_index = vocab.add_token_to_namespace("O", namespace='*labels')
tags = ["B", "I", "O", "O", "O"]
sequence_label_field = SequenceLabelField(tags, self.text, label_namespace="*labels")
sequence_label_field.index(vocab)
# pylint: disable=protected-access
assert sequence_label_field._indexed_labels == [b_index, i_index, o_index, o_index, o_index]
# pylint: enable=protected-access
def test_as_tensor_produces_integer_targets(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("B", namespace='*labels')
vocab.add_token_to_namespace("I", namespace='*labels')
vocab.add_token_to_namespace("O", namespace='*labels')
tags = ["B", "I", "O", "O", "O"]
sequence_label_field = SequenceLabelField(tags, self.text, label_namespace="*labels")
sequence_label_field.index(vocab)
padding_lengths = sequence_label_field.get_padding_lengths()
tensor = sequence_label_field.as_tensor(padding_lengths).detach().cpu().numpy()
numpy.testing.assert_array_almost_equal(tensor, numpy.array([0, 1, 2, 2, 2]))
def test_sequence_label_field_raises_on_incorrect_type(self):
with pytest.raises(ConfigurationError):
_ = SequenceLabelField([[], [], [], [], []], self.text)
def test_class_variables_for_namespace_warnings_work_correctly(self):
# pylint: disable=protected-access
tags = ["B", "I", "O", "O", "O"]
assert "text" not in SequenceLabelField._already_warned_namespaces
with self.assertLogs(logger="allennlp.data.fields.sequence_label_field", level="WARNING"):
_ = SequenceLabelField(tags, self.text, label_namespace="text")
# We've warned once, so we should have set the class variable to False.
assert "text" in SequenceLabelField._already_warned_namespaces
with pytest.raises(AssertionError):
with self.assertLogs(logger="allennlp.data.fields.sequence_label_field", level="WARNING"):
_ = SequenceLabelField(tags, self.text, label_namespace="text")
# ... but a new namespace should still log a warning.
assert "text2" not in SequenceLabelField._already_warned_namespaces
with self.assertLogs(logger="allennlp.data.fields.sequence_label_field", level="WARNING"):
_ = SequenceLabelField(tags, self.text, label_namespace="text2")
def test_printing_doesnt_crash(self):
tags = ["B", "I", "O", "O", "O"]
sequence_label_field = SequenceLabelField(tags, self.text, label_namespace="labels")
print(sequence_label_field)
def test_sequence_methods(self):
tags = ["B", "I", "O", "O", "O"]
sequence_label_field = SequenceLabelField(tags, self.text, label_namespace="labels")
assert len(sequence_label_field) == 5
assert sequence_label_field[1] == "I"
assert [label for label in sequence_label_field] == tags
| 45.868687 | 102 | 0.679366 |
from collections import defaultdict
import pytest
import numpy
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Token, Vocabulary
from allennlp.data.fields import TextField, SequenceLabelField
from allennlp.data.token_indexers import SingleIdTokenIndexer
class TestSequenceLabelField(AllenNlpTestCase):
def setUp(self):
super(TestSequenceLabelField, self).setUp()
self.text = TextField([Token(t) for t in ["here", "are", "some", "words", "."]],
{"words": SingleIdTokenIndexer("words")})
def test_tag_length_mismatch_raises(self):
with pytest.raises(ConfigurationError):
wrong_tags = ["B", "O", "O"]
_ = SequenceLabelField(wrong_tags, self.text)
def test_count_vocab_items_correctly_indexes_tags(self):
tags = ["B", "I", "O", "O", "O"]
sequence_label_field = SequenceLabelField(tags, self.text, label_namespace="labels")
counter = defaultdict(lambda: defaultdict(int))
sequence_label_field.count_vocab_items(counter)
assert counter["labels"]["B"] == 1
assert counter["labels"]["I"] == 1
assert counter["labels"]["O"] == 3
assert set(counter.keys()) == {"labels"}
def test_index_converts_field_correctly(self):
vocab = Vocabulary()
b_index = vocab.add_token_to_namespace("B", namespace='*labels')
i_index = vocab.add_token_to_namespace("I", namespace='*labels')
o_index = vocab.add_token_to_namespace("O", namespace='*labels')
tags = ["B", "I", "O", "O", "O"]
sequence_label_field = SequenceLabelField(tags, self.text, label_namespace="*labels")
sequence_label_field.index(vocab)
assert sequence_label_field._indexed_labels == [b_index, i_index, o_index, o_index, o_index]
def test_as_tensor_produces_integer_targets(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("B", namespace='*labels')
vocab.add_token_to_namespace("I", namespace='*labels')
vocab.add_token_to_namespace("O", namespace='*labels')
tags = ["B", "I", "O", "O", "O"]
sequence_label_field = SequenceLabelField(tags, self.text, label_namespace="*labels")
sequence_label_field.index(vocab)
padding_lengths = sequence_label_field.get_padding_lengths()
tensor = sequence_label_field.as_tensor(padding_lengths).detach().cpu().numpy()
numpy.testing.assert_array_almost_equal(tensor, numpy.array([0, 1, 2, 2, 2]))
def test_sequence_label_field_raises_on_incorrect_type(self):
with pytest.raises(ConfigurationError):
_ = SequenceLabelField([[], [], [], [], []], self.text)
def test_class_variables_for_namespace_warnings_work_correctly(self):
tags = ["B", "I", "O", "O", "O"]
assert "text" not in SequenceLabelField._already_warned_namespaces
with self.assertLogs(logger="allennlp.data.fields.sequence_label_field", level="WARNING"):
_ = SequenceLabelField(tags, self.text, label_namespace="text")
assert "text" in SequenceLabelField._already_warned_namespaces
with pytest.raises(AssertionError):
with self.assertLogs(logger="allennlp.data.fields.sequence_label_field", level="WARNING"):
_ = SequenceLabelField(tags, self.text, label_namespace="text")
# ... but a new namespace should still log a warning.
assert "text2" not in SequenceLabelField._already_warned_namespaces
with self.assertLogs(logger="allennlp.data.fields.sequence_label_field", level="WARNING"):
_ = SequenceLabelField(tags, self.text, label_namespace="text2")
def test_printing_doesnt_crash(self):
tags = ["B", "I", "O", "O", "O"]
sequence_label_field = SequenceLabelField(tags, self.text, label_namespace="labels")
print(sequence_label_field)
def test_sequence_methods(self):
tags = ["B", "I", "O", "O", "O"]
sequence_label_field = SequenceLabelField(tags, self.text, label_namespace="labels")
assert len(sequence_label_field) == 5
assert sequence_label_field[1] == "I"
assert [label for label in sequence_label_field] == tags
| true | true |
f71e947b79357afd20224cfeefc521067a15de20 | 476 | py | Python | setup.py | CampbellCrowley/bplot | b5e5080cdcdc9c4d3e5114c13702cbb2f49fbb8c | [
"BSD-3-Clause"
] | null | null | null | setup.py | CampbellCrowley/bplot | b5e5080cdcdc9c4d3e5114c13702cbb2f49fbb8c | [
"BSD-3-Clause"
] | null | null | null | setup.py | CampbellCrowley/bplot | b5e5080cdcdc9c4d3e5114c13702cbb2f49fbb8c | [
"BSD-3-Clause"
] | null | null | null | from setuptools import setup
setup(
name="bplot",
version="0.2",
description="Functional plotting.",
url="http://github.com/roualdes/bplot",
author="Edward A. Roualdes",
author_email="eroualdes@csuchico.edu",
license="BSD (3-clause)",
install_requires=[
"matplotlib>=3.0.0",
"numpy>=1.7,<2.0",
"scipy>=0.19.1",
"pandas>=0.25.0",
],
packages=["bplot"],
package_dir={"": "src"},
zip_safe=False,
)
| 22.666667 | 43 | 0.573529 | from setuptools import setup
setup(
name="bplot",
version="0.2",
description="Functional plotting.",
url="http://github.com/roualdes/bplot",
author="Edward A. Roualdes",
author_email="eroualdes@csuchico.edu",
license="BSD (3-clause)",
install_requires=[
"matplotlib>=3.0.0",
"numpy>=1.7,<2.0",
"scipy>=0.19.1",
"pandas>=0.25.0",
],
packages=["bplot"],
package_dir={"": "src"},
zip_safe=False,
)
| true | true |
f71e949573678c6e993f1d29898b3f2046e7012c | 1,912 | py | Python | imap_tools/errors.py | unqx/imap_tools | 7f8fd5e4f3976bbd2efa507843c577affa61d996 | [
"Apache-2.0"
] | 344 | 2017-05-31T09:45:41.000Z | 2022-03-31T18:32:16.000Z | imap_tools/errors.py | unqx/imap_tools | 7f8fd5e4f3976bbd2efa507843c577affa61d996 | [
"Apache-2.0"
] | 153 | 2017-07-26T07:49:06.000Z | 2022-03-31T16:43:52.000Z | imap_tools/errors.py | unqx/imap_tools | 7f8fd5e4f3976bbd2efa507843c577affa61d996 | [
"Apache-2.0"
] | 53 | 2018-12-06T05:49:14.000Z | 2022-02-19T12:42:56.000Z | class ImapToolsError(Exception):
"""Base lib error"""
class MailboxFolderStatusValueError(ImapToolsError):
"""Wrong folder status value error"""
class UnexpectedCommandStatusError(ImapToolsError):
"""Unexpected status in IMAP command response"""
def __init__(self, command_result: tuple, expected: str):
"""
:param command_result: imap command result
:param expected: expected command status
"""
self.command_result = command_result
self.expected = expected
def __str__(self):
return 'Response status "{exp}" expected, but "{typ}" received. Data: {data}'.format(
exp=self.expected, typ=self.command_result[0], data=str(self.command_result[1]))
class MailboxFolderSelectError(UnexpectedCommandStatusError):
pass
class MailboxFolderCreateError(UnexpectedCommandStatusError):
pass
class MailboxFolderRenameError(UnexpectedCommandStatusError):
pass
class MailboxFolderDeleteError(UnexpectedCommandStatusError):
pass
class MailboxFolderStatusError(UnexpectedCommandStatusError):
pass
class MailboxFolderSubscribeError(UnexpectedCommandStatusError):
pass
class MailboxLoginError(UnexpectedCommandStatusError):
pass
class MailboxLogoutError(UnexpectedCommandStatusError):
pass
class MailboxNumbersError(UnexpectedCommandStatusError):
pass
class MailboxUidsError(UnexpectedCommandStatusError):
pass
class MailboxStarttlsError(UnexpectedCommandStatusError):
pass
class MailboxFetchError(UnexpectedCommandStatusError):
pass
class MailboxExpungeError(UnexpectedCommandStatusError):
pass
class MailboxDeleteError(UnexpectedCommandStatusError):
pass
class MailboxCopyError(UnexpectedCommandStatusError):
pass
class MailboxFlagError(UnexpectedCommandStatusError):
pass
class MailboxAppendError(UnexpectedCommandStatusError):
pass
| 21.010989 | 93 | 0.769874 | class ImapToolsError(Exception):
class MailboxFolderStatusValueError(ImapToolsError):
class UnexpectedCommandStatusError(ImapToolsError):
def __init__(self, command_result: tuple, expected: str):
self.command_result = command_result
self.expected = expected
def __str__(self):
return 'Response status "{exp}" expected, but "{typ}" received. Data: {data}'.format(
exp=self.expected, typ=self.command_result[0], data=str(self.command_result[1]))
class MailboxFolderSelectError(UnexpectedCommandStatusError):
pass
class MailboxFolderCreateError(UnexpectedCommandStatusError):
pass
class MailboxFolderRenameError(UnexpectedCommandStatusError):
pass
class MailboxFolderDeleteError(UnexpectedCommandStatusError):
pass
class MailboxFolderStatusError(UnexpectedCommandStatusError):
pass
class MailboxFolderSubscribeError(UnexpectedCommandStatusError):
pass
class MailboxLoginError(UnexpectedCommandStatusError):
pass
class MailboxLogoutError(UnexpectedCommandStatusError):
pass
class MailboxNumbersError(UnexpectedCommandStatusError):
pass
class MailboxUidsError(UnexpectedCommandStatusError):
pass
class MailboxStarttlsError(UnexpectedCommandStatusError):
pass
class MailboxFetchError(UnexpectedCommandStatusError):
pass
class MailboxExpungeError(UnexpectedCommandStatusError):
pass
class MailboxDeleteError(UnexpectedCommandStatusError):
pass
class MailboxCopyError(UnexpectedCommandStatusError):
pass
class MailboxFlagError(UnexpectedCommandStatusError):
pass
class MailboxAppendError(UnexpectedCommandStatusError):
pass
| true | true |
f71e9497bcf482a547136061650f08ce8c27aa78 | 345 | py | Python | app/__init__.py | gordinmitya/tgnotifierbot | 200a27bc0ee63dcb74018f30cc5e855d8b30cda8 | [
"MIT"
] | null | null | null | app/__init__.py | gordinmitya/tgnotifierbot | 200a27bc0ee63dcb74018f30cc5e855d8b30cda8 | [
"MIT"
] | null | null | null | app/__init__.py | gordinmitya/tgnotifierbot | 200a27bc0ee63dcb74018f30cc5e855d8b30cda8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
def api(environ, start_response):
"""Simplest possible application object"""
data = b'{"code": 200}\n'
status = '200 OK'
response_headers = [
('Content-type', 'application/json'),
('Content-Length', str(len(data)))
]
start_response(status, response_headers)
return iter([data]) | 28.75 | 46 | 0.623188 |
def api(environ, start_response):
data = b'{"code": 200}\n'
status = '200 OK'
response_headers = [
('Content-type', 'application/json'),
('Content-Length', str(len(data)))
]
start_response(status, response_headers)
return iter([data]) | true | true |
f71e99704e778f9397e5fa8db226d45e87f41161 | 19,241 | py | Python | cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py | yanheven/cinder | 89797971f30d547acbf715fea099c52d90966d1f | [
"Apache-2.0"
] | null | null | null | cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py | yanheven/cinder | 89797971f30d547acbf715fea099c52d90966d1f | [
"Apache-2.0"
] | null | null | null | cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py | yanheven/cinder | 89797971f30d547acbf715fea099c52d90966d1f | [
"Apache-2.0"
] | null | null | null | # (c) Copyright 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Script to push the zone configuration to Cisco SAN switches.
"""
import random
import re
from eventlet import greenthread
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder import ssh_utils
from cinder import utils
import cinder.zonemanager.drivers.cisco.fc_zone_constants as ZoneConstant
LOG = logging.getLogger(__name__)
class CiscoFCZoneClientCLI(object):
"""Cisco FC zone client cli implementation.
OpenStack Fibre Channel zone client cli connector
to manage FC zoning in Cisco SAN fabrics.
Version history:
1.0 - Initial Cisco FC zone client cli
"""
switch_ip = None
switch_port = '22'
switch_user = 'admin'
switch_pwd = 'none'
def __init__(self, ipaddress, username, password, port, vsan):
"""initializing the client."""
self.switch_ip = ipaddress
self.switch_port = port
self.switch_user = username
self.switch_pwd = password
self.fabric_vsan = vsan
self.sshpool = None
def get_active_zone_set(self):
"""Return the active zone configuration.
Return active zoneset from fabric. When none of the configurations
are active then it will return empty map.
:returns: Map -- active zone set map in the following format
{
'zones':
{'openstack50060b0000c26604201900051ee8e329':
['50060b0000c26604', '201900051ee8e329']
},
'active_zone_config': 'OpenStack_Cfg'
}
"""
zone_set = {}
zone = {}
zone_member = None
zone_name = None
switch_data = None
zone_set_name = None
try:
switch_data = self._get_switch_info(
[ZoneConstant.GET_ACTIVE_ZONE_CFG, self.fabric_vsan,
' | no-more'])
except exception.CiscoZoningCliException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed getting active zone set "
"from fabric %s"), self.switch_ip)
try:
for line in switch_data:
# Split on non-word characters,
line_split = re.split('[\s\[\]]+', line)
if ZoneConstant.CFG_ZONESET in line_split:
# zoneset name [name] vsan [vsan]
zone_set_name = \
line_split[line_split.index(ZoneConstant.CFG_ZONESET)
+ 2]
continue
if ZoneConstant.CFG_ZONE in line_split:
# zone name [name] vsan [vsan]
zone_name = \
line_split[line_split.index(ZoneConstant.CFG_ZONE) + 2]
zone[zone_name] = list()
continue
if ZoneConstant.CFG_ZONE_MEMBER in line_split:
# Examples:
# pwwn c0:50:76:05:15:9f:00:12
# * fcid 0x1e01c0 [pwwn 50:05:07:68:02:20:48:04] [V7K_N1P2]
zone_member = \
line_split[
line_split.index(ZoneConstant.CFG_ZONE_MEMBER) + 1]
zone_member_list = zone.get(zone_name)
zone_member_list.append(zone_member)
zone_set[ZoneConstant.CFG_ZONES] = zone
zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG] = zone_set_name
except Exception as ex:
# In case of parsing error here, it should be malformed cli output.
msg = _("Malformed zone configuration: (switch=%(switch)s "
"zone_config=%(zone_config)s)."
) % {'switch': self.switch_ip,
'zone_config': switch_data}
LOG.error(msg)
exc_msg = _("Exception: %s") % six.text_type(ex)
LOG.exception(exc_msg)
raise exception.FCZoneDriverException(reason=msg)
return zone_set
def add_zones(self, zones, activate, fabric_vsan, active_zone_set,
zone_status):
"""Add zone configuration.
This method will add the zone configuration passed by user.
input params:
zones - zone names mapped to members and VSANs.
zone members are colon separated but case-insensitive
{ zonename1:[zonememeber1,zonemember2,...],
zonename2:[zonemember1, zonemember2,...]...}
e.g: {'openstack50060b0000c26604201900051ee8e329':
['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29']
}
activate - True/False
"""
LOG.debug("Add Zones - Zones passed: %s", zones)
LOG.debug("Active zone set:%s", active_zone_set)
zone_list = active_zone_set[ZoneConstant.CFG_ZONES]
LOG.debug("zone list:%s", zone_list)
LOG.debug("zone status:%s", zone_status)
cfg_name = active_zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG]
zone_cmds = [['conf'],
['zoneset', 'name', cfg_name, 'vsan', fabric_vsan]]
for zone in zones.keys():
# if zone exists, its an update. Delete & insert
LOG.debug("Update call")
if zone in zone_list:
# Response from get_active_zone_set strips colons from WWPNs
current_zone = set(zone_list[zone])
new_wwpns = map(lambda x: x.lower().replace(':', ''),
zones[zone])
new_zone = set(new_wwpns)
if current_zone != new_zone:
try:
self.delete_zones([zone], activate, fabric_vsan,
active_zone_set, zone_status)
except exception.CiscoZoningCliException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Deleting zone failed %s"), zone)
LOG.debug("Deleted Zone before insert : %s", zone)
zone_cmds.append(['zone', 'name', zone])
for member in zones[zone]:
zone_cmds.append(['member', 'pwwn', member])
zone_cmds.append(['end'])
try:
LOG.debug("Add zones: Config cmd to run:%s", zone_cmds)
self._ssh_execute(zone_cmds, True, 1)
if activate:
self.activate_zoneset(cfg_name, fabric_vsan, zone_status)
self._cfg_save()
except Exception as e:
msg = _("Creating and activating zone set failed: "
"(Zone set=%(zoneset)s error=%(err)s)."
) % {'zoneset': cfg_name, 'err': six.text_type(e)}
LOG.error(msg)
raise exception.CiscoZoningCliException(reason=msg)
def activate_zoneset(self, cfgname, fabric_vsan, zone_status):
"""Method to Activate the zone config. Param cfgname - ZonesetName."""
LOG.debug("zone status:%s", zone_status)
cmd_list = [['conf'],
['zoneset', 'activate', 'name', cfgname, 'vsan',
self.fabric_vsan]]
if zone_status['mode'] == 'enhanced':
cmd_list.append(['zone', 'commit', 'vsan', fabric_vsan])
cmd_list.append(['end'])
return self._ssh_execute(cmd_list, True, 1)
def get_zoning_status(self):
"""Return the zoning mode and session for a zoneset."""
zone_status = {}
try:
switch_data = self._get_switch_info(
[ZoneConstant.GET_ZONE_STATUS, self.fabric_vsan])
except exception.CiscoZoningCliException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed getting zone status "
"from fabric %s"), self.switch_ip)
try:
for line in switch_data:
# Split on non-word characters,
line_split = re.split('[\s\[\]]+', line)
if 'mode:' in line_split:
# mode: <enhanced|basic>
zone_status['mode'] = line_split[line_split.index('mode:')
+ 1]
continue
if 'session:' in line_split:
# session: <none|a value other than none>
zone_status['session'] = \
line_split[line_split.index('session:') + 1]
continue
except Exception as ex:
# In case of parsing error here, it should be malformed cli output.
msg = _("Malformed zone status: (switch=%(switch)s "
"zone_config=%(zone_config)s)."
) % {'switch': self.switch_ip,
'zone_status': switch_data}
LOG.error(msg)
exc_msg = _("Exception: %s") % six.text_type(ex)
LOG.exception(exc_msg)
raise exception.FCZoneDriverException(reason=msg)
return zone_status
def delete_zones(self, zone_names, activate, fabric_vsan, active_zone_set,
zone_status):
"""Delete zones from fabric.
Method to delete the active zone config zones
params zone_names: zoneNames separated by semicolon
params activate: True/False
"""
LOG.debug("zone_names %s", zone_names)
active_zoneset_name = active_zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG]
cmds = [['conf'],
['zoneset', 'name', active_zoneset_name, 'vsan',
fabric_vsan]]
try:
for zone in set(zone_names.split(';')):
cmds.append(['no', 'zone', 'name', zone])
cmds.append(['end'])
LOG.debug("Delete zones: Config cmd to run:%s", cmds)
self._ssh_execute(cmds, True, 1)
if activate:
self.activate_zoneset(active_zoneset_name, fabric_vsan,
zone_status)
self._cfg_save()
except Exception as e:
msg = _("Deleting zones failed: (command=%(cmd)s error=%(err)s)."
) % {'cmd': cmds, 'err': six.text_type(e)}
LOG.error(msg)
raise exception.CiscoZoningCliException(reason=msg)
def get_nameserver_info(self):
"""Get name server data from fabric.
This method will return the connected node port wwn list(local
and remote) for the given switch fabric
show fcns database
"""
cli_output = None
return_list = []
try:
cli_output = self._get_switch_info([ZoneConstant.FCNS_SHOW,
self.fabric_vsan])
except exception.CiscoZoningCliException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed collecting fcns database "
"info for fabric %s"), self.switch_ip)
if (cli_output):
return_list = self._parse_ns_output(cli_output)
LOG.info(_LI("Connector returning fcnsinfo-%s"), return_list)
return return_list
def _cfg_save(self):
cmd = ['copy', 'running-config', 'startup-config']
self._run_ssh(cmd, True, 1)
def _get_switch_info(self, cmd_list):
stdout, stderr, sw_data = None, None, None
try:
stdout, stderr = self._run_ssh(cmd_list, True, 1)
LOG.debug("CLI output from ssh - output:%s", stdout)
if (stdout):
sw_data = stdout.splitlines()
return sw_data
except processutils.ProcessExecutionError as e:
msg = _("Error while getting data via ssh: (command=%(cmd)s "
"error=%(err)s).") % {'cmd': cmd_list,
'err': six.text_type(e)}
LOG.error(msg)
raise exception.CiscoZoningCliException(reason=msg)
def _parse_ns_output(self, switch_data):
"""Parses name server data.
Parses nameserver raw data and adds the device port wwns to the list
:returns: List -- list of device port wwn from ns info
"""
return_list = []
for line in switch_data:
if not(" N " in line):
continue
linesplit = line.split()
if len(linesplit) > 2:
node_port_wwn = linesplit[2]
return_list.append(node_port_wwn)
else:
msg = _("Malformed show fcns database string: %s") % line
LOG.error(msg)
raise exception.InvalidParameterValue(err=msg)
return return_list
def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1):
command = ' '.join(cmd_list)
if not self.sshpool:
self.sshpool = ssh_utils.SSHPool(self.switch_ip,
self.switch_port,
None,
self.switch_user,
self.switch_pwd,
min_size=1,
max_size=5)
last_exception = None
try:
with self.sshpool.item() as ssh:
while attempts > 0:
attempts -= 1
try:
return processutils.ssh_execute(
ssh,
command,
check_exit_code=check_exit_code)
except Exception as e:
msg = _("Exception: %s") % six.text_type(e)
LOG.error(msg)
last_exception = e
greenthread.sleep(random.randint(20, 500) / 100.0)
try:
raise processutils.ProcessExecutionError(
exit_code=last_exception.exit_code,
stdout=last_exception.stdout,
stderr=last_exception.stderr,
cmd=last_exception.cmd)
except AttributeError:
raise processutils.ProcessExecutionError(
exit_code=-1,
stdout="",
stderr="Error running SSH command",
cmd=command)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error running SSH command: %s") % command)
def _ssh_execute(self, cmd_list, check_exit_code=True, attempts=1):
"""Execute cli with status update.
Executes CLI commands where status return is expected.
cmd_list is a list of commands, where each command is itself
a list of parameters. We use utils.check_ssh_injection to check each
command, but then join then with " ; " to form a single command.
"""
# Check that each command is secure
for cmd in cmd_list:
utils.check_ssh_injection(cmd)
# Combine into a single command.
command = ' ; '.join(map(lambda x: ' '.join(x), cmd_list))
if not self.sshpool:
self.sshpool = ssh_utils.SSHPool(self.switch_ip,
self.switch_port,
None,
self.switch_user,
self.switch_pwd,
min_size=1,
max_size=5)
stdin, stdout, stderr = None, None, None
LOG.debug("Executing command via ssh: %s" % command)
last_exception = None
try:
with self.sshpool.item() as ssh:
while attempts > 0:
attempts -= 1
try:
stdin, stdout, stderr = ssh.exec_command(command)
greenthread.sleep(random.randint(20, 500) / 100.0)
channel = stdout.channel
exit_status = channel.recv_exit_status()
LOG.debug("Exit Status from ssh:%s", exit_status)
# exit_status == -1 if no exit code was returned
if exit_status != -1:
LOG.debug('Result was %s' % exit_status)
if check_exit_code and exit_status != 0:
raise processutils.ProcessExecutionError(
exit_code=exit_status,
stdout=stdout,
stderr=stderr,
cmd=command)
else:
return True
else:
return True
except Exception as e:
msg = _("Exception: %s") % six.text_type(e)
LOG.error(msg)
last_exception = e
greenthread.sleep(random.randint(20, 500) / 100.0)
LOG.debug("Handling error case after SSH:%s", last_exception)
try:
raise processutils.ProcessExecutionError(
exit_code=last_exception.exit_code,
stdout=last_exception.stdout,
stderr=last_exception.stderr,
cmd=last_exception.cmd)
except AttributeError:
raise processutils.ProcessExecutionError(
exit_code=-1,
stdout="",
stderr="Error running SSH command",
cmd=command)
except Exception as e:
with excutils.save_and_reraise_exception():
msg = (_("Error executing command via ssh: %s") %
six.text_type(e))
LOG.error(msg)
finally:
if stdin:
stdin.flush()
stdin.close()
if stdout:
stdout.close()
if stderr:
stderr.close()
def cleanup(self):
self.sshpool = None
| 39.754132 | 79 | 0.521958 |
import random
import re
from eventlet import greenthread
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder import ssh_utils
from cinder import utils
import cinder.zonemanager.drivers.cisco.fc_zone_constants as ZoneConstant
LOG = logging.getLogger(__name__)
class CiscoFCZoneClientCLI(object):
switch_ip = None
switch_port = '22'
switch_user = 'admin'
switch_pwd = 'none'
def __init__(self, ipaddress, username, password, port, vsan):
self.switch_ip = ipaddress
self.switch_port = port
self.switch_user = username
self.switch_pwd = password
self.fabric_vsan = vsan
self.sshpool = None
def get_active_zone_set(self):
zone_set = {}
zone = {}
zone_member = None
zone_name = None
switch_data = None
zone_set_name = None
try:
switch_data = self._get_switch_info(
[ZoneConstant.GET_ACTIVE_ZONE_CFG, self.fabric_vsan,
' | no-more'])
except exception.CiscoZoningCliException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed getting active zone set "
"from fabric %s"), self.switch_ip)
try:
for line in switch_data:
line_split = re.split('[\s\[\]]+', line)
if ZoneConstant.CFG_ZONESET in line_split:
zone_set_name = \
line_split[line_split.index(ZoneConstant.CFG_ZONESET)
+ 2]
continue
if ZoneConstant.CFG_ZONE in line_split:
zone_name = \
line_split[line_split.index(ZoneConstant.CFG_ZONE) + 2]
zone[zone_name] = list()
continue
if ZoneConstant.CFG_ZONE_MEMBER in line_split:
zone_member = \
line_split[
line_split.index(ZoneConstant.CFG_ZONE_MEMBER) + 1]
zone_member_list = zone.get(zone_name)
zone_member_list.append(zone_member)
zone_set[ZoneConstant.CFG_ZONES] = zone
zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG] = zone_set_name
except Exception as ex:
msg = _("Malformed zone configuration: (switch=%(switch)s "
"zone_config=%(zone_config)s)."
) % {'switch': self.switch_ip,
'zone_config': switch_data}
LOG.error(msg)
exc_msg = _("Exception: %s") % six.text_type(ex)
LOG.exception(exc_msg)
raise exception.FCZoneDriverException(reason=msg)
return zone_set
def add_zones(self, zones, activate, fabric_vsan, active_zone_set,
zone_status):
LOG.debug("Add Zones - Zones passed: %s", zones)
LOG.debug("Active zone set:%s", active_zone_set)
zone_list = active_zone_set[ZoneConstant.CFG_ZONES]
LOG.debug("zone list:%s", zone_list)
LOG.debug("zone status:%s", zone_status)
cfg_name = active_zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG]
zone_cmds = [['conf'],
['zoneset', 'name', cfg_name, 'vsan', fabric_vsan]]
for zone in zones.keys():
LOG.debug("Update call")
if zone in zone_list:
current_zone = set(zone_list[zone])
new_wwpns = map(lambda x: x.lower().replace(':', ''),
zones[zone])
new_zone = set(new_wwpns)
if current_zone != new_zone:
try:
self.delete_zones([zone], activate, fabric_vsan,
active_zone_set, zone_status)
except exception.CiscoZoningCliException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Deleting zone failed %s"), zone)
LOG.debug("Deleted Zone before insert : %s", zone)
zone_cmds.append(['zone', 'name', zone])
for member in zones[zone]:
zone_cmds.append(['member', 'pwwn', member])
zone_cmds.append(['end'])
try:
LOG.debug("Add zones: Config cmd to run:%s", zone_cmds)
self._ssh_execute(zone_cmds, True, 1)
if activate:
self.activate_zoneset(cfg_name, fabric_vsan, zone_status)
self._cfg_save()
except Exception as e:
msg = _("Creating and activating zone set failed: "
"(Zone set=%(zoneset)s error=%(err)s)."
) % {'zoneset': cfg_name, 'err': six.text_type(e)}
LOG.error(msg)
raise exception.CiscoZoningCliException(reason=msg)
def activate_zoneset(self, cfgname, fabric_vsan, zone_status):
LOG.debug("zone status:%s", zone_status)
cmd_list = [['conf'],
['zoneset', 'activate', 'name', cfgname, 'vsan',
self.fabric_vsan]]
if zone_status['mode'] == 'enhanced':
cmd_list.append(['zone', 'commit', 'vsan', fabric_vsan])
cmd_list.append(['end'])
return self._ssh_execute(cmd_list, True, 1)
def get_zoning_status(self):
zone_status = {}
try:
switch_data = self._get_switch_info(
[ZoneConstant.GET_ZONE_STATUS, self.fabric_vsan])
except exception.CiscoZoningCliException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed getting zone status "
"from fabric %s"), self.switch_ip)
try:
for line in switch_data:
line_split = re.split('[\s\[\]]+', line)
if 'mode:' in line_split:
zone_status['mode'] = line_split[line_split.index('mode:')
+ 1]
continue
if 'session:' in line_split:
zone_status['session'] = \
line_split[line_split.index('session:') + 1]
continue
except Exception as ex:
msg = _("Malformed zone status: (switch=%(switch)s "
"zone_config=%(zone_config)s)."
) % {'switch': self.switch_ip,
'zone_status': switch_data}
LOG.error(msg)
exc_msg = _("Exception: %s") % six.text_type(ex)
LOG.exception(exc_msg)
raise exception.FCZoneDriverException(reason=msg)
return zone_status
def delete_zones(self, zone_names, activate, fabric_vsan, active_zone_set,
zone_status):
LOG.debug("zone_names %s", zone_names)
active_zoneset_name = active_zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG]
cmds = [['conf'],
['zoneset', 'name', active_zoneset_name, 'vsan',
fabric_vsan]]
try:
for zone in set(zone_names.split(';')):
cmds.append(['no', 'zone', 'name', zone])
cmds.append(['end'])
LOG.debug("Delete zones: Config cmd to run:%s", cmds)
self._ssh_execute(cmds, True, 1)
if activate:
self.activate_zoneset(active_zoneset_name, fabric_vsan,
zone_status)
self._cfg_save()
except Exception as e:
msg = _("Deleting zones failed: (command=%(cmd)s error=%(err)s)."
) % {'cmd': cmds, 'err': six.text_type(e)}
LOG.error(msg)
raise exception.CiscoZoningCliException(reason=msg)
def get_nameserver_info(self):
cli_output = None
return_list = []
try:
cli_output = self._get_switch_info([ZoneConstant.FCNS_SHOW,
self.fabric_vsan])
except exception.CiscoZoningCliException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed collecting fcns database "
"info for fabric %s"), self.switch_ip)
if (cli_output):
return_list = self._parse_ns_output(cli_output)
LOG.info(_LI("Connector returning fcnsinfo-%s"), return_list)
return return_list
def _cfg_save(self):
cmd = ['copy', 'running-config', 'startup-config']
self._run_ssh(cmd, True, 1)
def _get_switch_info(self, cmd_list):
stdout, stderr, sw_data = None, None, None
try:
stdout, stderr = self._run_ssh(cmd_list, True, 1)
LOG.debug("CLI output from ssh - output:%s", stdout)
if (stdout):
sw_data = stdout.splitlines()
return sw_data
except processutils.ProcessExecutionError as e:
msg = _("Error while getting data via ssh: (command=%(cmd)s "
"error=%(err)s).") % {'cmd': cmd_list,
'err': six.text_type(e)}
LOG.error(msg)
raise exception.CiscoZoningCliException(reason=msg)
def _parse_ns_output(self, switch_data):
return_list = []
for line in switch_data:
if not(" N " in line):
continue
linesplit = line.split()
if len(linesplit) > 2:
node_port_wwn = linesplit[2]
return_list.append(node_port_wwn)
else:
msg = _("Malformed show fcns database string: %s") % line
LOG.error(msg)
raise exception.InvalidParameterValue(err=msg)
return return_list
def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1):
command = ' '.join(cmd_list)
if not self.sshpool:
self.sshpool = ssh_utils.SSHPool(self.switch_ip,
self.switch_port,
None,
self.switch_user,
self.switch_pwd,
min_size=1,
max_size=5)
last_exception = None
try:
with self.sshpool.item() as ssh:
while attempts > 0:
attempts -= 1
try:
return processutils.ssh_execute(
ssh,
command,
check_exit_code=check_exit_code)
except Exception as e:
msg = _("Exception: %s") % six.text_type(e)
LOG.error(msg)
last_exception = e
greenthread.sleep(random.randint(20, 500) / 100.0)
try:
raise processutils.ProcessExecutionError(
exit_code=last_exception.exit_code,
stdout=last_exception.stdout,
stderr=last_exception.stderr,
cmd=last_exception.cmd)
except AttributeError:
raise processutils.ProcessExecutionError(
exit_code=-1,
stdout="",
stderr="Error running SSH command",
cmd=command)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error running SSH command: %s") % command)
def _ssh_execute(self, cmd_list, check_exit_code=True, attempts=1):
for cmd in cmd_list:
utils.check_ssh_injection(cmd)
command = ' ; '.join(map(lambda x: ' '.join(x), cmd_list))
if not self.sshpool:
self.sshpool = ssh_utils.SSHPool(self.switch_ip,
self.switch_port,
None,
self.switch_user,
self.switch_pwd,
min_size=1,
max_size=5)
stdin, stdout, stderr = None, None, None
LOG.debug("Executing command via ssh: %s" % command)
last_exception = None
try:
with self.sshpool.item() as ssh:
while attempts > 0:
attempts -= 1
try:
stdin, stdout, stderr = ssh.exec_command(command)
greenthread.sleep(random.randint(20, 500) / 100.0)
channel = stdout.channel
exit_status = channel.recv_exit_status()
LOG.debug("Exit Status from ssh:%s", exit_status)
if exit_status != -1:
LOG.debug('Result was %s' % exit_status)
if check_exit_code and exit_status != 0:
raise processutils.ProcessExecutionError(
exit_code=exit_status,
stdout=stdout,
stderr=stderr,
cmd=command)
else:
return True
else:
return True
except Exception as e:
msg = _("Exception: %s") % six.text_type(e)
LOG.error(msg)
last_exception = e
greenthread.sleep(random.randint(20, 500) / 100.0)
LOG.debug("Handling error case after SSH:%s", last_exception)
try:
raise processutils.ProcessExecutionError(
exit_code=last_exception.exit_code,
stdout=last_exception.stdout,
stderr=last_exception.stderr,
cmd=last_exception.cmd)
except AttributeError:
raise processutils.ProcessExecutionError(
exit_code=-1,
stdout="",
stderr="Error running SSH command",
cmd=command)
except Exception as e:
with excutils.save_and_reraise_exception():
msg = (_("Error executing command via ssh: %s") %
six.text_type(e))
LOG.error(msg)
finally:
if stdin:
stdin.flush()
stdin.close()
if stdout:
stdout.close()
if stderr:
stderr.close()
def cleanup(self):
self.sshpool = None
| true | true |
f71e99cda0f1de1255e911ccc3a8bdebb2c5f5b9 | 3,846 | py | Python | plugins/modules/netbox_manufacturer.py | FragmentedPacket/netbox_modules | 608b387eb0d3af8a29222905a4ff19515f006a88 | [
"MIT"
] | 38 | 2019-08-28T18:43:20.000Z | 2020-01-09T15:51:34.000Z | plugins/modules/netbox_manufacturer.py | FragmentedPacket/netbox_modules | 608b387eb0d3af8a29222905a4ff19515f006a88 | [
"MIT"
] | 24 | 2019-09-11T03:46:35.000Z | 2019-12-17T06:25:20.000Z | plugins/modules/netbox_manufacturer.py | FragmentedPacket/netbox_modules | 608b387eb0d3af8a29222905a4ff19515f006a88 | [
"MIT"
] | 9 | 2019-09-20T12:27:39.000Z | 2020-01-09T03:12:27.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Mikhail Yohman (@FragmentedPacket) <mikhail.yohman@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
---
module: netbox_manufacturer
short_description: Create or delete manufacturers within NetBox
description:
- Creates or removes manufacturers from NetBox
notes:
- Tags should be defined as a YAML list
- This should be ran with connection C(local) and hosts C(localhost)
author:
- Mikhail Yohman (@FragmentedPacket)
requirements:
- pynetbox
version_added: '0.1.0'
extends_documentation_fragment:
- netbox.netbox.common
options:
data:
type: dict
description:
- Defines the manufacturer configuration
suboptions:
name:
description:
- The name of the manufacturer
required: true
type: str
slug:
description:
- The slugified version of the name or custom slug.
- This is auto-generated following NetBox rules if not provided
required: false
type: str
description:
description:
- The description of the manufacturer
required: false
type: str
tags:
description:
- The tags to add/update
required: false
type: list
elements: raw
version_added: "3.6.0"
custom_fields:
description:
- Must exist in NetBox
required: false
type: dict
version_added: "3.6.0"
required: true
"""
EXAMPLES = r"""
- name: "Test NetBox modules"
connection: local
hosts: localhost
gather_facts: False
tasks:
- name: Create manufacturer within NetBox with only required information
netbox_manufacturer:
netbox_url: http://netbox.local
netbox_token: thisIsMyToken
data:
name: Test Manufacturer
state: present
- name: Delete manufacturer within netbox
netbox_manufacturer:
netbox_url: http://netbox.local
netbox_token: thisIsMyToken
data:
name: Test Manufacturer
state: absent
"""
RETURN = r"""
manufacturer:
description: Serialized object as created or already existent within NetBox
returned: success (when I(state=present))
type: dict
msg:
description: Message indicating failure or info about what has been achieved
returned: always
type: str
"""
from ansible_collections.netbox.netbox.plugins.module_utils.netbox_utils import (
NetboxAnsibleModule,
NETBOX_ARG_SPEC,
)
from ansible_collections.netbox.netbox.plugins.module_utils.netbox_dcim import (
NetboxDcimModule,
NB_MANUFACTURERS,
)
from copy import deepcopy
def main():
"""
Main entry point for module execution
"""
argument_spec = deepcopy(NETBOX_ARG_SPEC)
argument_spec.update(
dict(
data=dict(
type="dict",
required=True,
options=dict(
name=dict(required=True, type="str"),
slug=dict(required=False, type="str"),
description=dict(required=False, type="str"),
tags=dict(required=False, type="list", elements="raw"),
custom_fields=dict(required=False, type="dict"),
),
),
)
)
required_if = [("state", "present", ["name"]), ("state", "absent", ["name"])]
module = NetboxAnsibleModule(
argument_spec=argument_spec, supports_check_mode=True, required_if=required_if
)
netbox_manufacturer = NetboxDcimModule(module, NB_MANUFACTURERS)
netbox_manufacturer.run()
if __name__ == "__main__": # pragma: no cover
main()
| 26.895105 | 92 | 0.642226 |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
---
module: netbox_manufacturer
short_description: Create or delete manufacturers within NetBox
description:
- Creates or removes manufacturers from NetBox
notes:
- Tags should be defined as a YAML list
- This should be ran with connection C(local) and hosts C(localhost)
author:
- Mikhail Yohman (@FragmentedPacket)
requirements:
- pynetbox
version_added: '0.1.0'
extends_documentation_fragment:
- netbox.netbox.common
options:
data:
type: dict
description:
- Defines the manufacturer configuration
suboptions:
name:
description:
- The name of the manufacturer
required: true
type: str
slug:
description:
- The slugified version of the name or custom slug.
- This is auto-generated following NetBox rules if not provided
required: false
type: str
description:
description:
- The description of the manufacturer
required: false
type: str
tags:
description:
- The tags to add/update
required: false
type: list
elements: raw
version_added: "3.6.0"
custom_fields:
description:
- Must exist in NetBox
required: false
type: dict
version_added: "3.6.0"
required: true
"""
EXAMPLES = r"""
- name: "Test NetBox modules"
connection: local
hosts: localhost
gather_facts: False
tasks:
- name: Create manufacturer within NetBox with only required information
netbox_manufacturer:
netbox_url: http://netbox.local
netbox_token: thisIsMyToken
data:
name: Test Manufacturer
state: present
- name: Delete manufacturer within netbox
netbox_manufacturer:
netbox_url: http://netbox.local
netbox_token: thisIsMyToken
data:
name: Test Manufacturer
state: absent
"""
RETURN = r"""
manufacturer:
description: Serialized object as created or already existent within NetBox
returned: success (when I(state=present))
type: dict
msg:
description: Message indicating failure or info about what has been achieved
returned: always
type: str
"""
from ansible_collections.netbox.netbox.plugins.module_utils.netbox_utils import (
NetboxAnsibleModule,
NETBOX_ARG_SPEC,
)
from ansible_collections.netbox.netbox.plugins.module_utils.netbox_dcim import (
NetboxDcimModule,
NB_MANUFACTURERS,
)
from copy import deepcopy
def main():
argument_spec = deepcopy(NETBOX_ARG_SPEC)
argument_spec.update(
dict(
data=dict(
type="dict",
required=True,
options=dict(
name=dict(required=True, type="str"),
slug=dict(required=False, type="str"),
description=dict(required=False, type="str"),
tags=dict(required=False, type="list", elements="raw"),
custom_fields=dict(required=False, type="dict"),
),
),
)
)
required_if = [("state", "present", ["name"]), ("state", "absent", ["name"])]
module = NetboxAnsibleModule(
argument_spec=argument_spec, supports_check_mode=True, required_if=required_if
)
netbox_manufacturer = NetboxDcimModule(module, NB_MANUFACTURERS)
netbox_manufacturer.run()
if __name__ == "__main__":
main()
| true | true |
f71e9b85f8f4af462b5dcd665e455ebdbab39bbd | 7,950 | py | Python | tensorpack/dataflow/dataset/ilsvrc.py | andrewliao11/Andrew_tensorpack | 735a2672e3d93b5b612a303b5b6d222e9b2d4280 | [
"Apache-2.0"
] | 1 | 2018-03-23T16:26:23.000Z | 2018-03-23T16:26:23.000Z | tensorpack/dataflow/dataset/ilsvrc.py | andrewliao11/Andrew_tensorpack | 735a2672e3d93b5b612a303b5b6d222e9b2d4280 | [
"Apache-2.0"
] | null | null | null | tensorpack/dataflow/dataset/ilsvrc.py | andrewliao11/Andrew_tensorpack | 735a2672e3d93b5b612a303b5b6d222e9b2d4280 | [
"Apache-2.0"
] | 2 | 2017-12-16T04:23:35.000Z | 2021-03-04T23:44:13.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: ilsvrc.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import os
import tarfile
import cv2
import numpy as np
from six.moves import range
import xml.etree.ElementTree as ET
from ...utils import logger, get_rng, get_dataset_path
from ...utils.loadcaffe import get_caffe_pb
from ...utils.fs import mkdir_p, download
from ...utils.timer import timed_operation
from ..base import RNGDataFlow
__all__ = ['ILSVRCMeta', 'ILSVRC12']
CAFFE_ILSVRC12_URL = "http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz"
class ILSVRCMeta(object):
"""
Some metadata for ILSVRC dataset.
"""
def __init__(self, dir=None):
if dir is None:
dir = get_dataset_path('ilsvrc_metadata')
self.dir = dir
mkdir_p(self.dir)
self.caffepb = get_caffe_pb()
f = os.path.join(self.dir, 'synsets.txt')
if not os.path.isfile(f):
self._download_caffe_meta()
def get_synset_words_1000(self):
"""
:returns a dict of {cls_number: cls_name}
"""
fname = os.path.join(self.dir, 'synset_words.txt')
assert os.path.isfile(fname)
lines = [x.strip() for x in open(fname).readlines()]
return dict(enumerate(lines))
def get_synset_1000(self):
"""
:returns a dict of {cls_number: synset_id}
"""
fname = os.path.join(self.dir, 'synsets.txt')
assert os.path.isfile(fname)
lines = [x.strip() for x in open(fname).readlines()]
return dict(enumerate(lines))
def _download_caffe_meta(self):
fpath = download(CAFFE_ILSVRC12_URL, self.dir)
tarfile.open(fpath, 'r:gz').extractall(self.dir)
def get_image_list(self, name):
"""
:param name: 'train' or 'val' or 'test'
:returns: list of (image filename, cls)
"""
assert name in ['train', 'val', 'test']
fname = os.path.join(self.dir, name + '.txt')
assert os.path.isfile(fname)
with open(fname) as f:
ret = []
for line in f.readlines():
name, cls = line.strip().split()
ret.append((name, int(cls)))
assert len(ret)
return ret
def get_per_pixel_mean(self, size=None):
"""
:param size: return image size in [h, w]. default to (256, 256)
:returns: per-pixel mean as an array of shape (h, w, 3) in range [0, 255]
"""
obj = self.caffepb.BlobProto()
mean_file = os.path.join(self.dir, 'imagenet_mean.binaryproto')
with open(mean_file, 'rb') as f:
obj.ParseFromString(f.read())
arr = np.array(obj.data).reshape((3, 256, 256)).astype('float32')
arr = np.transpose(arr, [1,2,0])
if size is not None:
arr = cv2.resize(arr, size[::-1])
return arr
class ILSVRC12(RNGDataFlow):
def __init__(self, dir, name, meta_dir=None, shuffle=True,
dir_structure='original', include_bb=False):
"""
:param dir: A directory containing a subdir named `name`, where the
original ILSVRC12_`name`.tar gets decompressed.
:param name: 'train' or 'val' or 'test'
:param dir_structure: The dir structure of 'val' and 'test'.
If is 'original' then keep the original decompressed directory with list
of image files (as below). If set to 'train', use the the same
directory structure as 'train/', with class name as subdirectories.
:param include_bb: Include the bounding box. Maybe useful in training.
When `dir_structure=='original'`, `dir` should have the following structure:
.. code-block:: none
dir/
train/
n02134418/
n02134418_198.JPEG
...
...
val/
ILSVRC2012_val_00000001.JPEG
...
test/
ILSVRC2012_test_00000001.JPEG
...
bbox/
n02134418/
n02134418_198.xml
...
...
After decompress ILSVRC12_img_train.tar, you can use the following
command to build the above structure for `train/`:
.. code-block:: none
tar xvf ILSVRC12_img_train.tar -C train && cd train
find -type f -name '*.tar' | parallel -P 10 'echo {} && mkdir -p {/.} && tar xf {} -C {/.}'
Or:
for i in *.tar; do dir=${i%.tar}; echo $dir; mkdir -p $dir; tar xf $i -C $dir; done
"""
assert name in ['train', 'test', 'val']
self.full_dir = os.path.join(dir, name)
self.name = name
assert os.path.isdir(self.full_dir), self.full_dir
self.shuffle = shuffle
meta = ILSVRCMeta(meta_dir)
self.imglist = meta.get_image_list(name)
self.dir_structure = dir_structure
self.synset = meta.get_synset_1000()
if include_bb:
bbdir = os.path.join(dir, 'bbox') if not \
isinstance(include_bb, six.string_types) else include_bb
assert name == 'train', 'Bounding box only available for training'
self.bblist = ILSVRC12.get_training_bbox(bbdir, self.imglist)
self.include_bb = include_bb
def size(self):
return len(self.imglist)
def get_data(self):
"""
Produce original images of shape [h, w, 3(BGR)], and label,
and optionally a bbox of [xmin, ymin, xmax, ymax]
"""
idxs = np.arange(len(self.imglist))
add_label_to_fname = (self.name != 'train' and self.dir_structure != 'original')
if self.shuffle:
self.rng.shuffle(idxs)
for k in idxs:
fname, label = self.imglist[k]
if add_label_to_fname:
fname = os.path.join(self.full_dir, self.synset[label], fname)
else:
fname = os.path.join(self.full_dir, fname)
im = cv2.imread(fname.strip(), cv2.IMREAD_COLOR)
assert im is not None, fname
if im.ndim == 2:
im = np.expand_dims(im, 2).repeat(3,2)
if self.include_bb:
bb = self.bblist[k]
if bb is None:
bb = [0, 0, im.shape[1]-1, im.shape[0]-1]
yield [im, label, bb]
else:
yield [im, label]
@staticmethod
def get_training_bbox(bbox_dir, imglist):
ret = []
def parse_bbox(fname):
root = ET.parse(fname).getroot()
size = root.find('size').getchildren()
size = map(int, [size[0].text, size[1].text])
box = root.find('object').find('bndbox').getchildren()
box = map(lambda x: float(x.text), box)
#box[0] /= size[0]
#box[1] /= size[1]
#box[2] /= size[0]
#box[3] /= size[1]
return np.asarray(box, dtype='float32')
with timed_operation('Loading Bounding Boxes ...'):
cnt = 0
import tqdm
for k in tqdm.trange(len(imglist)):
fname = imglist[k][0]
fname = fname[:-4] + 'xml'
fname = os.path.join(bbox_dir, fname)
try:
ret.append(parse_bbox(fname))
cnt += 1
except KeyboardInterrupt:
raise
except:
ret.append(None)
logger.info("{}/{} images have bounding box.".format(cnt, len(imglist)))
return ret
if __name__ == '__main__':
meta = ILSVRCMeta()
#print(meta.get_synset_words_1000())
ds = ILSVRC12('/home/wyx/data/fake_ilsvrc/', 'train', include_bb=True,
shuffle=False)
ds.reset_state()
for k in ds.get_data():
from IPython import embed; embed()
break
| 34.868421 | 103 | 0.550692 |
import os
import tarfile
import cv2
import numpy as np
from six.moves import range
import xml.etree.ElementTree as ET
from ...utils import logger, get_rng, get_dataset_path
from ...utils.loadcaffe import get_caffe_pb
from ...utils.fs import mkdir_p, download
from ...utils.timer import timed_operation
from ..base import RNGDataFlow
__all__ = ['ILSVRCMeta', 'ILSVRC12']
CAFFE_ILSVRC12_URL = "http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz"
class ILSVRCMeta(object):
def __init__(self, dir=None):
if dir is None:
dir = get_dataset_path('ilsvrc_metadata')
self.dir = dir
mkdir_p(self.dir)
self.caffepb = get_caffe_pb()
f = os.path.join(self.dir, 'synsets.txt')
if not os.path.isfile(f):
self._download_caffe_meta()
def get_synset_words_1000(self):
fname = os.path.join(self.dir, 'synset_words.txt')
assert os.path.isfile(fname)
lines = [x.strip() for x in open(fname).readlines()]
return dict(enumerate(lines))
def get_synset_1000(self):
fname = os.path.join(self.dir, 'synsets.txt')
assert os.path.isfile(fname)
lines = [x.strip() for x in open(fname).readlines()]
return dict(enumerate(lines))
def _download_caffe_meta(self):
fpath = download(CAFFE_ILSVRC12_URL, self.dir)
tarfile.open(fpath, 'r:gz').extractall(self.dir)
def get_image_list(self, name):
assert name in ['train', 'val', 'test']
fname = os.path.join(self.dir, name + '.txt')
assert os.path.isfile(fname)
with open(fname) as f:
ret = []
for line in f.readlines():
name, cls = line.strip().split()
ret.append((name, int(cls)))
assert len(ret)
return ret
def get_per_pixel_mean(self, size=None):
obj = self.caffepb.BlobProto()
mean_file = os.path.join(self.dir, 'imagenet_mean.binaryproto')
with open(mean_file, 'rb') as f:
obj.ParseFromString(f.read())
arr = np.array(obj.data).reshape((3, 256, 256)).astype('float32')
arr = np.transpose(arr, [1,2,0])
if size is not None:
arr = cv2.resize(arr, size[::-1])
return arr
class ILSVRC12(RNGDataFlow):
def __init__(self, dir, name, meta_dir=None, shuffle=True,
dir_structure='original', include_bb=False):
assert name in ['train', 'test', 'val']
self.full_dir = os.path.join(dir, name)
self.name = name
assert os.path.isdir(self.full_dir), self.full_dir
self.shuffle = shuffle
meta = ILSVRCMeta(meta_dir)
self.imglist = meta.get_image_list(name)
self.dir_structure = dir_structure
self.synset = meta.get_synset_1000()
if include_bb:
bbdir = os.path.join(dir, 'bbox') if not \
isinstance(include_bb, six.string_types) else include_bb
assert name == 'train', 'Bounding box only available for training'
self.bblist = ILSVRC12.get_training_bbox(bbdir, self.imglist)
self.include_bb = include_bb
def size(self):
return len(self.imglist)
def get_data(self):
idxs = np.arange(len(self.imglist))
add_label_to_fname = (self.name != 'train' and self.dir_structure != 'original')
if self.shuffle:
self.rng.shuffle(idxs)
for k in idxs:
fname, label = self.imglist[k]
if add_label_to_fname:
fname = os.path.join(self.full_dir, self.synset[label], fname)
else:
fname = os.path.join(self.full_dir, fname)
im = cv2.imread(fname.strip(), cv2.IMREAD_COLOR)
assert im is not None, fname
if im.ndim == 2:
im = np.expand_dims(im, 2).repeat(3,2)
if self.include_bb:
bb = self.bblist[k]
if bb is None:
bb = [0, 0, im.shape[1]-1, im.shape[0]-1]
yield [im, label, bb]
else:
yield [im, label]
@staticmethod
def get_training_bbox(bbox_dir, imglist):
ret = []
def parse_bbox(fname):
root = ET.parse(fname).getroot()
size = root.find('size').getchildren()
size = map(int, [size[0].text, size[1].text])
box = root.find('object').find('bndbox').getchildren()
box = map(lambda x: float(x.text), box)
return np.asarray(box, dtype='float32')
with timed_operation('Loading Bounding Boxes ...'):
cnt = 0
import tqdm
for k in tqdm.trange(len(imglist)):
fname = imglist[k][0]
fname = fname[:-4] + 'xml'
fname = os.path.join(bbox_dir, fname)
try:
ret.append(parse_bbox(fname))
cnt += 1
except KeyboardInterrupt:
raise
except:
ret.append(None)
logger.info("{}/{} images have bounding box.".format(cnt, len(imglist)))
return ret
if __name__ == '__main__':
meta = ILSVRCMeta()
ds = ILSVRC12('/home/wyx/data/fake_ilsvrc/', 'train', include_bb=True,
shuffle=False)
ds.reset_state()
for k in ds.get_data():
from IPython import embed; embed()
break
| true | true |
f71e9c66cde7730f1b239e26a61bd195378915a1 | 8,835 | py | Python | app/clean_test_app.py | droyston/spectralize | 572770e7358acc3ec433470659759c17453409f2 | [
"MIT"
] | null | null | null | app/clean_test_app.py | droyston/spectralize | 572770e7358acc3ec433470659759c17453409f2 | [
"MIT"
] | null | null | null | app/clean_test_app.py | droyston/spectralize | 572770e7358acc3ec433470659759c17453409f2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 18 18:54:48 2020
@author: dylanroyston
"""
# -*- coding: utf-8 -*-
# import packages
#import dash_player
import dash
import dash_table
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import psycopg2
import os
import pandas as pd
import numpy as np
import plotly
import plotly.express as px
import plotly.graph_objects as go
import librosa
import librosa.display as ld
import IPython.display as ipd
import pylab as pl
import boto3
#import matplotlib as mpl
#import matplotlib.pyplot as plt
#from matplotlib import cm
#from colorspacious import cspace_converter
#from collections import OrderedDict
######
# connect to PSQL and retrieve
psql_usr = os.environ.get('PSQL_USR')
psql_pw = os.environ.get('PSQL_PW')
conn = psycopg2.connect(host = 'ec2-13-58-251-142.us-east-2.compute.amazonaws.com',
dbname = 'spectralize',
user='postgres',
password=psql_pw)
##### read out metadata
metadata = conn.cursor()
metadata.execute("SELECT * FROM clean_metadata WHERE false;")
cols = set(metadata.fetchall())
metadata.execute("SELECT * FROM clean_metadata;")
md = set(metadata.fetchall())
cols = ["s3_key", "song_id", "album", "albumartist", "artist",
"audio_offset", "bitrate", "channels", "comment", "composer",
"disc", "disc_total", "duration", "filesize", "genre",
"samplerate", "title", "track", "track_total", "year"]
tag_df = pd.DataFrame(data=md, columns=cols)
##### s3 acess for playing audio files
s3_bucket = 'mdp-spectralize-pal'
number_of_files = 0
s3 = boto3.resource('s3')
bucket = s3.Bucket(s3_bucket)
# placeholders for callback initialization
standin_fp = '/home/dylanroyston/Documents/GIT/spectralize/app/hello.wav'
audio_sd_file = standin_fp
#audio_rawfile, new_sr = librosa.load(standin_fp, sr=None)
standin_data = np.array([[0,0],[0,0]])
standin_df = pd.DataFrame(standin_data, columns=['x','y'])
#audio_fig = px.line(standin_df, x='x', y='y', title='audio data', render_mode='webgl')
spec_fig = px.imshow(standin_df)
def load_audio_data(selected_row):
# read out audio data
#curr_song_id = tag_df.iloc[selected_row]['song_id']
curr_song_id = selected_row
# audiodata = conn.cursor()
# qstring = 'SELECT intensity FROM clean_audio WHERE song_id=' + str(curr_song_id)
# audiodata.execute(qstring)
# ad = np.array(audiodata.fetchall())
# audio_df = pd.DataFrame(data=ad, columns=['I'])
# audio_fig = px.line(audio_df, x=audio_df.index, y='I', title='audio data', render_mode='webgl')
# audio_fig.update_layout(
# height=250,
# margin_r=0,
# margin_l=0,
# margin_t=0,
# yaxis_title='',
# yaxis_fixedrange=True)
s3_key = tag_df.iloc[curr_song_id]['s3_key']
#this_row = tag_df.loc[tag_df['song_id'] == curr_song_id]
#s3_key = tag_df.iloc[this_row]['s3_key']
ext = s3_key[-4:]
audio_sd_file = '/home/dylanroyston/Documents/GIT/spectralize/app/audio_file' + ext
bucket.download_file(s3_key, audio_sd_file)
#audio_rawfile = librosa.load(audio_sd_file)
return audio_sd_file#, audio_fig
def load_spec_data(selected_row):
    """Build the spectrogram figure for one song.

    Fetches the precomputed spectrogram rows for the given song id from
    the ``clean_spec`` table and renders them as a Plotly heatmap.

    Parameters
    ----------
    selected_row : int
        Song id to look up in ``clean_spec``.

    Returns
    -------
    plotly figure suitable for a ``dcc.Graph``.
    """
    curr_song_id = selected_row
    specdata = conn.cursor()
    # FIX: use a parameterized query. The song id comes from a user-facing
    # input, so building the SQL string by concatenation was an
    # SQL-injection risk.
    specdata.execute('SELECT * FROM clean_spec WHERE song_id = %s',
                     (curr_song_id,))
    sd = np.array(specdata.fetchall())
    spec_df = pd.DataFrame(data=sd)
    # The first two columns are bookkeeping (ids); the rest are the
    # frequency bins — presumably one row per time step. TODO confirm.
    trim_sd = spec_df.iloc[:, 2:]
    # Transpose so frequency runs along y; origin='lower' puts low
    # frequencies at the bottom.
    spec_fig = px.imshow(trim_sd.transpose(), origin='lower')
    spec_fig.update_layout(
        height=250,
        margin_r=0,
        margin_l=0,
        margin_t=0,
        yaxis_title='Frequency',
        xaxis_title='Time',
        yaxis_fixedrange=True,
    )
    return spec_fig
#####
# initialize Dash app
# Initialize the Dash app: a single page with the metadata table, a
# load-audio control row, and the spectrogram graph.
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)

app.layout = html.Div(children=[
    # header
    html.H1(children='Metadata'),

    # metadata table: one row per song, populated from tag_df
    dash_table.DataTable(
        id = 'metadata_table',
        data=tag_df.to_dict('rows'),
        columns=[{'id': c, 'name': c} for c in tag_df.columns],
        style_cell={
            'overflowX': 'auto',
            'overflow': 'hidden',
            'textOverflow': 'ellipsis',
            'maxWidth': 10,
            # NOTE(review): 'row_selectable' and the font_*/padding keys look
            # like DataTable component props rather than CSS style keys;
            # inside style_cell they are likely ignored — confirm and move
            # them to the component level if row selection is wanted.
            'row_selectable': 'single',
            'font_family': 'Arial',
            'font_size': '1.5rem',
            'padding': '.5rem',
            'backgroundColor': '#f4f4f2'
        },
        style_cell_conditional=[
            {'textAlign': 'center'}
        ],
        style_header={
            'backgroundColor':'#f4f4f2',
            'fontWeight': 'bold',
            'overflowX': 'auto',
            'textOverflow': 'ellipsis'
        },
        style_table={
            'maxHeight':'500px',
            'overflowX': 'scroll'
        },
        # Full cell value as a hover tooltip, since cells are ellipsized.
        tooltip_data=[
            {
                column: {'value': str(value), 'type': 'markdown'}
                for column, value in row.items()
            } for row in tag_df.to_dict('rows')
        ],
        tooltip_duration=None,
        style_as_list_view=True,
    ),# end table

    # load audio button: song-number input + submit button
    html.Br(),
    html.Div(
        [
            dcc.Input(id='input_songnum', value='input song number', type='number'),
            html.Button('Load audio',
                        id='submit-val',
                        style={'display': 'inline-block'},
                        n_clicks=0),
            html.Div(id='song_input')
        ],
    ),
    html.Br(),

    # (audio player components disabled; spectrogram only for now)
    # html.Audio(id="player", src=audio_sd_file, controls=True, style={
    #     "width": "100%"
    # }),
    # dash_player.DashPlayer(
    #     id='player',
    #     url='audio_sd_file',
    #     controls=True
    # ),
    html.Br(),

    #dcc.Graph(id='waveform', figure=audio_fig),
    html.Br(),

    # spectrogram graph, refreshed by the update_S_figure callback
    dcc.Graph(id='spect', figure=spec_fig)
])
##### finish Dash layout
##### callbacks
# load-audio button control
# @app.callback(
# Output('input_songnum', 'value'),
# [Input('submit-val', 'n_clicks')]
# )
# def retrieve_audio(value):
# return load_audio_data(value)
# @app.callback(
# Output('waveform', 'figure'),
# [Input('submit-val', 'n_clicks')]
# )
# def update_A_figure(submit_val):
# audio_fig = load_audio_data(submit_val)
# return audio_fig
## update audio player
# @app.callback(
# Output('player', 'src'),
# [Input('submit-val', 'n_clicks')]
# )
# def update_player(submit_val):
# audio_sd_file = load_audio_data(submit_val)
# return audio_sd_file
## update spect figure on button click
@app.callback(
    Output('spect', 'figure'),
    [Input('submit-val', 'n_clicks'),
     Input('input_songnum', 'value')]
)
def update_S_figure(n_clicks, value):
    """Redraw the spectrogram when the 'Load audio' button is clicked.

    The song-number input is also an Input so its current value is
    available here, but only the submit button should trigger a redraw.
    """
    # Identify which component fired this callback invocation.
    changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
    if 'submit-val' in changed_id:
        return load_spec_data(value)
    # FIX: the original fell through and implicitly returned None when the
    # number input changed, which Dash reports as a callback error.
    # no_update leaves the current figure untouched instead.
    return dash.no_update
## combined audiofile/spec update
# @app.callback(
# [Output('player', 'src'),
# Output('spect', 'figure')],
# [Input('submit-val', 'n_clicks')]
# )
# def update_figures(submit_val):
# audio_sd_file = load_audio_data(submit_val)
# spec_fig = load_spec_data(submit_val)
# return audio_sd_file, spec_fig
# @app.callback(
# Output('metadata_table', 'derived_virtual_selected_rows'),
# [Input('submit-val', 'n_clicks'),
# State('metadata_table', 'derived_virtual_selected_rows')]
# )
# def update_audio(n_clicks, derived_virtual_selected_rows):
# if derived_virtual_selected_rows is None:
# derived_virtual_selected_rows = []
# return load_audio_data(derived_virtual_selected_rows)
if __name__ == '__main__':
    #app.run_server(debug=True, port=8050, host='127.0.0.1')
    # Run the Dash dev server locally; debug=True enables hot-reload.
    app.run_server(debug=True, port=8050, host='127.0.0.1')
    # Alternate deployment target (EC2, port 80), kept for reference:
    #app.run_server(debug=True, port=80, host='ec2-18-224-114-72.us-east-2.compute.amazonaws.com')
| 26.216617 | 101 | 0.603509 |
import dash
import dash_table
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import psycopg2
import os
import pandas as pd
import numpy as np
import plotly
import plotly.express as px
import plotly.graph_objects as go
import librosa
import librosa.display as ld
import IPython.display as ipd
import pylab as pl
import boto3
os.environ.get('PSQL_USR')
psql_pw = os.environ.get('PSQL_PW')
conn = psycopg2.connect(host = 'ec2-13-58-251-142.us-east-2.compute.amazonaws.com',
dbname = 'spectralize',
user='postgres',
password=psql_pw)
lse;")
cols = set(metadata.fetchall())
metadata.execute("SELECT * FROM clean_metadata;")
md = set(metadata.fetchall())
cols = ["s3_key", "song_id", "album", "albumartist", "artist",
"audio_offset", "bitrate", "channels", "comment", "composer",
"disc", "disc_total", "duration", "filesize", "genre",
"samplerate", "title", "track", "track_total", "year"]
tag_df = pd.DataFrame(data=md, columns=cols)
royston/Documents/GIT/spectralize/app/hello.wav'
audio_sd_file = standin_fp
standin_data = np.array([[0,0],[0,0]])
standin_df = pd.DataFrame(standin_data, columns=['x','y'])
spec_fig = px.imshow(standin_df)
def load_audio_data(selected_row):
curr_song_id = selected_row
s3_key = tag_df.iloc[curr_song_id]['s3_key']
ext = s3_key[-4:]
audio_sd_file = '/home/dylanroyston/Documents/GIT/spectralize/app/audio_file' + ext
bucket.download_file(s3_key, audio_sd_file)
return audio_sd_file
def load_spec_data(selected_row):
curr_song_id = selected_row
specdata = conn.cursor()
qstring = 'SELECT * FROM clean_spec WHERE song_id=' + str(curr_song_id)
specdata.execute(qstring)
sd = np.array(specdata.fetchall())
spec_df = pd.DataFrame(data=sd)
trim_sd = spec_df.iloc[:,2:]
spec_fig = px.imshow(trim_sd.transpose(),
origin='lower',
)
spec_fig.update_layout(
height=250,
margin_r=0,
margin_l=0,
margin_t=0,
yaxis_title='Frequency',
xaxis_title='Time',
yaxis_fixedrange=True,
)
return spec_fig
_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div(children=[
html.H1(children='Metadata'),
dash_table.DataTable(
id = 'metadata_table',
data=tag_df.to_dict('rows'),
columns=[{'id': c, 'name': c} for c in tag_df.columns],
style_cell={
'overflowX': 'auto',
'overflow': 'hidden',
'textOverflow': 'ellipsis',
'maxWidth': 10,
'row_selectable': 'single',
'font_family': 'Arial',
'font_size': '1.5rem',
'padding': '.5rem',
'backgroundColor': '#f4f4f2'
},
style_cell_conditional=[
{'textAlign': 'center'}
],
style_header={
'backgroundColor':'#f4f4f2',
'fontWeight': 'bold',
'overflowX': 'auto',
'textOverflow': 'ellipsis'
},
style_table={
'maxHeight':'500px',
'overflowX': 'scroll'
},
tooltip_data=[
{
column: {'value': str(value), 'type': 'markdown'}
for column, value in row.items()
} for row in tag_df.to_dict('rows')
],
tooltip_duration=None,
style_as_list_view=True,
),
html.Br(),
html.Div(
[
dcc.Input(id='input_songnum', value='input song number', type='number'),
html.Button('Load audio',
id='submit-val',
style={'display': 'inline-block'},
n_clicks=0),
html.Div(id='song_input')
],
),
html.Br(),
html.Br(),
html.Br(),
dcc.Graph(id='spect', figure=spec_fig)
])
s, value):
changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
if 'submit-val' in changed_id:
spec_fig = load_spec_data(value)
return spec_fig
if __name__ == '__main__':
app.run_server(debug=True, port=8050, host='127.0.0.1')
| true | true |
f71e9d55e1ce3fe397a614f51b15c10f303d5fcd | 2,491 | py | Python | cogs/idk.py | Mr-Owllers/owll | 4753ec57429dbf06da0850a40ddd0ba7c8964bc6 | [
"MIT"
] | 1 | 2022-01-12T17:11:10.000Z | 2022-01-12T17:11:10.000Z | cogs/idk.py | Mr-Owllers/owll | 4753ec57429dbf06da0850a40ddd0ba7c8964bc6 | [
"MIT"
] | null | null | null | cogs/idk.py | Mr-Owllers/owll | 4753ec57429dbf06da0850a40ddd0ba7c8964bc6 | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
import aiohttp
hug = ["https://c.tenor.com/bFZKN-tlQP4AAAAC/love-you-my-best-friend.gif", "https://c.tenor.com/KlkE8vt8gOIAAAAM/love-is-the-answer-to-everything-hug.gif", "https://c.tenor.com/OkpKo5iPu-8AAAAM/huge-hug.gif", "https://c.tenor.com/BW8ZMOHHrgMAAAAM/friends-joey-tribbiani.gif", "https://c.tenor.com/ut3cq1GezaoAAAAM/hug-hugs.gif", "https://media1.tenor.com/images/8ac5ada8524d767b77d3d54239773e48/tenor.gif?itemid=16334628", "https://c.tenor.com/0gz0aKX9vcQAAAAC/owl-hug-sweet.gif"]
import random
class general(commands.Cog):
    """General-purpose commands: invite link, support server, and hugs."""

    def __init__(self, client):
        self.client = client

    @commands.command(help="Invite me!", aliases=["inv", "i"])
    async def invite(self, ctx):
        """Reply with the bot's invite link."""
        async with ctx.typing():
            # NOTE(review): discord.Embed ignores the 'author'/'footer'
            # kwargs (they require set_author/set_footer) — kept as-is to
            # preserve behavior; confirm whether they should be set properly.
            embed = discord.Embed(
                author="Owll",
                title="Invite me!",
                description="Invite me by pressing [here](https://dsc/owll)",
                footer="I love you"
            )
            await ctx.message.reply(embed=embed)

    @commands.command(help="get a link to the support server", aliases=["xtrahelp", "extrahelp", "helpme"])
    async def support(self, ctx):
        """Reply with a link to the support server."""
        async with ctx.typing():
            embed = discord.Embed(
                author="Owll",
                title="Support server",
                description="You may join our [support server](https://dsc.gg/goldwilde) :D"
            )
            await ctx.message.reply(embed=embed)

    @commands.command(help="hug someone!", aliases=["hog"])
    async def hug(self, ctx, members: commands.Greedy[discord.Member]):
        """Hug the mentioned members with a random hug GIF."""
        async with aiohttp.ClientSession() as cs:
            async with ctx.typing():
                # Pull one extra hug GIF from the public API.
                async with cs.get("https://some-random-api.ml/animu/hug") as r:
                    js = await r.json()
                if not members:
                    return await ctx.send("Please specify someone to hug.")
                if ctx.author in members:
                    return await ctx.send("do you... need a hug?")
                e = discord.Embed(color=0xff0000, description=f"**{ctx.message.author.display_name}** hugs " + "**" + '**, **'.join(x.display_name for x in members) + "**")
                # BUG FIX: the original did `manual = hug` followed by
                # `manual.append(...)`, which aliased and mutated the
                # module-level `hug` list — it grew by one API link on every
                # invocation. Build a fresh candidate list instead.
                manual = hug + [js['link']]
                image = random.choice(manual)
                e.set_image(url=image)
                await ctx.send(embed=e)
def setup(client):
    # discord.py extension entry point: register this cog with the bot.
    client.add_cog(general(client))
from discord.ext import commands
import aiohttp
hug = ["https://c.tenor.com/bFZKN-tlQP4AAAAC/love-you-my-best-friend.gif", "https://c.tenor.com/KlkE8vt8gOIAAAAM/love-is-the-answer-to-everything-hug.gif", "https://c.tenor.com/OkpKo5iPu-8AAAAM/huge-hug.gif", "https://c.tenor.com/BW8ZMOHHrgMAAAAM/friends-joey-tribbiani.gif", "https://c.tenor.com/ut3cq1GezaoAAAAM/hug-hugs.gif", "https://media1.tenor.com/images/8ac5ada8524d767b77d3d54239773e48/tenor.gif?itemid=16334628", "https://c.tenor.com/0gz0aKX9vcQAAAAC/owl-hug-sweet.gif"]
import random
class general(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command(help="Invite me!", aliases=["inv", "i"])
async def invite(self, ctx):
async with ctx.typing():
embed = discord.Embed(
author="Owll",
title="Invite me!",
description="Invite me by pressing [here](https://dsc/owll)",
footer="I love you"
)
await ctx.message.reply(embed=embed)
@commands.command(help="get a link to the support server", aliases=["xtrahelp", "extrahelp", "helpme"])
async def support(self, ctx):
async with ctx.typing():
embed = discord.Embed(
author="Owll",
title="Support server",
description="You may join our [support server](https://dsc.gg/goldwilde) :D"
)
await ctx.message.reply(embed=embed)
@commands.command(help="hug someone!", aliases=["hog"])
async def hug(self, ctx, members: commands.Greedy[discord.Member]):
async with aiohttp.ClientSession() as cs:
async with ctx.typing():
async with cs.get("https://some-random-api.ml/animu/hug") as r:
js = await r.json()
if not members:
return await ctx.send("Please specify someone to hug.")
if ctx.author in members:
return await ctx.send("do you... need a hug?")
e = discord.Embed(color=0xff0000, description=f"**{ctx.message.author.display_name}** hugs " + "**" + '**, **'.join(x.display_name for x in members) + "**")
manual = hug
manual.append(js['link'])
image = random.choice(manual)
e.set_image(url=image)
await ctx.send(embed=e)
def setup(client):
client.add_cog(general(client)) | true | true |
f71e9e447882fb2e25d1fc3c1cdbf309c949b7a8 | 7,334 | py | Python | pychron/hardware/gauges/base_controller.py | ASUPychron/pychron | dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76 | [
"Apache-2.0"
] | 31 | 2016-03-07T02:38:17.000Z | 2022-02-14T18:23:43.000Z | pychron/hardware/gauges/base_controller.py | ASUPychron/pychron | dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76 | [
"Apache-2.0"
] | 1,626 | 2015-01-07T04:52:35.000Z | 2022-03-25T19:15:59.000Z | pychron/hardware/gauges/base_controller.py | UIllinoisHALPychron/pychron | f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc | [
"Apache-2.0"
] | 26 | 2015-05-23T00:10:06.000Z | 2022-03-07T16:51:57.000Z | # ===============================================================================
# Copyright 2017 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from traits.api import HasTraits, List, Str, Float, Int
from traitsui.api import View, HGroup, Item, Group, InstanceEditor, ListEditor
from pychron.core.ui.color_map_bar_editor import BarGaugeEditor
from pychron.graph.time_series_graph import TimeSeriesStreamStackedGraph
class BaseGauge(HasTraits):
    """A single pressure gauge: identity, current reading, display range."""

    name = Str
    pressure = Float
    display_name = Str
    # Pressure range (presumably torr — TODO confirm) mapped onto the
    # bar-gauge color scale.
    low = 5e-10
    high = 1e-8
    color_scalar = 1
    # Pixel width of the bar-gauge widget.
    width = Int(100)
    channel = Str

    def traits_view(self):
        # One read-only row: display name, numeric pressure in scientific
        # notation, and a power-scaled color bar of the same value.
        v = View(
            HGroup(
                Item(
                    "display_name",
                    show_label=False,
                    style="readonly",
                    width=-100,
                ),
                Item(
                    "pressure", format_str="%0.2e", show_label=False, style="readonly"
                ),
                Item(
                    "pressure",
                    show_label=False,
                    width=self.width,
                    editor=BarGaugeEditor(
                        low=self.low,
                        high=self.high,
                        scale="power",
                        color_scalar=self.color_scalar,
                        width=self.width,
                    ),
                ),
            )
        )
        return v
class BaseGaugeController(HasTraits):
    """Shared behavior for multi-gauge pressure controllers.

    Subclasses implement ``_read_pressure`` for their hardware protocol;
    this base class manages the gauge list, mirrors each gauge's pressure
    into a ``<name>_pressure`` trait, and integrates with the scanning /
    graphing machinery.
    """

    address = Str
    gauges = List
    display_name = Str
    # Subclasses may substitute their own gauge/graph classes.
    gauge_klass = BaseGauge
    graph_klass = TimeSeriesStreamStackedGraph

    def initialize(self, *args, **kw):
        """Configure the scan hook; called once during device setup."""
        self.scan_func = "update_pressures"
        # BUG FIX: this was ``self.graph_y_title``, which nothing reads —
        # ``graph_builder`` below (and the scanable-device machinery) use
        # ``graph_ytitle``.
        self.graph_ytitle = "Pressure (torr)"
        return True

    def update_pressures(self, verbose=False):
        """Poll every configured gauge once; return the tuple of readings."""
        if verbose:
            self.debug("update pressures")
        resps = [self._update_pressure(g, verbose) for g in self.gauges]
        return tuple(resps)

    def get_gauge(self, name):
        """Return the gauge matching ``name`` or ``display_name``, else None."""
        return next(
            (gi for gi in self.gauges if gi.name == name or gi.display_name == name),
            None,
        )

    def get_pressure(self, gauge, force=False, verbose=False):
        """Return the pressure of ``gauge`` (object or name).

        When ``force`` is true, read the hardware first instead of
        returning the cached value. Returns None for an unknown gauge.
        """
        if isinstance(gauge, str):
            gauge = self.get_gauge(gauge)
        if gauge is not None:
            if force:
                self._update_pressure(gauge.name, verbose)
            return gauge.pressure

    def get_pressures(self, force=False, **kw):
        """Return all gauge pressures, refreshing first when ``force``."""
        if force:
            self.update_pressures(**kw)
        return [g.pressure for g in self.gauges]

    def _pressure_change(self, obj, name, old, new):
        # Mirror a gauge's pressure into the controller-level
        # ``<gauge name>_pressure`` trait created in ``_load_gauges``.
        self.trait_set(**{"{}_pressure".format(obj.name): new})

    def _read_pressure(self, *args, **kw):
        """Hardware-specific single-gauge read; subclasses must override."""
        raise NotImplementedError

    def _set_gauge_pressure(self, gauge, v):
        """Coerce ``v`` to float and store it on the gauge.

        Returns True on success, None when the gauge is unknown or the
        value is not numeric.
        """
        if isinstance(gauge, str):
            gauge = self.get_gauge(gauge)
        if gauge is not None:
            try:
                gauge.pressure = float(v)
                return True
            except (TypeError, ValueError):
                pass

    def _get_pressure(self, name, verbose=False, force=False):
        # While a scan is running, serve the cached trait value to avoid
        # hammering the hardware. NOTE(review): ``self._scanning`` is
        # assumed to be provided by the scanable-device mixin — confirm.
        if self._scanning and not force:
            attr = "{}_pressure".format(name)
            if hasattr(self, attr):
                return getattr(self, attr)
        return self._read_pressure(name, verbose)

    def _update_pressure(self, gauge, verbose=False):
        """Read one gauge from hardware and store the result on it."""
        if isinstance(gauge, str):
            gauge = self.get_gauge(gauge)
        if verbose:
            self.debug("_update_pressure: {}".format(gauge))
        if gauge:
            p = self._read_pressure(gauge, verbose)
            if self._set_gauge_pressure(gauge, p):
                return p

    def _load_gauges(self, config, *args, **kw):
        """Build the gauge list from the [Gauges] section of ``config``.

        Expects comma-separated, positionally-aligned option strings:
        names (required), display_names, lows, highs, color_scalars,
        channels (all optional with defaults). Invalid numeric entries
        are reported and the offending gauge is skipped.
        """
        ns = self.config_get(config, "Gauges", "names")
        if ns:
            ans = self.config_get(config, "Gauges", "display_names", optional=True)
            if not ans:
                ans = ns
            lows = self.config_get(
                config, "Gauges", "lows", optional=True, default="1e-10, 1e-3, 1e-3"
            )
            highs = self.config_get(
                config, "Gauges", "highs", optional=True, default="1e-6, 1, 1"
            )
            cs = self.config_get(
                config, "Gauges", "color_scalars", optional=True, default="1, 1, 1"
            )
            chs = self.config_get(
                config, "Gauges", "channels", optional=True, default="1, 2, 3"
            )
            # Zip the parallel comma-separated lists into per-gauge tuples.
            for gi in zip(*[x.split(",") for x in (ns, ans, lows, highs, cs, chs)]):
                ni, ai, li, hi, ci, cn = [gg.strip() for gg in gi]
                g = self.gauge_klass(name=ni, display_name=ai, channel=cn)
                try:
                    g.low = float(li)
                except ValueError as e:
                    self.warning_dialog(
                        "Invalid lows string. {}".format(e), title=self.config_path
                    )
                    continue
                try:
                    g.high = float(hi)
                except ValueError as e:
                    self.warning_dialog(
                        "Invalid highs string. {}".format(e), title=self.config_path
                    )
                    continue
                try:
                    g.color_scalar = int(ci)
                except ValueError as e:
                    self.warning_dialog(
                        "Invalid color_scalar string. {}".format(e),
                        title=self.config_path,
                    )
                    continue
                # Dynamic controller trait mirroring this gauge's pressure.
                p = "{}_pressure".format(ni)
                self.add_trait(p, Float)
                g.on_trait_change(self._pressure_change, "pressure")
                self.gauges.append(g)

    def gauge_view(self):
        """Traits view: all gauges stacked in a bordered group."""
        v = View(
            Group(
                Item(
                    "gauges",
                    style="custom",
                    show_label=False,
                    editor=ListEditor(
                        mutable=False, style="custom", editor=InstanceEditor()
                    ),
                ),
                show_border=True,
                label=self.display_name,
            )
        )
        return v

    def graph_builder(self, g, **kw):
        """Add one stacked time-series plot per gauge to graph ``g``."""
        for i, gi in enumerate(self.gauges):
            g.new_plot(padding=[50, 5, 5, 35], zoom=True, pan=True)
            g.set_y_title(self.graph_ytitle, plotid=i)
            g.set_x_title("Time")
            g.new_series(plotid=i)
            g.set_series_label(gi.display_name, plotid=i)
| 33.036036 | 86 | 0.504363 |
from traits.api import HasTraits, List, Str, Float, Int
from traitsui.api import View, HGroup, Item, Group, InstanceEditor, ListEditor
from pychron.core.ui.color_map_bar_editor import BarGaugeEditor
from pychron.graph.time_series_graph import TimeSeriesStreamStackedGraph
class BaseGauge(HasTraits):
name = Str
pressure = Float
display_name = Str
low = 5e-10
high = 1e-8
color_scalar = 1
width = Int(100)
channel = Str
def traits_view(self):
v = View(
HGroup(
Item(
"display_name",
show_label=False,
style="readonly",
width=-100,
),
Item(
"pressure", format_str="%0.2e", show_label=False, style="readonly"
),
Item(
"pressure",
show_label=False,
width=self.width,
editor=BarGaugeEditor(
low=self.low,
high=self.high,
scale="power",
color_scalar=self.color_scalar,
width=self.width,
),
),
)
)
return v
class BaseGaugeController(HasTraits):
address = Str
gauges = List
display_name = Str
gauge_klass = BaseGauge
graph_klass = TimeSeriesStreamStackedGraph
def initialize(self, *args, **kw):
self.scan_func = "update_pressures"
self.graph_y_title = "Pressure (torr)"
return True
def update_pressures(self, verbose=False):
if verbose:
self.debug("update pressures")
resps = [self._update_pressure(g, verbose) for g in self.gauges]
return tuple(resps)
def get_gauge(self, name):
return next(
(gi for gi in self.gauges if gi.name == name or gi.display_name == name),
None,
)
def get_pressure(self, gauge, force=False, verbose=False):
if isinstance(gauge, str):
gauge = self.get_gauge(gauge)
if gauge is not None:
if force:
self._update_pressure(gauge.name, verbose)
return gauge.pressure
def get_pressures(self, force=False, **kw):
if force:
self.update_pressures(**kw)
return [g.pressure for g in self.gauges]
def _pressure_change(self, obj, name, old, new):
self.trait_set(**{"{}_pressure".format(obj.name): new})
def _read_pressure(self, *args, **kw):
raise NotImplementedError
def _set_gauge_pressure(self, gauge, v):
if isinstance(gauge, str):
gauge = self.get_gauge(gauge)
if gauge is not None:
try:
gauge.pressure = float(v)
return True
except (TypeError, ValueError):
pass
def _get_pressure(self, name, verbose=False, force=False):
if self._scanning and not force:
attr = "{}_pressure".format(name)
if hasattr(self, attr):
return getattr(self, attr)
return self._read_pressure(name, verbose)
def _update_pressure(self, gauge, verbose=False):
if isinstance(gauge, str):
gauge = self.get_gauge(gauge)
if verbose:
self.debug("_update_pressure: {}".format(gauge))
if gauge:
p = self._read_pressure(gauge, verbose)
if self._set_gauge_pressure(gauge, p):
return p
def _load_gauges(self, config, *args, **kw):
ns = self.config_get(config, "Gauges", "names")
if ns:
ans = self.config_get(config, "Gauges", "display_names", optional=True)
if not ans:
ans = ns
lows = self.config_get(
config, "Gauges", "lows", optional=True, default="1e-10, 1e-3, 1e-3"
)
highs = self.config_get(
config, "Gauges", "highs", optional=True, default="1e-6, 1, 1"
)
cs = self.config_get(
config, "Gauges", "color_scalars", optional=True, default="1, 1, 1"
)
chs = self.config_get(
config, "Gauges", "channels", optional=True, default="1, 2, 3"
)
for gi in zip(*[x.split(",") for x in (ns, ans, lows, highs, cs, chs)]):
ni, ai, li, hi, ci, cn = [gg.strip() for gg in gi]
g = self.gauge_klass(name=ni, display_name=ai, channel=cn)
try:
g.low = float(li)
except ValueError as e:
self.warning_dialog(
"Invalid lows string. {}".format(e), title=self.config_path
)
continue
try:
g.high = float(hi)
except ValueError as e:
self.warning_dialog(
"Invalid highs string. {}".format(e), title=self.config_path
)
continue
try:
g.color_scalar = int(ci)
except ValueError as e:
self.warning_dialog(
"Invalid color_scalar string. {}".format(e),
title=self.config_path,
)
continue
p = "{}_pressure".format(ni)
self.add_trait(p, Float)
g.on_trait_change(self._pressure_change, "pressure")
self.gauges.append(g)
def gauge_view(self):
v = View(
Group(
Item(
"gauges",
style="custom",
show_label=False,
editor=ListEditor(
mutable=False, style="custom", editor=InstanceEditor()
),
),
show_border=True,
label=self.display_name,
)
)
return v
def graph_builder(self, g, **kw):
for i, gi in enumerate(self.gauges):
g.new_plot(padding=[50, 5, 5, 35], zoom=True, pan=True)
g.set_y_title(self.graph_ytitle, plotid=i)
g.set_x_title("Time")
g.new_series(plotid=i)
g.set_series_label(gi.display_name, plotid=i)
| true | true |
f71e9ef1b07ffec34348f9fc349bdc12ee2c9d58 | 2,061 | py | Python | tanuki/history/views.py | addisonmaupin/capstone2020 | cf8c8e7336aa9866859349838e4f42bc6831679c | [
"MIT"
] | null | null | null | tanuki/history/views.py | addisonmaupin/capstone2020 | cf8c8e7336aa9866859349838e4f42bc6831679c | [
"MIT"
] | 9 | 2021-03-19T14:50:48.000Z | 2022-03-12T00:47:25.000Z | tanuki/history/views.py | pabsromo/capstone2020 | cf8c8e7336aa9866859349838e4f42bc6831679c | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django import template
register = template.Library()
from django.contrib.auth.decorators import login_required
import json
from django.core.serializers.json import DjangoJSONEncoder
from django.forms.models import model_to_dict
from overview.models import AddItem
from overview.forms import AddItemForm
# Create your views here.
@login_required(login_url='login:index')
def history(request):
    """Render the item-history page for the logged-in user.

    GET: show the user's items plus a JSON copy for client-side use.
    POST: validate and save a new item, then redirect to the overview.
    """
    if request.method == 'POST':
        form = AddItemForm(request.POST, label_suffix=' ')
        if form.is_valid():
            add_item = form.save(commit=False)
            add_item.itemType = form.cleaned_data['itemType']
            # Items always belong to the requesting user.
            add_item.user = request.user
            add_item.save()
            # FIX: ``redirect`` was called here but never imported, so every
            # valid POST raised NameError (added via the shortcuts import).
            return redirect('overview:home')
        else:
            # Invalid form: re-render with the bound form so errors show.
            context = {'form': form}
    else:
        # Only show objects for the authenticated user.
        items = AddItem.objects.filter(user=request.user)
        form = AddItemForm(label_suffix=' ')
        if items.first() is None:
            form = []
            items = []
            items_json = []
        else:
            rows = []
            for item in items:
                rows.append([
                    int(str(item.user.id)),
                    str(item.user),
                    item.itemName,
                    float(item.itemPrice),
                    item.itemType,
                    item.dateDisplayed.strftime('%m/%d/%Y'),
                ])
            # Serializable copy of the items for the template's JavaScript.
            items_json = json.dumps(rows)
        context = {
            'form': form,
            'items': items,
            'items_json': items_json,
        }
    return render(request, 'history.html', context)
return render(request, 'history.html', context) | 34.932203 | 88 | 0.585153 | from django.shortcuts import render
from django import template
register = template.Library()
from django.contrib.auth.decorators import login_required
import json
from django.core.serializers.json import DjangoJSONEncoder
from django.forms.models import model_to_dict
from overview.models import AddItem
from overview.forms import AddItemForm
@login_required(login_url='login:index')
def history(request):
if request.method == 'POST':
form = AddItemForm(request.POST, label_suffix=' ')
if form.is_valid():
addItem = form.save(commit=False)
addItem.itemType = form.cleaned_data['itemType']
addItem.user = request.user
addItem.save()
itemName = form.cleaned_data['itemName']
itemPrice = form.cleaned_data['itemPrice']
return redirect('overview:home')
else:
context = {'form': form}
else:
items = AddItem.objects.filter(user=request.user)
form = AddItemForm(label_suffix=' ')
temp = []
if items.first() is None:
form = []
items = []
items_json = []
else:
print(items[0].itemName)
for item in items:
t = []
t.append(int(str(item.user.id)))
t.append(str(item.user))
t.append(item.itemName)
t.append(float(item.itemPrice))
t.append(item.itemType)
t.append(item.dateDisplayed.strftime('%m/%d/%Y'))
temp.append(t)
print(temp)
items_json = json.dumps(temp)
context = {
'form': form,
'items': items,
'items_json': items_json,
}
return render(request, 'history.html', context) | true | true |
f71e9f4ae8aeaa716d941ad00f7fd5c17c49b75c | 1,182 | py | Python | test/test_files/pylops/examples/plot_imag.py | SoftwareUnderstanding/inspect4py | 9c4d7252535082ad938b26baf281d93f3a27285e | [
"BSD-3-Clause"
] | 2 | 2022-02-15T20:30:57.000Z | 2022-03-17T00:50:37.000Z | test/test_files/pylops/examples/plot_imag.py | SoftwareUnderstanding/code_inspector | a820b5a7bb18f5df9c3e79346108d8280b20c39a | [
"BSD-3-Clause"
] | 101 | 2021-06-09T14:19:59.000Z | 2022-01-24T13:24:39.000Z | test/test_files/pylops/examples/plot_imag.py | SoftwareUnderstanding/inspect4py | 9c4d7252535082ad938b26baf281d93f3a27285e | [
"BSD-3-Clause"
] | 1 | 2021-09-22T06:59:32.000Z | 2021-09-22T06:59:32.000Z | """
Imag
====
This example shows how to use the :py:class:`pylops.basicoperators.Imag`
operator.
This operator returns the imaginary part of the data as a real value in
forward mode, and the real part of the model as an imaginary value in
adjoint mode (with zero real part).
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as pltgs
import pylops
plt.close('all')

###############################################################################
# Let's define a Imag operator :math:`\mathbf{\Im}` to extract the imaginary
# component of the input.
M = 5
# Complex ramp: real part 0..M-1, imaginary part M-1..0.
x = np.arange(M) + 1j * np.arange(M)[::-1]
Rop = pylops.basicoperators.Imag(M, dtype='complex128')

# Forward: the imaginary part of x, returned as real values.
y = Rop*x
# Adjoint: the real input placed back into the imaginary component.
xadj = Rop.H*y

# Plot input, forward output, and adjoint-of-forward side by side.
_, axs = plt.subplots(1, 3, figsize=(10, 4))
axs[0].plot(np.real(x), lw=2, label='Real')
axs[0].plot(np.imag(x), lw=2, label='Imag')
axs[0].legend()
axs[0].set_title('Input')
axs[1].plot(np.real(y), lw=2, label='Real')
axs[1].plot(np.imag(y), lw=2, label='Imag')
axs[1].legend()
axs[1].set_title('Forward of Input')
axs[2].plot(np.real(xadj), lw=2, label='Real')
axs[2].plot(np.imag(xadj), lw=2, label='Imag')
axs[2].legend()
axs[2].set_title('Adjoint of Forward')
import matplotlib.pyplot as plt
import matplotlib.gridspec as pltgs
import pylops
plt.close('all')
| true | true |
f71e9fc678e62a608c02041f6e0914f9fcbec0c2 | 674 | py | Python | test/test_tensorboard.py | ethan4335/pytorch-YOLOv4 | 44f67130d83fc2949efb50afe67337735836169b | [
"Apache-2.0"
] | null | null | null | test/test_tensorboard.py | ethan4335/pytorch-YOLOv4 | 44f67130d83fc2949efb50afe67337735836169b | [
"Apache-2.0"
] | null | null | null | test/test_tensorboard.py | ethan4335/pytorch-YOLOv4 | 44f67130d83fc2949efb50afe67337735836169b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = 'pytorch-YOLOv4'
__author__ = 'deagle'
__date__ = '11/23/2020 11:30'
# code is far away from bugs with the god animal protecting
I love animals. They taste delicious.
"""
import datetime
from tensorboardX import SummaryWriter
def main():
    """Write a toy scalar series (y = 2x, 100 steps) for TensorBoard."""
    # FIX: dropped the redundant function-local re-import of SummaryWriter;
    # the module-level import already provides it.
    writer = SummaryWriter()
    for i in range(100):
        writer.add_scalar('y=2x', i * 2, i)
    writer.close()
if __name__ == '__main__':
    start_time = datetime.datetime.now()
    main()
    end_time = datetime.datetime.now()
    time_cost = end_time - start_time
    # Print elapsed wall time, dropping the fractional-seconds part.
    print(str(time_cost).split('.')[0])
import datetime
from tensorboardX import SummaryWriter
def main():
from tensorboardX import SummaryWriter
writer = SummaryWriter()
x = range(100)
for i in x:
writer.add_scalar('y=2x', i * 2, i)
writer.close()
if __name__ == '__main__':
start_time = datetime.datetime.now()
main()
end_time = datetime.datetime.now()
time_cost = end_time - start_time
print(str(time_cost).split('.')[0])
| true | true |
f71e9fdd77eb43e16314dd553b822f51a7dab59b | 3,213 | py | Python | enCount/tests/gtfs.py | mstrazar/enCount | dcff565ce96afe37aa8a41995637d00cce02360d | [
"MIT"
] | null | null | null | enCount/tests/gtfs.py | mstrazar/enCount | dcff565ce96afe37aa8a41995637d00cce02360d | [
"MIT"
] | null | null | null | enCount/tests/gtfs.py | mstrazar/enCount | dcff565ce96afe37aa8a41995637d00cce02360d | [
"MIT"
] | null | null | null | # coding=utf-8
import os
import enCount.gtfs as gtfs
import enCount.db as db
import enCount.queues as queue
from enCount.config import genomes_root
import datetime
import unittest
import time
# Mock system calls
from mock import Mock
gtfs.rnastar.sp_call = Mock(return_value=0)
gtfs.get_version_before = Mock(return_value="chM")
class TestGtfs(unittest.TestCase):
    """
    Test for the gtfs queue and DB.
    Assumes directory structure:
    /endata/genomes
    /endata/genomes/gtf
    /endata/genomes/gtf/minimal.gtf
    /endata/genomes/fasta
    /endata/genomes/index
    """

    def setUp(self):
        # Start from an empty collection so the count assertions below are
        # deterministic.
        db.gtfs.drop()
        # get_version_before is mocked at module level to return "chM".
        self.gtf_ver = gtfs.get_version_before(datetime.datetime.min)
        self.in_gtf = gtfs.version_to_path(gtf_ver=self.gtf_ver)
        self.in_gff = gtfs.version_to_path(gtf_ver=self.gtf_ver, gff=True)
        self.genome_dir = gtfs.get_genome_index_dir(self.gtf_ver)
        # Remove any stale GFF left over from a previous run.
        if os.path.exists(self.in_gff):
            os.remove(self.in_gff)

    def test_gtf(self):
        """
        Simple test of the genome index generation pipeline without using the queue mechanism.
        """
        self.assertEqual(db.gtfs.find().count(), 1)
        self.assertTrue(os.path.exists(self.in_gtf))
        # Generate a genome index and get path via DB
        in_genome_fasta_dir = os.path.join(genomes_root, "fasta", self.gtf_ver)
        self.assertTrue(os.path.isdir(in_genome_fasta_dir))
        # Insert record into database
        self.assertTrue(os.path.isdir(self.genome_dir))
        # The method gtfs.get_genome_index_dir is called within gtfs.generate_genome_index
        # and shall not be called elsewhere before a genome index is generated
        gtfs.generate_genome_index(self.in_gtf, in_genome_fasta_dir, self.genome_dir, self.in_gff)
        self.genome_dir = gtfs.get_genome_index_dir(self.gtf_ver)
        # Check mapping: exactly one 'ready' record should exist.
        mappings = list(db.gtfs.find({"gtf_ver": self.gtf_ver, "status": "ready"}))
        self.assertEqual(len(mappings), 1)

    def test_process_queue(self):
        """
        Simple test of the genome index generation pipeline using the process queue.
        """
        self.assertEqual(db.gtfs.find().count(), 1)
        # Get latest genome version
        self.assertTrue(os.path.exists(self.in_gtf))
        # Process gtf_version
        mappings = list(db.gtfs.find({"gtf_ver": self.gtf_ver}))
        self.assertEqual(len(mappings), 1)
        # Process outstanding requests; Mock submitted jobs explicitly
        empty = False
        while not empty:
            gtfs.process(mock=True)
            empty = queue.gtfs.is_empty()
        # Wait for database to refresh (busy-wait with 1 s polling).
        # TODO: is there a cleaner way to ensure transactions?
        mappings = list(db.gtfs.find({"gtf_ver": self.gtf_ver, "status": "ready"}))
        while not len(mappings):
            time.sleep(1)
            mappings = list(db.gtfs.find({"gtf_ver": self.gtf_ver, "status": "ready"}))
        # Make sure results exist
        self.genome_dir = gtfs.get_genome_index_dir(self.gtf_ver)
        self.assertTrue(os.path.isdir(self.genome_dir))
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| 33.123711 | 98 | 0.659197 |
import os
import enCount.gtfs as gtfs
import enCount.db as db
import enCount.queues as queue
from enCount.config import genomes_root
import datetime
import unittest
import time
from mock import Mock
gtfs.rnastar.sp_call = Mock(return_value=0)
gtfs.get_version_before = Mock(return_value="chM")
class TestGtfs(unittest.TestCase):
def setUp(self):
db.gtfs.drop()
self.gtf_ver = gtfs.get_version_before(datetime.datetime.min)
self.in_gtf = gtfs.version_to_path(gtf_ver=self.gtf_ver)
self.in_gff = gtfs.version_to_path(gtf_ver=self.gtf_ver, gff=True)
self.genome_dir = gtfs.get_genome_index_dir(self.gtf_ver)
if os.path.exists(self.in_gff):
os.remove(self.in_gff)
def test_gtf(self):
self.assertEqual(db.gtfs.find().count(), 1)
self.assertTrue(os.path.exists(self.in_gtf))
in_genome_fasta_dir = os.path.join(genomes_root, "fasta", self.gtf_ver)
self.assertTrue(os.path.isdir(in_genome_fasta_dir))
self.assertTrue(os.path.isdir(self.genome_dir))
gtfs.generate_genome_index(self.in_gtf, in_genome_fasta_dir, self.genome_dir, self.in_gff)
self.genome_dir = gtfs.get_genome_index_dir(self.gtf_ver)
mappings = list(db.gtfs.find({"gtf_ver": self.gtf_ver, "status": "ready"}))
self.assertEqual(len(mappings), 1)
def test_process_queue(self):
self.assertEqual(db.gtfs.find().count(), 1)
self.assertTrue(os.path.exists(self.in_gtf))
mappings = list(db.gtfs.find({"gtf_ver": self.gtf_ver}))
self.assertEqual(len(mappings), 1)
empty = False
while not empty:
gtfs.process(mock=True)
empty = queue.gtfs.is_empty()
mappings = list(db.gtfs.find({"gtf_ver": self.gtf_ver, "status": "ready"}))
while not len(mappings):
time.sleep(1)
mappings = list(db.gtfs.find({"gtf_ver": self.gtf_ver, "status": "ready"}))
self.genome_dir = gtfs.get_genome_index_dir(self.gtf_ver)
self.assertTrue(os.path.isdir(self.genome_dir))
if __name__ == "__main__":
unittest.main()
| true | true |
f71ea15a3576fb6c010db825bd3096593169e5a3 | 1,999 | py | Python | apps/fithm-service/apps/model/models.py | sergio1221/flask-backend | 11a9e0db5b5e664fcc820919d97039738176ac62 | [
"BSD-3-Clause"
] | 3 | 2022-03-04T03:05:55.000Z | 2022-03-04T09:02:32.000Z | apps/fithm-service/apps/model/models.py | sergio1221/flask-backend | 11a9e0db5b5e664fcc820919d97039738176ac62 | [
"BSD-3-Clause"
] | null | null | null | apps/fithm-service/apps/model/models.py | sergio1221/flask-backend | 11a9e0db5b5e664fcc820919d97039738176ac62 | [
"BSD-3-Clause"
] | null | null | null | from sqlalchemy import (
Column,
String,
ForeignKey,
Float,
Integer,
Boolean
)
from sqlalchemy.orm import relationship
from sqlalchemy.dialects import postgresql
from libs.database import Base, Stateful
class Model(Stateful):
'''Model table'''
__tablename__ = 'models'
id = Column(Integer, primary_key=True)
business_id = Column(Integer, ForeignKey('businesses.id'), nullable=False)
name = Column(String)
description = Column(String)
keywords = Column("data", postgresql.ARRAY(String))
is_public = Column(Boolean, default=False, nullable=False)
business = relationship("Business", back_populates="models")
allocation = relationship(
"ModelPosition", back_populates="model", cascade="all, delete, delete-orphan")
portfolio = relationship("Portfolio", back_populates="model")
def as_dict(self):
result = {
'id': self.id,
'name': self.name,
'keywords': [],
'is_public': self.is_public,
'description': self.description
}
result['user_id'] = self.business.user_id
if self.allocation:
result['positions'] = [a.as_dict() for a in self.allocation]
if self.keywords:
result['keywords'] = [k for k in self.keywords]
return result
class ModelPosition(Base):
__tablename__ = 'model_positions'
id = Column(Integer, primary_key=True)
model_id = Column(Integer, ForeignKey('models.id'), nullable=False)
symbol = Column(String)
weight = Column(Float)
price = Column(Float)
model = relationship("Model", back_populates="allocation")
trade_prices = relationship(
"Price", back_populates="model_position", cascade="all, delete, delete-orphan")
def as_dict(self):
result = {'model_id': self.model_id,
'symbol': self.symbol, 'weight': self.weight}
return result
| 32.770492 | 88 | 0.626313 | from sqlalchemy import (
Column,
String,
ForeignKey,
Float,
Integer,
Boolean
)
from sqlalchemy.orm import relationship
from sqlalchemy.dialects import postgresql
from libs.database import Base, Stateful
class Model(Stateful):
__tablename__ = 'models'
id = Column(Integer, primary_key=True)
business_id = Column(Integer, ForeignKey('businesses.id'), nullable=False)
name = Column(String)
description = Column(String)
keywords = Column("data", postgresql.ARRAY(String))
is_public = Column(Boolean, default=False, nullable=False)
business = relationship("Business", back_populates="models")
allocation = relationship(
"ModelPosition", back_populates="model", cascade="all, delete, delete-orphan")
portfolio = relationship("Portfolio", back_populates="model")
def as_dict(self):
result = {
'id': self.id,
'name': self.name,
'keywords': [],
'is_public': self.is_public,
'description': self.description
}
result['user_id'] = self.business.user_id
if self.allocation:
result['positions'] = [a.as_dict() for a in self.allocation]
if self.keywords:
result['keywords'] = [k for k in self.keywords]
return result
class ModelPosition(Base):
__tablename__ = 'model_positions'
id = Column(Integer, primary_key=True)
model_id = Column(Integer, ForeignKey('models.id'), nullable=False)
symbol = Column(String)
weight = Column(Float)
price = Column(Float)
model = relationship("Model", back_populates="allocation")
trade_prices = relationship(
"Price", back_populates="model_position", cascade="all, delete, delete-orphan")
def as_dict(self):
result = {'model_id': self.model_id,
'symbol': self.symbol, 'weight': self.weight}
return result
| true | true |
f71ea1692f9195dcc858ae156af3624dfca9a2ef | 1,661 | py | Python | TA-linode/bin/ta_linode/aob_py3/splunktalib/file_monitor.py | jriddle-linode/splunk-addon-linode | 5954acd12ef88ab991365ef51072db68aed46aa1 | [
"Apache-2.0"
] | 11 | 2020-01-23T11:32:26.000Z | 2021-09-23T09:24:02.000Z | TA-linode/bin/ta_linode/aob_py3/splunktalib/file_monitor.py | jriddle-linode/splunk-addon-linode | 5954acd12ef88ab991365ef51072db68aed46aa1 | [
"Apache-2.0"
] | 26 | 2019-07-15T02:38:22.000Z | 2021-12-01T04:14:17.000Z | TA-linode/bin/ta_linode/aob_py3/splunktalib/file_monitor.py | jriddle-linode/splunk-addon-linode | 5954acd12ef88ab991365ef51072db68aed46aa1 | [
"Apache-2.0"
] | 6 | 2019-07-14T17:44:06.000Z | 2020-11-17T17:33:23.000Z | # SPDX-FileCopyrightText: 2020 Splunk Inc.
#
# SPDX-License-Identifier: Apache-2.0
from builtins import object
import os.path as op
import traceback
from splunktalib.common import log
class FileMonitor(object):
def __init__(self, callback, files):
"""
:files: files to be monidtored with full path
"""
self._callback = callback
self._files = files
self.file_mtimes = {file_name: None for file_name in self._files}
for k in self.file_mtimes:
if not op.exists(k):
continue
try:
if not op.exists(k):
continue
self.file_mtimes[k] = op.getmtime(k)
except OSError:
log.logger.error(
"Getmtime for %s, failed: %s", k, traceback.format_exc()
)
def __call__(self):
return self.check_changes()
def check_changes(self):
log.logger.debug("Checking files=%s", self._files)
file_mtimes = self.file_mtimes
changed_files = []
for f, last_mtime in file_mtimes.items():
try:
if not op.exists(f):
continue
current_mtime = op.getmtime(f)
if current_mtime != last_mtime:
file_mtimes[f] = current_mtime
changed_files.append(f)
log.logger.info("Detect %s has changed", f)
except OSError:
pass
if changed_files:
if self._callback:
self._callback(changed_files)
return True
return False
| 27.683333 | 76 | 0.53823 |
from builtins import object
import os.path as op
import traceback
from splunktalib.common import log
class FileMonitor(object):
def __init__(self, callback, files):
self._callback = callback
self._files = files
self.file_mtimes = {file_name: None for file_name in self._files}
for k in self.file_mtimes:
if not op.exists(k):
continue
try:
if not op.exists(k):
continue
self.file_mtimes[k] = op.getmtime(k)
except OSError:
log.logger.error(
"Getmtime for %s, failed: %s", k, traceback.format_exc()
)
def __call__(self):
return self.check_changes()
def check_changes(self):
log.logger.debug("Checking files=%s", self._files)
file_mtimes = self.file_mtimes
changed_files = []
for f, last_mtime in file_mtimes.items():
try:
if not op.exists(f):
continue
current_mtime = op.getmtime(f)
if current_mtime != last_mtime:
file_mtimes[f] = current_mtime
changed_files.append(f)
log.logger.info("Detect %s has changed", f)
except OSError:
pass
if changed_files:
if self._callback:
self._callback(changed_files)
return True
return False
| true | true |
f71ea182be9148ef98e2c8611759cbd24edced2a | 1,333 | py | Python | copilot/views.py | Feudo-Laranja-ave-do-paraiso-DS-2021-2/copilot-api | 3f8c64cc2fafab1902dcd37f624fcff93f9494aa | [
"MIT"
] | null | null | null | copilot/views.py | Feudo-Laranja-ave-do-paraiso-DS-2021-2/copilot-api | 3f8c64cc2fafab1902dcd37f624fcff93f9494aa | [
"MIT"
] | 3 | 2022-03-10T21:40:58.000Z | 2022-03-15T02:14:50.000Z | copilot/views.py | Feudo-Laranja-ave-do-paraiso-DS-2021-2/copilot-api | 3f8c64cc2fafab1902dcd37f624fcff93f9494aa | [
"MIT"
] | null | null | null | from rest_framework.viewsets import ModelViewSet
from .models import Profile, Group
from .serializers import ProfileSerializers, GroupSerializers
from rest_framework.response import Response
from rest_framework.decorators import action
from itertools import chain
class ProfileViewSet(ModelViewSet):
serializer_class = ProfileSerializers
queryset = Profile.objects.all()
filterset_fields = ['id_dispositivo',]
class GroupViewSet(ModelViewSet):
serializer_class = GroupSerializers
queryset = Group.objects.all()
filterset_fields = ['token', ]
@action(methods=['post'], detail=True)
def adicionar_profile(self, request, pk):
profiles = request.data['ids']
group = Group.objects.get(id=pk)
old_profiles = group.profiles.all()
all_profiles = chain(old_profiles, profiles)
group.profiles.set(all_profiles)
group.save()
serializer = self.get_serializer(group)
return Response(serializer.data)
@action(methods=['delete'], detail=True)
def retirar_profile(self, request, pk):
profiles = request.data['ids']
group = Group.objects.get(id=pk)
for id in profiles:
group.profiles.remove(id)
group.save()
serializer = self.get_serializer(group)
return Response(serializer.data)
| 34.179487 | 61 | 0.702926 | from rest_framework.viewsets import ModelViewSet
from .models import Profile, Group
from .serializers import ProfileSerializers, GroupSerializers
from rest_framework.response import Response
from rest_framework.decorators import action
from itertools import chain
class ProfileViewSet(ModelViewSet):
serializer_class = ProfileSerializers
queryset = Profile.objects.all()
filterset_fields = ['id_dispositivo',]
class GroupViewSet(ModelViewSet):
serializer_class = GroupSerializers
queryset = Group.objects.all()
filterset_fields = ['token', ]
@action(methods=['post'], detail=True)
def adicionar_profile(self, request, pk):
profiles = request.data['ids']
group = Group.objects.get(id=pk)
old_profiles = group.profiles.all()
all_profiles = chain(old_profiles, profiles)
group.profiles.set(all_profiles)
group.save()
serializer = self.get_serializer(group)
return Response(serializer.data)
@action(methods=['delete'], detail=True)
def retirar_profile(self, request, pk):
profiles = request.data['ids']
group = Group.objects.get(id=pk)
for id in profiles:
group.profiles.remove(id)
group.save()
serializer = self.get_serializer(group)
return Response(serializer.data)
| true | true |
f71ea2c7e4a8e614b8fbb239298e8c5e740555c8 | 3,282 | py | Python | aliyun-python-sdk-bssopenapi/aliyunsdkbssopenapi/request/v20171214/QuerySettleBillRequest.py | ankitdobhal/aliyun-openapi-python-sdk | 991b1c2d91adc468480defc23ba790d4369cce7b | [
"Apache-2.0"
] | 1 | 2021-03-08T02:59:17.000Z | 2021-03-08T02:59:17.000Z | aliyun-python-sdk-bssopenapi/aliyunsdkbssopenapi/request/v20171214/QuerySettleBillRequest.py | bricklayer-Liu/aliyun-openapi-python-sdk | 20da2554de22679fc7c5462c483663e4d79512aa | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-bssopenapi/aliyunsdkbssopenapi/request/v20171214/QuerySettleBillRequest.py | bricklayer-Liu/aliyun-openapi-python-sdk | 20da2554de22679fc7c5462c483663e4d79512aa | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkbssopenapi.endpoint import endpoint_data
class QuerySettleBillRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'BssOpenApi', '2017-12-14', 'QuerySettleBill')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ProductCode(self):
return self.get_query_params().get('ProductCode')
def set_ProductCode(self,ProductCode):
self.add_query_param('ProductCode',ProductCode)
def get_IsHideZeroCharge(self):
return self.get_query_params().get('IsHideZeroCharge')
def set_IsHideZeroCharge(self,IsHideZeroCharge):
self.add_query_param('IsHideZeroCharge',IsHideZeroCharge)
def get_IsDisplayLocalCurrency(self):
return self.get_query_params().get('IsDisplayLocalCurrency')
def set_IsDisplayLocalCurrency(self,IsDisplayLocalCurrency):
self.add_query_param('IsDisplayLocalCurrency',IsDisplayLocalCurrency)
def get_SubscriptionType(self):
return self.get_query_params().get('SubscriptionType')
def set_SubscriptionType(self,SubscriptionType):
self.add_query_param('SubscriptionType',SubscriptionType)
def get_BillingCycle(self):
return self.get_query_params().get('BillingCycle')
def set_BillingCycle(self,BillingCycle):
self.add_query_param('BillingCycle',BillingCycle)
def get_Type(self):
return self.get_query_params().get('Type')
def set_Type(self,Type):
self.add_query_param('Type',Type)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_BillOwnerId(self):
return self.get_query_params().get('BillOwnerId')
def set_BillOwnerId(self,BillOwnerId):
self.add_query_param('BillOwnerId',BillOwnerId)
def get_ProductType(self):
return self.get_query_params().get('ProductType')
def set_ProductType(self,ProductType):
self.add_query_param('ProductType',ProductType)
def get_NextToken(self):
return self.get_query_params().get('NextToken')
def set_NextToken(self,NextToken):
self.add_query_param('NextToken',NextToken)
def get_MaxResults(self):
return self.get_query_params().get('MaxResults')
def set_MaxResults(self,MaxResults):
self.add_query_param('MaxResults',MaxResults) | 33.489796 | 75 | 0.771176 |
from aliyunsdkcore.request import RpcRequest
from aliyunsdkbssopenapi.endpoint import endpoint_data
class QuerySettleBillRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'BssOpenApi', '2017-12-14', 'QuerySettleBill')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ProductCode(self):
return self.get_query_params().get('ProductCode')
def set_ProductCode(self,ProductCode):
self.add_query_param('ProductCode',ProductCode)
def get_IsHideZeroCharge(self):
return self.get_query_params().get('IsHideZeroCharge')
def set_IsHideZeroCharge(self,IsHideZeroCharge):
self.add_query_param('IsHideZeroCharge',IsHideZeroCharge)
def get_IsDisplayLocalCurrency(self):
return self.get_query_params().get('IsDisplayLocalCurrency')
def set_IsDisplayLocalCurrency(self,IsDisplayLocalCurrency):
self.add_query_param('IsDisplayLocalCurrency',IsDisplayLocalCurrency)
def get_SubscriptionType(self):
return self.get_query_params().get('SubscriptionType')
def set_SubscriptionType(self,SubscriptionType):
self.add_query_param('SubscriptionType',SubscriptionType)
def get_BillingCycle(self):
return self.get_query_params().get('BillingCycle')
def set_BillingCycle(self,BillingCycle):
self.add_query_param('BillingCycle',BillingCycle)
def get_Type(self):
return self.get_query_params().get('Type')
def set_Type(self,Type):
self.add_query_param('Type',Type)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_BillOwnerId(self):
return self.get_query_params().get('BillOwnerId')
def set_BillOwnerId(self,BillOwnerId):
self.add_query_param('BillOwnerId',BillOwnerId)
def get_ProductType(self):
return self.get_query_params().get('ProductType')
def set_ProductType(self,ProductType):
self.add_query_param('ProductType',ProductType)
def get_NextToken(self):
return self.get_query_params().get('NextToken')
def set_NextToken(self,NextToken):
self.add_query_param('NextToken',NextToken)
def get_MaxResults(self):
return self.get_query_params().get('MaxResults')
def set_MaxResults(self,MaxResults):
self.add_query_param('MaxResults',MaxResults) | true | true |
f71ea31341a506b476c8ad73d75b95e72760cafc | 48 | py | Python | aiomatrix/dispatcher/storage/presence/engines/__init__.py | Forden/aiomatrix | d258076bae8eb776495b92be46ee9f4baec8d9a6 | [
"MIT"
] | 2 | 2021-10-29T18:07:08.000Z | 2021-11-19T00:25:43.000Z | aiomatrix/dispatcher/storage/presence/engines/__init__.py | Forden/aiomatrix | d258076bae8eb776495b92be46ee9f4baec8d9a6 | [
"MIT"
] | 1 | 2022-03-06T11:17:43.000Z | 2022-03-06T11:17:43.000Z | aiomatrix/dispatcher/storage/presence/engines/__init__.py | Forden/aiomatrix | d258076bae8eb776495b92be46ee9f4baec8d9a6 | [
"MIT"
] | null | null | null | from .sqlite import SqlitePresenceStorageEngine
| 24 | 47 | 0.895833 | from .sqlite import SqlitePresenceStorageEngine
| true | true |
f71ea377dea2ef46a9377c4904dbec66d0fe8968 | 7,373 | py | Python | tensorflow_datasets/summarization/summscreen/summscreen.py | shubhamkumaR630/datasets | fe9ee91849cefed0953141ea3588f73b7def78fd | [
"Apache-2.0"
] | 2 | 2022-02-14T09:51:39.000Z | 2022-02-14T13:27:49.000Z | tensorflow_datasets/summarization/summscreen/summscreen.py | shubhamkumaR630/datasets | fe9ee91849cefed0953141ea3588f73b7def78fd | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/summarization/summscreen/summscreen.py | shubhamkumaR630/datasets | fe9ee91849cefed0953141ea3588f73b7def78fd | [
"Apache-2.0"
] | 1 | 2020-12-13T22:11:33.000Z | 2020-12-13T22:11:33.000Z | # coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SummScreen Summarization dataset, non-anonymized, non-tokenized version."""
import json
import os
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_DESCRIPTION = """
SummScreen Summarization dataset, non-anonymized, non-tokenized version.
Train/val/test splits and filtering are based on the final tokenized dataset,
but transcripts and recaps provided are based on the untokenized text.
There are two features:
- transcript: Full episode transcripts, each line of dialogue
separated by newlines
- recap: Recaps or summaries of episodes
"""
_CITATION = """\
@article{DBLP:journals/corr/abs-2104-07091,
author = {Mingda Chen and
Zewei Chu and
Sam Wiseman and
Kevin Gimpel},
title = {SummScreen: {A} Dataset for Abstractive Screenplay Summarization},
journal = {CoRR},
volume = {abs/2104.07091},
year = {2021},
url = {https://arxiv.org/abs/2104.07091},
archivePrefix = {arXiv},
eprint = {2104.07091},
timestamp = {Mon, 19 Apr 2021 16:45:47 +0200},
biburl = {https://dblp.org/rec/journals/corr/abs-2104-07091.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
_DL_URLS = {
# pylint: disable=line-too-long
'tokenized':
'https://drive.google.com/uc?export=download&id=1BvdIllGBo9d2-bzXQRzWuJXB04XPVmfF',
'untokenized':
'https://drive.google.com/uc?export=download&id=1tFpt32USOO2i1FWhtFTsyYyFzuRm2k36',
# pylint: enable=line-too-long
}
_RECAP = 'recap'
_TRANSCRIPT = 'transcript'
_RECAP_SOURCE_FULL_NAMES = {
'fd': 'ForeverDreaming',
'tms': 'TVMegaSite',
}
_SPLITS = ['train', 'dev', 'test']
def _load_file(path):
with tf.io.gfile.GFile(path, 'r') as f:
return f.read()
def _load_json(path):
return json.loads(_load_file(path))
def _load_jsonl(path):
return [json.loads(line) for line in _load_file(path).strip().splitlines()]
def _get_filenames_dict(tokenized_path, recap_source: str):
"""Get dictionary of filenames for each split."""
filenames_dict = {}
for split in _SPLITS:
tokenized_data = _load_jsonl(
os.path.join(tokenized_path, 'SummScreen',
_RECAP_SOURCE_FULL_NAMES[recap_source],
f'{recap_source}_{split}.json'))
filenames_dict[split] = [row['filename'] for row in tokenized_data]
return filenames_dict
def _get_paths_dict(untokenized_path, recap_source, filenames_dict):
"""Get dictionary of example paths for each split."""
paths_dict = {}
for split, filenames in filenames_dict.items():
paths_dict[split] = [
os.path.join(untokenized_path, 'SummScreen_raw', recap_source, filename)
for filename in filenames
]
return paths_dict
class SummscreenConfig(tfds.core.BuilderConfig):
"""BuilderConfig for Summscreen."""
def __init__(self, *, recap_source=None, **kwargs):
"""BuilderConfig for Summscreen.
Args:
recap_source: str. The directory for the source of recaps to read.
**kwargs: keyword arguments forwarded to super.
"""
super(SummscreenConfig, self).__init__(**kwargs)
self.recap_source = recap_source
class Summscreen(tfds.core.GeneratorBasedBuilder):
"""DatasetBuilder for non-tokenized, non-anonymized SummScreen dataset."""
VERSION = tfds.core.Version('1.0.0')
RELEASE_NOTES = {
'1.0.0': 'Initial release.',
}
BUILDER_CONFIGS = [
SummscreenConfig(
name='fd',
description='ForeverDreaming',
recap_source='fd',
),
SummscreenConfig(
name='tms',
description='TVMegaSite',
recap_source='tms',
),
]
def _info(self):
# Should return a tfds.core.DatasetInfo object
if self._builder_config.recap_source == 'fd':
features = tfds.features.FeaturesDict({
_TRANSCRIPT: tfds.features.Text(),
_RECAP: tfds.features.Text(),
'episode_number': tfds.features.Text(),
'episode_title': tfds.features.Text(),
'show_title': tfds.features.Text(),
'transcript_author': tfds.features.Text(),
})
elif self._builder_config.recap_source == 'tms':
features = tfds.features.FeaturesDict({
_TRANSCRIPT:
tfds.features.Text(),
_RECAP:
tfds.features.Text(),
'episode_summary':
tfds.features.Text(),
'show_title':
tfds.features.Text(),
'transcript_author':
tfds.features.Tensor(shape=(None,), dtype=tf.string),
'recap_author':
tfds.features.Text(),
})
else:
raise KeyError(
f'Unknown recap_source {self._builder_config.recap_source}')
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=features,
supervised_keys=(_TRANSCRIPT, _RECAP),
homepage='https://github.com/mingdachen/SummScreen',
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_paths = dl_manager.download_and_extract(_DL_URLS)
filenames_dict = _get_filenames_dict(
tokenized_path=dl_paths['tokenized'],
recap_source=self._builder_config.recap_source,
)
paths_dict = _get_paths_dict(
untokenized_path=dl_paths['untokenized'],
recap_source=self._builder_config.recap_source,
filenames_dict=filenames_dict,
)
return {
'train': self._generate_examples(paths=paths_dict['train']),
'validation': self._generate_examples(paths=paths_dict['dev']),
'test': self._generate_examples(paths=paths_dict['test']),
}
def _generate_examples(self, paths):
for path in paths:
example = _load_json(path)
fname = os.path.basename(path)
if self._builder_config.recap_source == 'fd':
yield fname, {
_TRANSCRIPT: '\n'.join(example['Transcript']),
_RECAP: '\n'.join(example['Recap']),
'episode_number': example['Episode Number'],
'episode_title': example['Episode Title'],
'show_title': example['Show Title'],
'transcript_author': example['Transcript Author'],
}
elif self._builder_config.recap_source == 'tms':
yield fname, {
_TRANSCRIPT: '\n'.join(example['Transcript']),
_RECAP: '\n'.join(example['Recap']),
'episode_summary': '\n'.join(example['Episode Summary']),
'show_title': example['Show Title'],
'transcript_author': example['Transcript Author'],
'recap_author': example['Recap Author'],
}
else:
raise KeyError(
f'Unknown recap_source {self._builder_config.recap_source}')
| 32.623894 | 91 | 0.655907 |
import json
import os
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_DESCRIPTION = """
SummScreen Summarization dataset, non-anonymized, non-tokenized version.
Train/val/test splits and filtering are based on the final tokenized dataset,
but transcripts and recaps provided are based on the untokenized text.
There are two features:
- transcript: Full episode transcripts, each line of dialogue
separated by newlines
- recap: Recaps or summaries of episodes
"""
_CITATION = """\
@article{DBLP:journals/corr/abs-2104-07091,
author = {Mingda Chen and
Zewei Chu and
Sam Wiseman and
Kevin Gimpel},
title = {SummScreen: {A} Dataset for Abstractive Screenplay Summarization},
journal = {CoRR},
volume = {abs/2104.07091},
year = {2021},
url = {https://arxiv.org/abs/2104.07091},
archivePrefix = {arXiv},
eprint = {2104.07091},
timestamp = {Mon, 19 Apr 2021 16:45:47 +0200},
biburl = {https://dblp.org/rec/journals/corr/abs-2104-07091.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
_DL_URLS = {
'tokenized':
'https://drive.google.com/uc?export=download&id=1BvdIllGBo9d2-bzXQRzWuJXB04XPVmfF',
'untokenized':
'https://drive.google.com/uc?export=download&id=1tFpt32USOO2i1FWhtFTsyYyFzuRm2k36',
}
_RECAP = 'recap'
_TRANSCRIPT = 'transcript'
_RECAP_SOURCE_FULL_NAMES = {
'fd': 'ForeverDreaming',
'tms': 'TVMegaSite',
}
_SPLITS = ['train', 'dev', 'test']
def _load_file(path):
with tf.io.gfile.GFile(path, 'r') as f:
return f.read()
def _load_json(path):
return json.loads(_load_file(path))
def _load_jsonl(path):
return [json.loads(line) for line in _load_file(path).strip().splitlines()]
def _get_filenames_dict(tokenized_path, recap_source: str):
filenames_dict = {}
for split in _SPLITS:
tokenized_data = _load_jsonl(
os.path.join(tokenized_path, 'SummScreen',
_RECAP_SOURCE_FULL_NAMES[recap_source],
f'{recap_source}_{split}.json'))
filenames_dict[split] = [row['filename'] for row in tokenized_data]
return filenames_dict
def _get_paths_dict(untokenized_path, recap_source, filenames_dict):
paths_dict = {}
for split, filenames in filenames_dict.items():
paths_dict[split] = [
os.path.join(untokenized_path, 'SummScreen_raw', recap_source, filename)
for filename in filenames
]
return paths_dict
class SummscreenConfig(tfds.core.BuilderConfig):
def __init__(self, *, recap_source=None, **kwargs):
super(SummscreenConfig, self).__init__(**kwargs)
self.recap_source = recap_source
class Summscreen(tfds.core.GeneratorBasedBuilder):
VERSION = tfds.core.Version('1.0.0')
RELEASE_NOTES = {
'1.0.0': 'Initial release.',
}
BUILDER_CONFIGS = [
SummscreenConfig(
name='fd',
description='ForeverDreaming',
recap_source='fd',
),
SummscreenConfig(
name='tms',
description='TVMegaSite',
recap_source='tms',
),
]
def _info(self):
if self._builder_config.recap_source == 'fd':
features = tfds.features.FeaturesDict({
_TRANSCRIPT: tfds.features.Text(),
_RECAP: tfds.features.Text(),
'episode_number': tfds.features.Text(),
'episode_title': tfds.features.Text(),
'show_title': tfds.features.Text(),
'transcript_author': tfds.features.Text(),
})
elif self._builder_config.recap_source == 'tms':
features = tfds.features.FeaturesDict({
_TRANSCRIPT:
tfds.features.Text(),
_RECAP:
tfds.features.Text(),
'episode_summary':
tfds.features.Text(),
'show_title':
tfds.features.Text(),
'transcript_author':
tfds.features.Tensor(shape=(None,), dtype=tf.string),
'recap_author':
tfds.features.Text(),
})
else:
raise KeyError(
f'Unknown recap_source {self._builder_config.recap_source}')
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=features,
supervised_keys=(_TRANSCRIPT, _RECAP),
homepage='https://github.com/mingdachen/SummScreen',
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_paths = dl_manager.download_and_extract(_DL_URLS)
filenames_dict = _get_filenames_dict(
tokenized_path=dl_paths['tokenized'],
recap_source=self._builder_config.recap_source,
)
paths_dict = _get_paths_dict(
untokenized_path=dl_paths['untokenized'],
recap_source=self._builder_config.recap_source,
filenames_dict=filenames_dict,
)
return {
'train': self._generate_examples(paths=paths_dict['train']),
'validation': self._generate_examples(paths=paths_dict['dev']),
'test': self._generate_examples(paths=paths_dict['test']),
}
def _generate_examples(self, paths):
for path in paths:
example = _load_json(path)
fname = os.path.basename(path)
if self._builder_config.recap_source == 'fd':
yield fname, {
_TRANSCRIPT: '\n'.join(example['Transcript']),
_RECAP: '\n'.join(example['Recap']),
'episode_number': example['Episode Number'],
'episode_title': example['Episode Title'],
'show_title': example['Show Title'],
'transcript_author': example['Transcript Author'],
}
elif self._builder_config.recap_source == 'tms':
yield fname, {
_TRANSCRIPT: '\n'.join(example['Transcript']),
_RECAP: '\n'.join(example['Recap']),
'episode_summary': '\n'.join(example['Episode Summary']),
'show_title': example['Show Title'],
'transcript_author': example['Transcript Author'],
'recap_author': example['Recap Author'],
}
else:
raise KeyError(
f'Unknown recap_source {self._builder_config.recap_source}')
| true | true |
f71ea5d93843377ff6b080e7d44cc423b011871b | 2,783 | py | Python | components_library/cachehierarchies/abstract_cache_hierarchy.py | zinob15/gem5 | fb2946e314ea9e63c7696ee8023150ed13956582 | [
"BSD-3-Clause"
] | 19 | 2018-07-20T15:08:50.000Z | 2022-03-26T16:15:59.000Z | components_library/cachehierarchies/abstract_cache_hierarchy.py | zinob15/gem5 | fb2946e314ea9e63c7696ee8023150ed13956582 | [
"BSD-3-Clause"
] | 148 | 2018-07-20T00:58:36.000Z | 2021-11-16T01:52:33.000Z | components_library/cachehierarchies/abstract_cache_hierarchy.py | zinob15/gem5 | fb2946e314ea9e63c7696ee8023150ed13956582 | [
"BSD-3-Clause"
] | 10 | 2019-01-10T03:01:30.000Z | 2022-01-21T18:36:18.000Z | # Copyright (c) 2021 The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from abc import ABCMeta, abstractmethod
from ..boards.abstract_board import AbstractBoard
from m5.objects import SubSystem
class AbstractCacheHierarchy(SubSystem):
    """
    A Cache Hierarchy incorporates any system components which manage
    communication between the processor and memory. E.g., Caches, the MemBus,
    MMU, and the MMU Cache.

    All Cache Hierarchies must have this as a base class.
    """
    # FIX: this docstring was previously a bare string statement placed after
    # __init__, so it never became the class's __doc__.  Moved to the proper
    # first-statement position.

    # NOTE: ``__metaclass__`` is the Python 2 spelling and has no effect on
    # Python 3 (which this file targets, given the type annotations below).
    # It is kept as-is rather than switched to ``metaclass=ABCMeta`` because
    # SubSystem is an m5 SimObject and already supplies its own metaclass;
    # combining the two could raise a metaclass conflict.
    __metaclass__ = ABCMeta

    def __init__(self):
        # Zero-argument super() is equivalent to the old two-argument form on
        # Python 3 and does not hard-code the class name.
        super().__init__()

    @abstractmethod
    def incorporate_cache(self, board: AbstractBoard) -> None:
        """
        Incorporates the caches into a board.

        Each specific hierarchy needs to implement this function and will be
        unique for each setup.

        :param board: The board in which the cache hierarchy is to be
        incorporated.

        :type board: AbstractBoard
        """
        raise NotImplementedError

    @abstractmethod
    def is_ruby(self) -> bool:
        """
        Specifies whether this cache hierarchy is using the Ruby memory system
        or not.

        :returns: True if the cache hierarchy is ruby. Otherwise False.
        """
        raise NotImplementedError
| 38.123288 | 78 | 0.743442 |
from abc import ABCMeta, abstractmethod
from ..boards.abstract_board import AbstractBoard
from m5.objects import SubSystem
class AbstractCacheHierarchy(SubSystem):
__metaclass__ = ABCMeta
def __init__(self):
super(AbstractCacheHierarchy, self).__init__()
@abstractmethod
def incorporate_cache(self, board: AbstractBoard) -> None:
raise NotImplementedError
@abstractmethod
def is_ruby(self) -> bool:
raise NotImplementedError
| true | true |
f71ea6560181038415ecc054dc22addd8cc08dd2 | 13,031 | py | Python | mt3/datasets.py | AK391/mt3 | e03242bdbb877c64677024adb3b9eb915d9929d6 | [
"Apache-2.0"
] | 1 | 2022-01-04T04:37:07.000Z | 2022-01-04T04:37:07.000Z | mt3/datasets.py | dogdogshit/mt3 | d43c95ccbf9caa08d18e985ca2f2fc7e286a2f66 | [
"Apache-2.0"
] | null | null | null | mt3/datasets.py | dogdogshit/mt3 | d43c95ccbf9caa08d18e985ca2f2fc7e286a2f66 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset configurations."""
import dataclasses
from typing import Mapping, Sequence, Union
from mt3 import note_sequences
import tensorflow as tf
@dataclasses.dataclass
class InferEvalSplit:
  """Specification of a single inference-eval split.

  Attributes:
    name: Key in the dictionary containing all dataset splits.
    suffix: Task name suffix; each eval split becomes a separate task.
    include_in_mixture: Whether to include this split in the mixture of all
      eval tasks.
  """
  name: str
  suffix: str
  include_in_mixture: bool = True
@dataclasses.dataclass
class DatasetConfig:
  """Configuration for a transcription dataset.

  Bundles the TFRecord paths per split, the tf.Example feature spec used to
  parse those records, the split names used for training/eval, and any track
  specs needed for per-track metrics.
  """
  # dataset name
  name: str
  # mapping from split name to path
  paths: Mapping[str, str]
  # mapping from feature name to feature
  features: Mapping[str, Union[tf.io.FixedLenFeature,
                               tf.io.FixedLenSequenceFeature]]
  # training split name
  train_split: str
  # training eval split name
  train_eval_split: str
  # list of infer eval split specs
  infer_eval_splits: Sequence[InferEvalSplit]
  # list of track specs to be used for metrics
  track_specs: Sequence[note_sequences.TrackSpec] = dataclasses.field(
      default_factory=list)
# MAESTRO v1.0.0: audio + note-sequence TFRecords from the public
# magentadata bucket; subset splits pick individual shards for cheaper eval.
MAESTROV1_CONFIG = DatasetConfig(
    name='maestrov1',
    paths={
        'train':
            'gs://magentadata/datasets/maestro/v1.0.0/maestro-v1.0.0_ns_wav_train.tfrecord-?????-of-00010',
        'train_subset':
            'gs://magentadata/datasets/maestro/v1.0.0/maestro-v1.0.0_ns_wav_train.tfrecord-00002-of-00010',
        'validation':
            'gs://magentadata/datasets/maestro/v1.0.0/maestro-v1.0.0_ns_wav_validation.tfrecord-?????-of-00010',
        'validation_subset':
            'gs://magentadata/datasets/maestro/v1.0.0/maestro-v1.0.0_ns_wav_validation.tfrecord-0000[06]-of-00010',
        'test':
            'gs://magentadata/datasets/maestro/v1.0.0/maestro-v1.0.0_ns_wav_test.tfrecord-?????-of-00010'
    },
    features={
        'audio': tf.io.FixedLenFeature([], dtype=tf.string),
        'sequence': tf.io.FixedLenFeature([], dtype=tf.string),
        'id': tf.io.FixedLenFeature([], dtype=tf.string)
    },
    train_split='train',
    train_eval_split='validation_subset',
    infer_eval_splits=[
        InferEvalSplit(name='train', suffix='eval_train_full',
                       include_in_mixture=False),
        InferEvalSplit(name='train_subset', suffix='eval_train'),
        InferEvalSplit(name='validation', suffix='validation_full',
                       include_in_mixture=False),
        InferEvalSplit(name='validation_subset', suffix='validation'),
        InferEvalSplit(name='test', suffix='test', include_in_mixture=False)
    ])
# MAESTRO v3.0.0: same feature layout as v1, newer dataset revision
# (25 shards per split instead of 10).
MAESTROV3_CONFIG = DatasetConfig(
    name='maestrov3',
    paths={
        'train':
            'gs://magentadata/datasets/maestro/v3.0.0/maestro-v3.0.0_ns_wav_train.tfrecord-?????-of-00025',
        'train_subset':
            'gs://magentadata/datasets/maestro/v3.0.0/maestro-v3.0.0_ns_wav_train.tfrecord-00004-of-00025',
        'validation':
            'gs://magentadata/datasets/maestro/v3.0.0/maestro-v3.0.0_ns_wav_validation.tfrecord-?????-of-00025',
        'validation_subset':
            'gs://magentadata/datasets/maestro/v3.0.0/maestro-v3.0.0_ns_wav_validation.tfrecord-0002?-of-00025',
        'test':
            'gs://magentadata/datasets/maestro/v3.0.0/maestro-v3.0.0_ns_wav_test.tfrecord-?????-of-00025'
    },
    features={
        'audio': tf.io.FixedLenFeature([], dtype=tf.string),
        'sequence': tf.io.FixedLenFeature([], dtype=tf.string),
        'id': tf.io.FixedLenFeature([], dtype=tf.string)
    },
    train_split='train',
    train_eval_split='validation_subset',
    infer_eval_splits=[
        InferEvalSplit(name='train', suffix='eval_train_full',
                       include_in_mixture=False),
        InferEvalSplit(name='train_subset', suffix='eval_train'),
        InferEvalSplit(name='validation', suffix='validation_full',
                       include_in_mixture=False),
        InferEvalSplit(name='validation_subset', suffix='validation'),
        InferEvalSplit(name='test', suffix='test', include_in_mixture=False)
    ])
# GuitarSet: train/validation only; records additionally carry a
# velocity_range feature.
GUITARSET_CONFIG = DatasetConfig(
    name='guitarset',
    paths={
        'train':
            'gs://mt3/data/datasets/guitarset/train.tfrecord-?????-of-00019',
        'validation':
            'gs://mt3/data/datasets/guitarset/validation.tfrecord-?????-of-00006',
    },
    features={
        'sequence': tf.io.FixedLenFeature([], dtype=tf.string),
        'audio': tf.io.FixedLenFeature([], dtype=tf.string),
        'velocity_range': tf.io.FixedLenFeature([], dtype=tf.string),
        'id': tf.io.FixedLenFeature([], dtype=tf.string),
    },
    train_split='train',
    train_eval_split='validation',
    infer_eval_splits=[
        InferEvalSplit(name='train', suffix='eval_train'),
        InferEvalSplit(name='validation', suffix='validation'),
    ])
# URMP: multi-instrument recordings with per-track instrument names and
# per-instrument note sequences.
URMP_CONFIG = DatasetConfig(
    name='urmp',
    paths={
        'train': 'gs://mt3/data/datasets/urmp/train.tfrecord',
        'validation': 'gs://mt3/data/datasets/urmp/validation.tfrecord',
    },
    features={
        'id': tf.io.FixedLenFeature([], dtype=tf.string),
        'tracks': tf.io.FixedLenSequenceFeature(
            [], dtype=tf.int64, allow_missing=True),
        'inst_names': tf.io.FixedLenSequenceFeature(
            [], dtype=tf.string, allow_missing=True),
        'audio': tf.io.FixedLenFeature([], dtype=tf.string),
        'sequence': tf.io.FixedLenFeature([], dtype=tf.string),
        'instrument_sequences': tf.io.FixedLenSequenceFeature(
            [], dtype=tf.string, allow_missing=True),
    },
    train_split='train',
    train_eval_split='validation',
    infer_eval_splits=[
        InferEvalSplit(name='train', suffix='eval_train'),
        InferEvalSplit(name='validation', suffix='validation')
    ])
# MusicNet: audio stored as raw float samples plus an explicit sample_rate
# feature (unlike the serialized-audio datasets above).
MUSICNET_CONFIG = DatasetConfig(
    name='musicnet',
    paths={
        'train':
            'gs://mt3/data/datasets/musicnet/musicnet-train.tfrecord-?????-of-00036',
        'validation':
            'gs://mt3/data/datasets/musicnet/musicnet-validation.tfrecord-?????-of-00005',
        'test':
            'gs://mt3/data/datasets/musicnet/musicnet-test.tfrecord-?????-of-00003'
    },
    features={
        'id': tf.io.FixedLenFeature([], dtype=tf.string),
        'sample_rate': tf.io.FixedLenFeature([], dtype=tf.float32),
        'audio': tf.io.FixedLenSequenceFeature(
            [], dtype=tf.float32, allow_missing=True),
        'sequence': tf.io.FixedLenFeature([], dtype=tf.string)
    },
    train_split='train',
    train_eval_split='validation',
    infer_eval_splits=[
        InferEvalSplit(name='train', suffix='eval_train'),
        InferEvalSplit(name='validation', suffix='validation'),
        InferEvalSplit(name='test', suffix='test', include_in_mixture=False)
    ])
# Cerberus4: Slakh-derived four-instrument mixes (bass/drums/guitar/piano);
# track_specs below drive per-track metrics.
CERBERUS4_CONFIG = DatasetConfig(
    name='cerberus4',
    paths={
        'train':
            'gs://mt3/data/datasets/cerberus4/slakh_multi_cerberus_train_bass:drums:guitar:piano.tfrecord-?????-of-00286',
        'train_subset':
            'gs://mt3/data/datasets/cerberus4/slakh_multi_cerberus_train_bass:drums:guitar:piano.tfrecord-00000-of-00286',
        'validation':
            'gs://mt3/data/datasets/cerberus4/slakh_multi_cerberus_validation_bass:drums:guitar:piano.tfrecord-?????-of-00212',
        'validation_subset':
            'gs://mt3/data/datasets/cerberus4/slakh_multi_cerberus_validation_bass:drums:guitar:piano.tfrecord-0000?-of-00212',
        'test':
            'gs://mt3/data/datasets/cerberus4/slakh_multi_cerberus_test_bass:drums:guitar:piano.tfrecord-?????-of-00106'
    },
    features={
        'audio_sample_rate': tf.io.FixedLenFeature([], dtype=tf.int64),
        'inst_names': tf.io.FixedLenSequenceFeature(
            [], dtype=tf.string, allow_missing=True),
        'midi_class': tf.io.FixedLenSequenceFeature(
            [], dtype=tf.int64, allow_missing=True),
        'mix': tf.io.FixedLenSequenceFeature(
            [], dtype=tf.float32, allow_missing=True),
        'note_sequences': tf.io.FixedLenSequenceFeature(
            [], dtype=tf.string, allow_missing=True),
        'plugin_name': tf.io.FixedLenSequenceFeature(
            [], dtype=tf.int64, allow_missing=True),
        'program_num': tf.io.FixedLenSequenceFeature(
            [], dtype=tf.int64, allow_missing=True),
        'slakh_class': tf.io.FixedLenSequenceFeature(
            [], dtype=tf.int64, allow_missing=True),
        'src_ids': tf.io.FixedLenSequenceFeature(
            [], dtype=tf.string, allow_missing=True),
        'stems': tf.io.FixedLenSequenceFeature(
            [], dtype=tf.float32, allow_missing=True),
        'stems_shape': tf.io.FixedLenFeature([2], dtype=tf.int64),
        'target_type': tf.io.FixedLenFeature([], dtype=tf.string),
        'track_id': tf.io.FixedLenFeature([], dtype=tf.string),
    },
    train_split='train',
    train_eval_split='validation_subset',
    infer_eval_splits=[
        InferEvalSplit(name='train', suffix='eval_train_full',
                       include_in_mixture=False),
        InferEvalSplit(name='train_subset', suffix='eval_train'),
        InferEvalSplit(name='validation', suffix='validation_full',
                       include_in_mixture=False),
        InferEvalSplit(name='validation_subset', suffix='validation'),
        InferEvalSplit(name='test', suffix='test', include_in_mixture=False)
    ],
    track_specs=[
        note_sequences.TrackSpec('bass', program=32),
        note_sequences.TrackSpec('drums', is_drum=True),
        note_sequences.TrackSpec('guitar', program=24),
        note_sequences.TrackSpec('piano', program=0)
    ])
# Slakh: full multi-instrument mixes with all instrument stems (same feature
# layout as Cerberus4, but without fixed per-track metric specs).
SLAKH_CONFIG = DatasetConfig(
    name='slakh',
    paths={
        'train':
            'gs://mt3/data/datasets/slakh/slakh_multi_full_subsets_10_train_all_inst.tfrecord-?????-of-02307',
        'train_subset':
            'gs://mt3/data/datasets/slakh/slakh_multi_full_subsets_10_train_all_inst.tfrecord-00000-of-02307',
        'validation':
            'gs://mt3/data/datasets/slakh/slakh_multi_full_validation_all_inst.tfrecord-?????-of-00168',
        'validation_subset':
            'gs://mt3/data/datasets/slakh/slakh_multi_full_validation_all_inst.tfrecord-0000?-of-00168',
        'test':
            'gs://mt3/data/datasets/slakh/slakh_multi_full_test_all_inst.tfrecord-?????-of-00109'
    },
    features={
        'audio_sample_rate': tf.io.FixedLenFeature([], dtype=tf.int64),
        'inst_names': tf.io.FixedLenSequenceFeature([], dtype=tf.string,
                                                    allow_missing=True),
        'midi_class': tf.io.FixedLenSequenceFeature([], dtype=tf.int64,
                                                    allow_missing=True),
        'mix': tf.io.FixedLenSequenceFeature([], dtype=tf.float32,
                                             allow_missing=True),
        'note_sequences': tf.io.FixedLenSequenceFeature([], dtype=tf.string,
                                                        allow_missing=True),
        'plugin_name': tf.io.FixedLenSequenceFeature([], dtype=tf.int64,
                                                     allow_missing=True),
        'program_num': tf.io.FixedLenSequenceFeature([], dtype=tf.int64,
                                                     allow_missing=True),
        'slakh_class': tf.io.FixedLenSequenceFeature([], dtype=tf.int64,
                                                     allow_missing=True),
        'src_ids': tf.io.FixedLenSequenceFeature([], dtype=tf.string,
                                                 allow_missing=True),
        'stems': tf.io.FixedLenSequenceFeature([], dtype=tf.float32,
                                               allow_missing=True),
        'stems_shape': tf.io.FixedLenFeature([2], dtype=tf.int64),
        'target_type': tf.io.FixedLenFeature([], dtype=tf.string),
        'track_id': tf.io.FixedLenFeature([], dtype=tf.string),
    },
    train_split='train',
    train_eval_split='validation_subset',
    infer_eval_splits=[
        InferEvalSplit(name='train', suffix='eval_train_full',
                       include_in_mixture=False),
        InferEvalSplit(name='train_subset', suffix='eval_train'),
        InferEvalSplit(name='validation', suffix='validation_full',
                       include_in_mixture=False),
        InferEvalSplit(name='validation_subset', suffix='validation'),
        InferEvalSplit(name='test', suffix='test', include_in_mixture=False)
    ])
| 43.875421 | 127 | 0.634027 |
import dataclasses
from typing import Mapping, Sequence, Union
from mt3 import note_sequences
import tensorflow as tf
@dataclasses.dataclass
class InferEvalSplit:
name: str
suffix: str
include_in_mixture: bool = True
@dataclasses.dataclass
class DatasetConfig:
name: str
paths: Mapping[str, str]
features: Mapping[str, Union[tf.io.FixedLenFeature,
tf.io.FixedLenSequenceFeature]]
train_split: str
train_eval_split: str
infer_eval_splits: Sequence[InferEvalSplit]
track_specs: Sequence[note_sequences.TrackSpec] = dataclasses.field(
default_factory=list)
MAESTROV1_CONFIG = DatasetConfig(
name='maestrov1',
paths={
'train':
'gs://magentadata/datasets/maestro/v1.0.0/maestro-v1.0.0_ns_wav_train.tfrecord-?????-of-00010',
'train_subset':
'gs://magentadata/datasets/maestro/v1.0.0/maestro-v1.0.0_ns_wav_train.tfrecord-00002-of-00010',
'validation':
'gs://magentadata/datasets/maestro/v1.0.0/maestro-v1.0.0_ns_wav_validation.tfrecord-?????-of-00010',
'validation_subset':
'gs://magentadata/datasets/maestro/v1.0.0/maestro-v1.0.0_ns_wav_validation.tfrecord-0000[06]-of-00010',
'test':
'gs://magentadata/datasets/maestro/v1.0.0/maestro-v1.0.0_ns_wav_test.tfrecord-?????-of-00010'
},
features={
'audio': tf.io.FixedLenFeature([], dtype=tf.string),
'sequence': tf.io.FixedLenFeature([], dtype=tf.string),
'id': tf.io.FixedLenFeature([], dtype=tf.string)
},
train_split='train',
train_eval_split='validation_subset',
infer_eval_splits=[
InferEvalSplit(name='train', suffix='eval_train_full',
include_in_mixture=False),
InferEvalSplit(name='train_subset', suffix='eval_train'),
InferEvalSplit(name='validation', suffix='validation_full',
include_in_mixture=False),
InferEvalSplit(name='validation_subset', suffix='validation'),
InferEvalSplit(name='test', suffix='test', include_in_mixture=False)
])
MAESTROV3_CONFIG = DatasetConfig(
name='maestrov3',
paths={
'train':
'gs://magentadata/datasets/maestro/v3.0.0/maestro-v3.0.0_ns_wav_train.tfrecord-?????-of-00025',
'train_subset':
'gs://magentadata/datasets/maestro/v3.0.0/maestro-v3.0.0_ns_wav_train.tfrecord-00004-of-00025',
'validation':
'gs://magentadata/datasets/maestro/v3.0.0/maestro-v3.0.0_ns_wav_validation.tfrecord-?????-of-00025',
'validation_subset':
'gs://magentadata/datasets/maestro/v3.0.0/maestro-v3.0.0_ns_wav_validation.tfrecord-0002?-of-00025',
'test':
'gs://magentadata/datasets/maestro/v3.0.0/maestro-v3.0.0_ns_wav_test.tfrecord-?????-of-00025'
},
features={
'audio': tf.io.FixedLenFeature([], dtype=tf.string),
'sequence': tf.io.FixedLenFeature([], dtype=tf.string),
'id': tf.io.FixedLenFeature([], dtype=tf.string)
},
train_split='train',
train_eval_split='validation_subset',
infer_eval_splits=[
InferEvalSplit(name='train', suffix='eval_train_full',
include_in_mixture=False),
InferEvalSplit(name='train_subset', suffix='eval_train'),
InferEvalSplit(name='validation', suffix='validation_full',
include_in_mixture=False),
InferEvalSplit(name='validation_subset', suffix='validation'),
InferEvalSplit(name='test', suffix='test', include_in_mixture=False)
])
GUITARSET_CONFIG = DatasetConfig(
name='guitarset',
paths={
'train':
'gs://mt3/data/datasets/guitarset/train.tfrecord-?????-of-00019',
'validation':
'gs://mt3/data/datasets/guitarset/validation.tfrecord-?????-of-00006',
},
features={
'sequence': tf.io.FixedLenFeature([], dtype=tf.string),
'audio': tf.io.FixedLenFeature([], dtype=tf.string),
'velocity_range': tf.io.FixedLenFeature([], dtype=tf.string),
'id': tf.io.FixedLenFeature([], dtype=tf.string),
},
train_split='train',
train_eval_split='validation',
infer_eval_splits=[
InferEvalSplit(name='train', suffix='eval_train'),
InferEvalSplit(name='validation', suffix='validation'),
])
URMP_CONFIG = DatasetConfig(
name='urmp',
paths={
'train': 'gs://mt3/data/datasets/urmp/train.tfrecord',
'validation': 'gs://mt3/data/datasets/urmp/validation.tfrecord',
},
features={
'id': tf.io.FixedLenFeature([], dtype=tf.string),
'tracks': tf.io.FixedLenSequenceFeature(
[], dtype=tf.int64, allow_missing=True),
'inst_names': tf.io.FixedLenSequenceFeature(
[], dtype=tf.string, allow_missing=True),
'audio': tf.io.FixedLenFeature([], dtype=tf.string),
'sequence': tf.io.FixedLenFeature([], dtype=tf.string),
'instrument_sequences': tf.io.FixedLenSequenceFeature(
[], dtype=tf.string, allow_missing=True),
},
train_split='train',
train_eval_split='validation',
infer_eval_splits=[
InferEvalSplit(name='train', suffix='eval_train'),
InferEvalSplit(name='validation', suffix='validation')
])
MUSICNET_CONFIG = DatasetConfig(
name='musicnet',
paths={
'train':
'gs://mt3/data/datasets/musicnet/musicnet-train.tfrecord-?????-of-00036',
'validation':
'gs://mt3/data/datasets/musicnet/musicnet-validation.tfrecord-?????-of-00005',
'test':
'gs://mt3/data/datasets/musicnet/musicnet-test.tfrecord-?????-of-00003'
},
features={
'id': tf.io.FixedLenFeature([], dtype=tf.string),
'sample_rate': tf.io.FixedLenFeature([], dtype=tf.float32),
'audio': tf.io.FixedLenSequenceFeature(
[], dtype=tf.float32, allow_missing=True),
'sequence': tf.io.FixedLenFeature([], dtype=tf.string)
},
train_split='train',
train_eval_split='validation',
infer_eval_splits=[
InferEvalSplit(name='train', suffix='eval_train'),
InferEvalSplit(name='validation', suffix='validation'),
InferEvalSplit(name='test', suffix='test', include_in_mixture=False)
])
CERBERUS4_CONFIG = DatasetConfig(
name='cerberus4',
paths={
'train':
'gs://mt3/data/datasets/cerberus4/slakh_multi_cerberus_train_bass:drums:guitar:piano.tfrecord-?????-of-00286',
'train_subset':
'gs://mt3/data/datasets/cerberus4/slakh_multi_cerberus_train_bass:drums:guitar:piano.tfrecord-00000-of-00286',
'validation':
'gs://mt3/data/datasets/cerberus4/slakh_multi_cerberus_validation_bass:drums:guitar:piano.tfrecord-?????-of-00212',
'validation_subset':
'gs://mt3/data/datasets/cerberus4/slakh_multi_cerberus_validation_bass:drums:guitar:piano.tfrecord-0000?-of-00212',
'test':
'gs://mt3/data/datasets/cerberus4/slakh_multi_cerberus_test_bass:drums:guitar:piano.tfrecord-?????-of-00106'
},
features={
'audio_sample_rate': tf.io.FixedLenFeature([], dtype=tf.int64),
'inst_names': tf.io.FixedLenSequenceFeature(
[], dtype=tf.string, allow_missing=True),
'midi_class': tf.io.FixedLenSequenceFeature(
[], dtype=tf.int64, allow_missing=True),
'mix': tf.io.FixedLenSequenceFeature(
[], dtype=tf.float32, allow_missing=True),
'note_sequences': tf.io.FixedLenSequenceFeature(
[], dtype=tf.string, allow_missing=True),
'plugin_name': tf.io.FixedLenSequenceFeature(
[], dtype=tf.int64, allow_missing=True),
'program_num': tf.io.FixedLenSequenceFeature(
[], dtype=tf.int64, allow_missing=True),
'slakh_class': tf.io.FixedLenSequenceFeature(
[], dtype=tf.int64, allow_missing=True),
'src_ids': tf.io.FixedLenSequenceFeature(
[], dtype=tf.string, allow_missing=True),
'stems': tf.io.FixedLenSequenceFeature(
[], dtype=tf.float32, allow_missing=True),
'stems_shape': tf.io.FixedLenFeature([2], dtype=tf.int64),
'target_type': tf.io.FixedLenFeature([], dtype=tf.string),
'track_id': tf.io.FixedLenFeature([], dtype=tf.string),
},
train_split='train',
train_eval_split='validation_subset',
infer_eval_splits=[
InferEvalSplit(name='train', suffix='eval_train_full',
include_in_mixture=False),
InferEvalSplit(name='train_subset', suffix='eval_train'),
InferEvalSplit(name='validation', suffix='validation_full',
include_in_mixture=False),
InferEvalSplit(name='validation_subset', suffix='validation'),
InferEvalSplit(name='test', suffix='test', include_in_mixture=False)
],
track_specs=[
note_sequences.TrackSpec('bass', program=32),
note_sequences.TrackSpec('drums', is_drum=True),
note_sequences.TrackSpec('guitar', program=24),
note_sequences.TrackSpec('piano', program=0)
])
SLAKH_CONFIG = DatasetConfig(
name='slakh',
paths={
'train':
'gs://mt3/data/datasets/slakh/slakh_multi_full_subsets_10_train_all_inst.tfrecord-?????-of-02307',
'train_subset':
'gs://mt3/data/datasets/slakh/slakh_multi_full_subsets_10_train_all_inst.tfrecord-00000-of-02307',
'validation':
'gs://mt3/data/datasets/slakh/slakh_multi_full_validation_all_inst.tfrecord-?????-of-00168',
'validation_subset':
'gs://mt3/data/datasets/slakh/slakh_multi_full_validation_all_inst.tfrecord-0000?-of-00168',
'test':
'gs://mt3/data/datasets/slakh/slakh_multi_full_test_all_inst.tfrecord-?????-of-00109'
},
features={
'audio_sample_rate': tf.io.FixedLenFeature([], dtype=tf.int64),
'inst_names': tf.io.FixedLenSequenceFeature([], dtype=tf.string,
allow_missing=True),
'midi_class': tf.io.FixedLenSequenceFeature([], dtype=tf.int64,
allow_missing=True),
'mix': tf.io.FixedLenSequenceFeature([], dtype=tf.float32,
allow_missing=True),
'note_sequences': tf.io.FixedLenSequenceFeature([], dtype=tf.string,
allow_missing=True),
'plugin_name': tf.io.FixedLenSequenceFeature([], dtype=tf.int64,
allow_missing=True),
'program_num': tf.io.FixedLenSequenceFeature([], dtype=tf.int64,
allow_missing=True),
'slakh_class': tf.io.FixedLenSequenceFeature([], dtype=tf.int64,
allow_missing=True),
'src_ids': tf.io.FixedLenSequenceFeature([], dtype=tf.string,
allow_missing=True),
'stems': tf.io.FixedLenSequenceFeature([], dtype=tf.float32,
allow_missing=True),
'stems_shape': tf.io.FixedLenFeature([2], dtype=tf.int64),
'target_type': tf.io.FixedLenFeature([], dtype=tf.string),
'track_id': tf.io.FixedLenFeature([], dtype=tf.string),
},
train_split='train',
train_eval_split='validation_subset',
infer_eval_splits=[
InferEvalSplit(name='train', suffix='eval_train_full',
include_in_mixture=False),
InferEvalSplit(name='train_subset', suffix='eval_train'),
InferEvalSplit(name='validation', suffix='validation_full',
include_in_mixture=False),
InferEvalSplit(name='validation_subset', suffix='validation'),
InferEvalSplit(name='test', suffix='test', include_in_mixture=False)
])
| true | true |
f71ea6594940687e4ec4ac3a81683903513d4887 | 6,303 | py | Python | repo/script.module.liveresolver/lib/js2py/constructors/jsobject.py | Hades01/Addons | 710da97ac850197498a3cd64be1811c593610add | [
"Apache-2.0"
] | 3 | 2020-03-03T13:21:44.000Z | 2021-07-21T09:53:31.000Z | repo/script.module.liveresolver/lib/js2py/constructors/jsobject.py | Hades01/Addons | 710da97ac850197498a3cd64be1811c593610add | [
"Apache-2.0"
] | null | null | null | repo/script.module.liveresolver/lib/js2py/constructors/jsobject.py | Hades01/Addons | 710da97ac850197498a3cd64be1811c593610add | [
"Apache-2.0"
] | 2 | 2020-04-01T22:11:12.000Z | 2020-05-07T23:54:52.000Z | from js2py.base import *
#todo Double check everything is OK
@Js
def Object():
    """Implementation of the JS ``Object(...)`` function call.

    ``arguments`` is supplied by the surrounding js2py runtime (the function
    is wrapped by ``@Js``).  Called with ``null``/``undefined`` (or with no
    argument) it returns a fresh empty object; any other value is converted
    via ToObject.
    """
    val = arguments.get('0')
    if val.is_null() or val.is_undefined():
        return PyJsObject(prototype=ObjectPrototype)
    return val.to_object()
@Js
def object_constructor():
    """``new Object(...)``: return existing objects unchanged, box
    Number/String/Boolean primitives, otherwise create a fresh empty object.
    """
    if len(arguments):
        val = arguments.get('0')
        if val.TYPE=='Object':
            #Implementation dependent, but my will simply return :)
            return val
        elif val.TYPE in ['Number', 'String', 'Boolean']:
            return val.to_object()
    return PyJsObject(prototype=ObjectPrototype)
# NOTE(review): fill_prototype(Object, ObjectMethods, ...) below also copies
# ObjectMethods.create onto Object, which would overwrite this assignment --
# confirm whether this line is still needed.
Object.create = object_constructor
class ObjectMethods:
    """Static methods of the JS ``Object`` constructor (Object.keys,
    Object.freeze, ...).

    The plain functions below are attached to the ``Object`` function via
    ``fill_prototype``; each receives already-wrapped js2py values.
    """

    def getPrototypeOf(obj):
        """Object.getPrototypeOf(O) -- the [[Prototype]] of O, or null."""
        if not obj.is_object():
            raise MakeError('TypeError', 'Object.getPrototypeOf called on non-object')
        return null if obj.prototype is None else obj.prototype

    def getOwnPropertyDescriptor(obj, prop):
        """Object.getOwnPropertyDescriptor(O, P) -- own descriptor, or
        undefined (None) when the property does not exist."""
        if not obj.is_object():
            raise MakeError('TypeError', 'Object.getOwnPropertyDescriptor called on non-object')
        return obj.own.get(prop.to_string().value)

    def getOwnPropertyNames(obj):
        """Object.getOwnPropertyNames(O) -- list of all own property names."""
        if not obj.is_object():
            # Fixed copy/paste: the error message previously said
            # "getOwnPropertyDescriptor".
            raise MakeError('TypeError', 'Object.getOwnPropertyNames called on non-object')
        # list() so the result is a real list on Python 3 as well, where
        # dict.keys() returns a view (on Python 2 keys() was already a list).
        return list(obj.own.keys())

    def create(obj):
        """Object.create(O[, Properties]) -- new object with prototype O."""
        if not (obj.is_object() or obj.is_null()):
            raise MakeError('TypeError', 'Object prototype may only be an Object or null')
        temp = PyJsObject(prototype=(None if obj.is_null() else obj))
        if len(arguments) > 1 and not arguments[1].is_undefined():
            # Fetch the plain function from the class __dict__: this works on
            # Python 2 (where ObjectMethods.defineProperties is an unbound
            # method) *and* Python 3 (where it is already a function and has
            # no __func__ attribute, so the old ``.__func__`` access raised
            # AttributeError).
            ObjectMethods.__dict__['defineProperties'](temp, arguments[1])
        return temp

    def defineProperty(obj, prop, attrs):
        """Object.defineProperty(O, P, Attributes) -- define one property."""
        if not obj.is_object():
            raise MakeError('TypeError', 'Object.defineProperty called on non-object')
        name = prop.to_string().value
        if not obj.define_own_property(name, ToPropertyDescriptor(attrs)):
            raise MakeError('TypeError', 'Cannot redefine property: %s' % name)
        return obj

    def defineProperties(obj, properties):
        """Object.defineProperties(O, Properties) -- define many properties."""
        if not obj.is_object():
            raise MakeError('TypeError', 'Object.defineProperties called on non-object')
        props = properties.to_object()
        for name in props:
            desc = ToPropertyDescriptor(props.get(name.value))
            if not obj.define_own_property(name.value, desc):
                raise MakeError('TypeError', 'Failed to define own property: %s' % name.value)
        return obj

    def seal(obj):
        """Object.seal(O) -- make every own property non-configurable and
        prevent extensions."""
        if not obj.is_object():
            raise MakeError('TypeError', 'Object.seal called on non-object')
        for desc in obj.own.values():
            desc['configurable'] = False
        obj.extensible = False
        return obj

    def freeze(obj):
        """Object.freeze(O) -- seal O and make data properties read-only."""
        if not obj.is_object():
            raise MakeError('TypeError', 'Object.freeze called on non-object')
        for desc in obj.own.values():
            desc['configurable'] = False
            if is_data_descriptor(desc):
                desc['writable'] = False
        obj.extensible = False
        return obj

    def preventExtensions(obj):
        """Object.preventExtensions(O) -- disallow adding new properties."""
        if not obj.is_object():
            raise MakeError('TypeError', 'Object.preventExtensions on non-object')
        obj.extensible = False
        return obj

    def isSealed(obj):
        """Object.isSealed(O) -- non-extensible with no configurable props."""
        if not obj.is_object():
            raise MakeError('TypeError', 'Object.isSealed called on non-object')
        if obj.extensible:
            return False
        for desc in obj.own.values():
            if desc['configurable']:
                return False
        return True

    def isFrozen(obj):
        """Object.isFrozen(O) -- sealed with no writable data properties."""
        if not obj.is_object():
            raise MakeError('TypeError', 'Object.isFrozen called on non-object')
        if obj.extensible:
            return False
        for desc in obj.own.values():
            if desc['configurable']:
                return False
            if is_data_descriptor(desc) and desc['writable']:
                return False
        return True

    def isExtensible(obj):
        """Object.isExtensible(O) -- whether new properties may be added."""
        if not obj.is_object():
            raise MakeError('TypeError', 'Object.isExtensible called on non-object')
        return obj.extensible

    def keys(obj):
        """Object.keys(O) -- names of enumerable own properties."""
        if not obj.is_object():
            raise MakeError('TypeError', 'Object.keys called on non-object')
        # items() instead of the Python2-only iteritems(), which raised
        # AttributeError on Python 3.
        return [e for e, d in obj.own.items() if d.get('enumerable')]
# Attach the ObjectMethods functions (keys, freeze, create, ...) as
# properties of the Object constructor itself.
fill_prototype(Object, ObjectMethods, default_attrs)
# Object.prototype.constructor === Object
fill_in_props(ObjectPrototype, {'constructor':Object}, default_attrs)
# Object.prototype: non-writable, non-enumerable, non-configurable,
# matching the standard attributes of a built-in constructor's prototype.
Object.define_own_property('prototype', {'value': ObjectPrototype,
                                         'enumerable': False,
                                         'writable': False,
                                         'configurable': False})
# some utility functions:
def ToPropertyDescriptor(obj):  # ECMA-262 5.1, 8.10.5 (page 38 / 50 absolute)
    """Translate a JS descriptor object into an internal descriptor dict.

    Fields are read in spec order -- enumerable, configurable, value,
    writable, get, set -- so any JS getters on *obj* fire in the same order
    as before.  Raises a js2py TypeError for non-objects, for non-callable
    accessors, and for descriptors that mix accessors with value/writable.
    """
    if obj.TYPE != 'Object':
        raise MakeError('TypeError', 'Can\'t convert non-object to property descriptor')
    desc = {}

    def read_flag(field):
        # Optional boolean field -> plain Python bool.
        if obj.has_property(field):
            desc[field] = obj.get(field).to_boolean().value

    def read_accessor(field, kind):
        # Optional getter/setter -> must be callable or undefined.
        if obj.has_property(field):
            candidate = obj.get(field)
            if not (candidate.is_undefined() or candidate.is_callable()):
                raise MakeError('TypeError',
                                'Invalid %s (it has to be a function or undefined)' % kind)
            desc[field] = candidate

    read_flag('enumerable')
    read_flag('configurable')
    if obj.has_property('value'):
        desc['value'] = obj.get('value')
    read_flag('writable')
    read_accessor('get', 'getter')
    read_accessor('set', 'setter')
    if ('get' in desc or 'set' in desc) and ('value' in desc or 'writable' in desc):
        raise MakeError('TypeError', 'Invalid property. A property cannot both have accessors and be writable or have a value.')
    return desc
| 37.517857 | 129 | 0.621609 | from js2py.base import *
@Js
def Object():
val = arguments.get('0')
if val.is_null() or val.is_undefined():
return PyJsObject(prototype=ObjectPrototype)
return val.to_object()
@Js
def object_constructor():
if len(arguments):
val = arguments.get('0')
if val.TYPE=='Object':
return val
elif val.TYPE in ['Number', 'String', 'Boolean']:
return val.to_object()
return PyJsObject(prototype=ObjectPrototype)
Object.create = object_constructor
class ObjectMethods:
def getPrototypeOf(obj):
if not obj.is_object():
raise MakeError('TypeError', 'Object.getPrototypeOf called on non-object')
return null if obj.prototype is None else obj.prototype
def getOwnPropertyDescriptor (obj, prop):
if not obj.is_object():
raise MakeError('TypeError', 'Object.getOwnPropertyDescriptor called on non-object')
return obj.own.get(prop.to_string().value)
def getOwnPropertyNames(obj):
if not obj.is_object():
raise MakeError('TypeError', 'Object.getOwnPropertyDescriptor called on non-object')
return obj.own.keys()
def create(obj):
if not (obj.is_object() or obj.is_null()):
raise MakeError('TypeError', 'Object prototype may only be an Object or null')
temp = PyJsObject(prototype=(None if obj.is_null() else obj))
if len(arguments)>1 and not arguments[1].is_undefined():
ObjectMethods.defineProperties.__func__(temp, arguments[1])
return temp
def defineProperty(obj, prop, attrs):
if not obj.is_object():
raise MakeError('TypeError', 'Object.defineProperty called on non-object')
name = prop.to_string().value
if not obj.define_own_property(name, ToPropertyDescriptor(attrs)):
raise MakeError('TypeError', 'Cannot redefine property: %s' % name)
return obj
def defineProperties(obj, properties):
if not obj.is_object():
raise MakeError('TypeError', 'Object.defineProperties called on non-object')
props = properties.to_object()
for name in props:
desc = ToPropertyDescriptor(props.get(name.value))
if not obj.define_own_property(name.value, desc):
raise MakeError('TypeError', 'Failed to define own property: %s'%name.value)
return obj
def seal(obj):
if not obj.is_object():
raise MakeError('TypeError', 'Object.seal called on non-object')
for desc in obj.own.values():
desc['configurable'] = False
obj.extensible = False
return obj
def freeze(obj):
if not obj.is_object():
raise MakeError('TypeError', 'Object.freeze called on non-object')
for desc in obj.own.values():
desc['configurable'] = False
if is_data_descriptor(desc):
desc['writable'] = False
obj.extensible = False
return obj
def preventExtensions(obj):
if not obj.is_object():
raise MakeError('TypeError', 'Object.preventExtensions on non-object')
obj.extensible = False
return obj
def isSealed(obj):
if not obj.is_object():
raise MakeError('TypeError', 'Object.isSealed called on non-object')
if obj.extensible:
return False
for desc in obj.own.values():
if desc['configurable']:
return False
return True
def isFrozen(obj):
if not obj.is_object():
raise MakeError('TypeError', 'Object.isFrozen called on non-object')
if obj.extensible:
return False
for desc in obj.own.values():
if desc['configurable']:
return False
if is_data_descriptor(desc) and desc['writable']:
return False
return True
def isExtensible(obj):
if not obj.is_object():
raise MakeError('TypeError', 'Object.isExtensible called on non-object')
return obj.extensible
def keys(obj):
if not obj.is_object():
raise MakeError('TypeError', 'Object.keys called on non-object')
return [e for e,d in obj.own.iteritems() if d.get('enumerable')]
fill_prototype(Object, ObjectMethods, default_attrs)
fill_in_props(ObjectPrototype, {'constructor':Object}, default_attrs)
Object.define_own_property('prototype', {'value': ObjectPrototype,
'enumerable': False,
'writable': False,
'configurable': False})
def ToPropertyDescriptor(obj):
    """Convert a JS object into an internal property-descriptor dict.

    Mirrors ES5.1 8.10.5: boolean attributes are coerced with ToBoolean,
    accessors must be callable or undefined, and a descriptor may not mix
    data fields ('value'/'writable') with accessor fields ('get'/'set').
    """
    if obj.TYPE != 'Object':
        raise MakeError('TypeError', "Can't convert non-object to property descriptor")
    desc = {}
    # Boolean attributes, read in the same order as before.
    for flag in ('enumerable', 'configurable'):
        if obj.has_property(flag):
            desc[flag] = obj.get(flag).to_boolean().value
    if obj.has_property('value'):
        desc['value'] = obj.get('value')
    if obj.has_property('writable'):
        desc['writable'] = obj.get('writable').to_boolean().value
    # Accessors: must be callable or undefined.
    for accessor in ('get', 'set'):
        if obj.has_property(accessor):
            candidate = obj.get(accessor)
            if not (candidate.is_undefined() or candidate.is_callable()):
                kind = 'getter' if accessor == 'get' else 'setter'
                raise MakeError('TypeError', 'Invalid %s (it has to be a function or undefined)' % kind)
            desc[accessor] = candidate
    has_accessor = 'get' in desc or 'set' in desc
    has_data = 'value' in desc or 'writable' in desc
    if has_accessor and has_data:
        raise MakeError('TypeError', 'Invalid property. A property cannot both have accessors and be writable or have a value.')
    return desc
| true | true |
f71ea68918003b20c8ce85f2c5cf70c422756b26 | 3,125 | py | Python | fuji_server/models/license_output_inner.py | ignpelloz/fuji | 5e6fe8333c1706d1b628a84108bff7a97fdf11a7 | [
"MIT"
] | 25 | 2020-09-22T08:28:45.000Z | 2022-02-23T07:10:28.000Z | fuji_server/models/license_output_inner.py | ignpelloz/fuji | 5e6fe8333c1706d1b628a84108bff7a97fdf11a7 | [
"MIT"
] | 188 | 2020-05-11T08:54:59.000Z | 2022-03-31T12:28:15.000Z | fuji_server/models/license_output_inner.py | ignpelloz/fuji | 5e6fe8333c1706d1b628a84108bff7a97fdf11a7 | [
"MIT"
] | 20 | 2020-05-04T13:56:26.000Z | 2022-03-02T13:39:04.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from fuji_server.models.base_model_ import Model
from fuji_server import util
class LicenseOutputInner(Model):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, license: str = None, osi_approved: bool = False, details_url: str = None):  # noqa: E501
        """LicenseOutputInner - a model defined in Swagger
        :param license: The license of this LicenseOutputInner.  # noqa: E501
        :type license: str
        :param osi_approved: The osi_approved of this LicenseOutputInner.  # noqa: E501
        :type osi_approved: bool
        :param details_url: The details_url of this LicenseOutputInner.  # noqa: E501
        :type details_url: str
        """
        # attribute name -> python type, consumed by the Swagger (de)serialiser
        self.swagger_types = {'license': str, 'osi_approved': bool, 'details_url': str}
        # attribute name -> JSON key (note the 'OSI_approved' casing in the wire format)
        self.attribute_map = {'license': 'license', 'osi_approved': 'OSI_approved', 'details_url': 'details_url'}
        self._license = license
        self._osi_approved = osi_approved
        self._details_url = details_url
    @classmethod
    def from_dict(cls, dikt) -> 'LicenseOutputInner':
        """Returns the dict as a model
        :param dikt: A dict.
        :type: dict
        :return: The License_output_inner of this LicenseOutputInner.  # noqa: E501
        :rtype: LicenseOutputInner
        """
        return util.deserialize_model(dikt, cls)
    @property
    def license(self) -> str:
        """Gets the license of this LicenseOutputInner.
        :return: The license of this LicenseOutputInner.
        :rtype: str
        """
        return self._license
    @license.setter
    def license(self, license: str):
        """Sets the license of this LicenseOutputInner.
        :param license: The license of this LicenseOutputInner.
        :type license: str
        """
        self._license = license
    @property
    def osi_approved(self) -> bool:
        """Gets the osi_approved of this LicenseOutputInner.
        :return: The osi_approved of this LicenseOutputInner.
        :rtype: bool
        """
        return self._osi_approved
    @osi_approved.setter
    def osi_approved(self, osi_approved: bool):
        """Sets the osi_approved of this LicenseOutputInner.
        :param osi_approved: The osi_approved of this LicenseOutputInner.
        :type osi_approved: bool
        """
        self._osi_approved = osi_approved
    @property
    def details_url(self) -> str:
        """Gets the details_url of this LicenseOutputInner.
        :return: The details_url of this LicenseOutputInner.
        :rtype: str
        """
        return self._details_url
    @details_url.setter
    def details_url(self, details_url: str):
        """Sets the details_url of this LicenseOutputInner.
        :param details_url: The details_url of this LicenseOutputInner.
        :type details_url: str
        """
        self._details_url = details_url
| 28.935185 | 113 | 0.65056 |
from __future__ import absolute_import
from datetime import date, datetime
from typing import List, Dict
from fuji_server.models.base_model_ import Model
from fuji_server import util
class LicenseOutputInner(Model):
def __init__(self, license: str = None, osi_approved: bool = False, details_url: str = None):
self.swagger_types = {'license': str, 'osi_approved': bool, 'details_url': str}
self.attribute_map = {'license': 'license', 'osi_approved': 'OSI_approved', 'details_url': 'details_url'}
self._license = license
self._osi_approved = osi_approved
self._details_url = details_url
@classmethod
def from_dict(cls, dikt) -> 'LicenseOutputInner':
return util.deserialize_model(dikt, cls)
@property
def license(self) -> str:
return self._license
@license.setter
def license(self, license: str):
self._license = license
@property
def osi_approved(self) -> bool:
return self._osi_approved
@osi_approved.setter
def osi_approved(self, osi_approved: bool):
self._osi_approved = osi_approved
@property
def details_url(self) -> str:
return self._details_url
@details_url.setter
def details_url(self, details_url: str):
self._details_url = details_url
| true | true |
f71ea6b4496eae33b99557e0515c9fe2901709df | 244 | py | Python | problems/303_range_sum_query_immutable.py | wasi0013/leet_code | c589c10f06043fa0ac7643e09ae3903d77c2f8e9 | [
"MIT"
] | null | null | null | problems/303_range_sum_query_immutable.py | wasi0013/leet_code | c589c10f06043fa0ac7643e09ae3903d77c2f8e9 | [
"MIT"
] | null | null | null | problems/303_range_sum_query_immutable.py | wasi0013/leet_code | c589c10f06043fa0ac7643e09ae3903d77c2f8e9 | [
"MIT"
] | null | null | null | class NumArray:
def __init__(self, nums: List[int]):
self.n = list(accumulate(nums))
def sumRange(self, left: int, right: int) -> int:
return self.n[right]- (self.n[left-1] if left>0 else 0)
| 22.181818 | 63 | 0.545082 | class NumArray:
def __init__(self, nums: List[int]):
self.n = list(accumulate(nums))
def sumRange(self, left: int, right: int) -> int:
return self.n[right]- (self.n[left-1] if left>0 else 0)
| true | true |
f71ea6bc8a22ebb8427b7204071db94474f6208a | 5,847 | py | Python | plugins/nxt_plugin/nxt/motcont.py | RodPy/Turtlebots.activity | f885d7d2e5d710c01294ae60da995dfb0eb36b21 | [
"MIT"
] | null | null | null | plugins/nxt_plugin/nxt/motcont.py | RodPy/Turtlebots.activity | f885d7d2e5d710c01294ae60da995dfb0eb36b21 | [
"MIT"
] | null | null | null | plugins/nxt_plugin/nxt/motcont.py | RodPy/Turtlebots.activity | f885d7d2e5d710c01294ae60da995dfb0eb36b21 | [
"MIT"
] | 1 | 2020-06-17T15:44:16.000Z | 2020-06-17T15:44:16.000Z | # nxt.motcont module -- Interface to Linus Atorf's MotorControl NXC
# Copyright (C) 2011 Marcus Wanner
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import nxt
import nxt.error
import time
from threading import Lock
class MotorConError(nxt.error.ProtocolError):
    """Raised when the MotorControl NXC program returns an unexpected reply."""
    pass
def _power(power):
pw = abs(power)
psign = int(power >= 0) * 2 - 1
if psign == -1:
pw += 100
pw = str(pw)
pw = '0'*(3-len(pw))+pw #pad front with 0s to make 3 chars
return pw
def _tacho(tacholimit):
tacho = str(tacholimit)
tacho = '0'*(6-len(tacho))+tacho #pad front with 0s to make 6 chars
return tacho
def interval(delay, lastrun):
    """Block until at least *delay* seconds have passed since *lastrun*.

    :param delay: minimum spacing in seconds required by MotorControl.
    :param lastrun: time.time() timestamp of the previous command.

    Returns immediately when the window has already elapsed.
    """
    now = time.time()
    if lastrun + delay > now:
        # Sleep only for the remainder of the window.  The previous code
        # hard-coded 0.010 here, which under-waited for longer delays
        # (e.g. the 0.015s per-port spacing) and could even hand a
        # negative value to time.sleep(), raising ValueError.
        time.sleep(delay - (now - lastrun))
class MotCont():
    '''
    This class provides an interface to Linus Atorf's MotorControl NXC
    program. It is a wrapper which follows the documentation at
    http://www.mindstorms.rwth-aachen.de/trac/wiki/MotorControl
    and provides command strings and timing intervals as dictated there. To
    use this module, you will need to put MotorControl22.rxe on your NXT
    brick. This file and its corresponding source can be found at
    http://www.mindstorms.rwth-aachen.de/trac/browser/trunk/tools/MotorControl
    You can use nxt_push or any other nxt file manager to put the file on
    the NXT. Before using any of the functions here, use MotCont.start() to
    start the program. You can also start it manually my using the menu on
    the brick. When your script exits, it would be a good idea to do
    b.stop_program().
    '''
    def __init__(self, brick):
        self.brick = brick
        # Serialises the ISMOTORREADY query/reply exchange across threads.
        self.is_ready_lock = Lock()
        # Timestamps used to honour MotorControl's minimum command spacing.
        self.last_is_ready = time.time()-1
        self.last_cmd = {}
    def cmd(self, port, power, tacholimit, speedreg=1, smoothstart=0, brake=0):
        '''
        Sends a "CONTROLLED_MOTORCMD" to MotorControl. port is
        nxt.motor.PORT_[A-C], power is -100-100, tacholimit is 0-999999,
        speedreg is whether to try to maintain speeds under load, and brake is
        whether to enable active braking after the motor is in the specified
        place (DIFFERENT from the nxt.motor.turn() function's brake arg).'''
        # 10ms global spacing plus 15ms per-port spacing, per the protocol docs.
        interval(0.010, self.last_is_ready)
        if port in self.last_cmd:
            interval(0.015, self.last_cmd[port])
        # Mode is a bitmask: 0x01 brake, 0x02 speed regulation, 0x04 smooth start.
        mode = str(
            0x01*int(brake)+
            0x02*int(speedreg)+
            0x04*int(smoothstart)
            )
        command = '1'+str(port)+_power(power)+_tacho(tacholimit)+mode
        self.brick.message_write(1, command)
        self.last_cmd[port] = time.time()
    def move_to(self, port, power, tachocount, speedreg=1, smoothstart=0, brake=0):
        '''
        Same as cmd(), except that the tachocount is subtracted from the motor's
        current position and that value is used to turn the motor. Power is
        -100-100, but the sign is rewritten as needed.'''
        tacho = nxt.Motor(self.brick, port).get_tacho().block_tacho_count
        # Remaining distance; its sign decides the turn direction.
        tacho = tachocount-tacho
        tsign = int(tacho >= 0) * 2 - 1
        tacho = abs(tacho)
        power = abs(power)*tsign
        self.cmd(port, power, tacho, speedreg, smoothstart, brake)
    def reset_tacho(self, port):
        '''
        Sends a "RESET_ERROR_CORRECTION" to MotorControl, which causes it to
        reset the current tacho count for that motor.'''
        interval(0.010, self.last_is_ready)
        self.brick.message_write(1, '2'+str(port))
        self.last_cmd[port] = time.time()
    def is_ready(self, port):
        '''
        Sends an "ISMOTORREADY" to MotorControl and returns the reply.'''
        interval(0.010, self.last_is_ready)
        with self.is_ready_lock:
            self.brick.message_write(1, '3'+str(port))
            time.sleep(0.015) #10ms pause from the docs seems to not be adequate
            # Reply format: first char echoes the port, second is 0/1 readiness.
            reply = self.brick.message_read(0, 1, 1)[1]
            if reply[0] != str(port):
                raise MotorConError, 'Wrong port returned from ISMOTORREADY'
            self.last_is_ready = time.time()
            return bool(int(reply[1]))
    def set_output_state(self, port, power, tacholimit, speedreg=1):
        '''
        Sends a "CLASSIC_MOTORCMD" to MotorControl. Brick is a brick object,
        port is nxt.motor.PORT_[A-C], power is -100-100, tacholimit is 0-999999,
        speedreg is whether to try to maintain speeds under load, and brake is
        whether to enable active braking after the motor is in the specified
        place (DIFFERENT from the nxt.motor.turn() function's brake arg).'''
        interval(0.010, self.last_is_ready)
        if port in self.last_cmd:
            interval(0.015, self.last_cmd[port])
        command = '4'+str(port)+_power(power)+_tacho(tacholimit)+str(speedreg)
        self.brick.message_write(1, command)
        self.last_cmd[port] = time.time()
    def start(self, version=22):
        '''
        Starts the MotorControl program on the brick. It needs to already be
        present on the brick's flash and named MotorControlXX.rxe, where XX is
        the version number passed as the version arg (default is whatever is
        bundled with this version of nxt-python).'''
        try:
            # Stop whatever program is running; DirProtError means none was.
            self.brick.stop_program()
        except nxt.error.DirProtError:
            pass
        self.brick.start_program('MotorControl%d.rxe' % version)
        time.sleep(0.1)
    def stop(self):
        '''
        Used to stop the MotorControl program. All this actually does is stop
        the currently running rxe.'''
        self.brick.stop_program()
| 39.77551 | 83 | 0.677612 |
# Copyright (C) 2011 Marcus Wanner
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import nxt
import nxt.error
import time
from threading import Lock
class MotorConError(nxt.error.ProtocolError):
pass
def _power(power):
pw = abs(power)
psign = int(power >= 0) * 2 - 1
if psign == -1:
pw += 100
pw = str(pw)
pw = '0'*(3-len(pw))+pw #pad front with 0s to make 3 chars
return pw
def _tacho(tacholimit):
tacho = str(tacholimit)
tacho = '0'*(6-len(tacho))+tacho #pad front with 0s to make 6 chars
return tacho
def interval(delay, lastrun):
now = time.time()
if lastrun+delay > now:
diff = now - lastrun
time.sleep(0.010 - diff)
class MotCont():
'''
This class provides an interface to Linus Atorf's MotorControl NXC
program. It is a wrapper which follows the documentation at
http://www.mindstorms.rwth-aachen.de/trac/wiki/MotorControl
and provides command strings and timing intervals as dictated there. To
use this module, you will need to put MotorControl22.rxe on your NXT
brick. This file and its corresponding source can be found at
http://www.mindstorms.rwth-aachen.de/trac/browser/trunk/tools/MotorControl
You can use nxt_push or any other nxt file manager to put the file on
the NXT. Before using any of the functions here, use MotCont.start() to
start the program. You can also start it manually my using the menu on
the brick. When your script exits, it would be a good idea to do
b.stop_program().
'''
def __init__(self, brick):
self.brick = brick
self.is_ready_lock = Lock()
self.last_is_ready = time.time()-1
self.last_cmd = {}
def cmd(self, port, power, tacholimit, speedreg=1, smoothstart=0, brake=0):
'''
Sends a "CONTROLLED_MOTORCMD" to MotorControl. port is
nxt.motor.PORT_[A-C], power is -100-100, tacholimit is 0-999999,
speedreg is whether to try to maintain speeds under load, and brake is
whether to enable active braking after the motor is in the specified
place (DIFFERENT from the nxt.motor.turn() function's brake arg).'''
interval(0.010, self.last_is_ready)
if port in self.last_cmd:
interval(0.015, self.last_cmd[port])
mode = str(
0x01*int(brake)+
0x02*int(speedreg)+
0x04*int(smoothstart)
)
command = '1'+str(port)+_power(power)+_tacho(tacholimit)+mode
self.brick.message_write(1, command)
self.last_cmd[port] = time.time()
def move_to(self, port, power, tachocount, speedreg=1, smoothstart=0, brake=0):
'''
Same as cmd(), except that the tachocount is subtracted from the motor's
current position and that value is used to turn the motor. Power is
-100-100, but the sign is rewritten as needed.'''
tacho = nxt.Motor(self.brick, port).get_tacho().block_tacho_count
tacho = tachocount-tacho
tsign = int(tacho >= 0) * 2 - 1
tacho = abs(tacho)
power = abs(power)*tsign
self.cmd(port, power, tacho, speedreg, smoothstart, brake)
def reset_tacho(self, port):
'''
Sends a "RESET_ERROR_CORRECTION" to MotorControl, which causes it to
reset the current tacho count for that motor.'''
interval(0.010, self.last_is_ready)
self.brick.message_write(1, '2'+str(port))
self.last_cmd[port] = time.time()
def is_ready(self, port):
'''
Sends an "ISMOTORREADY" to MotorControl and returns the reply.'''
interval(0.010, self.last_is_ready)
with self.is_ready_lock:
self.brick.message_write(1, '3'+str(port))
time.sleep(0.015)
reply = self.brick.message_read(0, 1, 1)[1]
if reply[0] != str(port):
raise MotorConError, 'Wrong port returned from ISMOTORREADY'
self.last_is_ready = time.time()
return bool(int(reply[1]))
def set_output_state(self, port, power, tacholimit, speedreg=1):
'''
Sends a "CLASSIC_MOTORCMD" to MotorControl. Brick is a brick object,
port is nxt.motor.PORT_[A-C], power is -100-100, tacholimit is 0-999999,
speedreg is whether to try to maintain speeds under load, and brake is
whether to enable active braking after the motor is in the specified
place (DIFFERENT from the nxt.motor.turn() function's brake arg).'''
interval(0.010, self.last_is_ready)
if port in self.last_cmd:
interval(0.015, self.last_cmd[port])
command = '4'+str(port)+_power(power)+_tacho(tacholimit)+str(speedreg)
self.brick.message_write(1, command)
self.last_cmd[port] = time.time()
def start(self, version=22):
'''
Starts the MotorControl program on the brick. It needs to already be
present on the brick's flash and named MotorControlXX.rxc, where XX is
the version number passed as the version arg (default is whatever is
bundled with this version of nxt-python).'''
try:
self.brick.stop_program()
except nxt.error.DirProtError:
pass
self.brick.start_program('MotorControl%d.rxe' % version)
time.sleep(0.1)
def stop(self):
'''
Used to stop the MotorControl program. All this actually does is stop
the currently running rxe.'''
self.brick.stop_program()
| false | true |
f71ea7abd7acda31d3df346c7631db22cb58ddb5 | 13,002 | py | Python | house_rocket_app.py | Leonardodsch/house-rocket-insights | dd8405b776e223ec5ff8392a027d4b0116fcd7ca | [
"MIT"
] | 1 | 2021-12-24T13:40:09.000Z | 2021-12-24T13:40:09.000Z | house_rocket_app.py | Leonardodsch/house-rocket-insights | dd8405b776e223ec5ff8392a027d4b0116fcd7ca | [
"MIT"
] | null | null | null | house_rocket_app.py | Leonardodsch/house-rocket-insights | dd8405b776e223ec5ff8392a027d4b0116fcd7ca | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import streamlit as st
import plotly.express as px
import ipywidgets as widgets
from ipywidgets import fixed
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style('whitegrid')
st.set_page_config(layout='wide')
@st.cache(allow_output_mutation=True)
def get_data(path):
    """Load a CSV file into a DataFrame, memoised by Streamlit's cache."""
    return pd.read_csv(path)
def barplot(a, b, aux):
    """Draw a blue seaborn barplot of column *b* against column *a* of *aux*,
    with the top/right spines removed; returns the Axes."""
    axes = sns.barplot(x=a, y=b, data=aux, edgecolor='k', palette='Blues')
    sns.despine()
    return axes
# get data
# data: buy suggestions per property (has zipcode/condition/buy columns);
# df:   sell suggestions with seasonality (zipcode/season/price columns);
# df1:  full dataset incl. coordinates, used for the map below.
path = 'data/df_sugestions01.csv'
path2 = 'data/df_sugestions02.csv'
path3 = 'data/df_full.csv'
data = get_data(path)
df = get_data(path2)
df1 = get_data(path3)
# Sidebar filters; each multiselect returns [] when nothing is chosen.
st.sidebar.write()
f_zipcode = st.sidebar.multiselect('Select Zipcode', data['zipcode'].unique())
f_condition = st.sidebar.multiselect('Select Condition', data['condition'].sort_values(ascending=True).unique())
f_buy = st.sidebar.multiselect('Select buy option', data['buy'].unique())
f_season = st.sidebar.multiselect('Select season', df['season'].unique())
# Price bounds for the map's price slider.
min_price = int(df['price'].min())
max_price = int(df['price'].max())
median_price = int(df['price'].median())
st.title('House Rocket')
st.write('A House Rocket é uma empresa focada na compra e venda de imóveis, buscando avaliar e encontrar bons negócios para constituir seu portfólio e oferecer também bons'
' negocios para seus clientes. Diante disso foi realizada uma análise onde diversos imóveis foram explorados e avaliados buscando o que poderia se tornar uma boa oportunidade para a empresa'
' e alguns insights interessantes foram descobertos, algo que se tornará de extremo valor caso seja bem utilizado.'
'Para detalhes mais técnicos e visualização do projeto completo acessar:' ' [GitHub](https://github.com/Leonardodsch/house-rocket-insights)')
st.title('Business Questions')
st.write('As tabelas são interativas e podem ser filtradas a partir das opções na barra lateral, permitindo assim que os imóveis'
' possam ser exibidos de acordo com a preferência.')
st.header(' Quais são os imóveis que a House Rocket deveria comprar e por qual preço ?')
st.write(' Na primeita tabela estão os imóveis agrupados por região (zipcode), com os preços médios de cada região. Estes são avaliados juntamente com o valor'
' da coluna condition de cada imóvel, para assim ser feita uma sugestão de compra ou não')
st.header(' Uma vez a casa comprada, qual o melhor momento para vendê-las e por qual preço ?')
st.write('Na segunda tabela é possivel filtrar os imóveis pela região mas também pela sazonalidade, o que permite ver as melhores opções de compra em cada estação do ano'
' e o valor da venda baseado nas premissas de assumidas no começo do projeto')
# Apply the sidebar filters compositionally: each non-empty selection
# narrows its table independently, an empty selection leaves that
# dimension unfiltered.  This replaces a 14-branch if/elif chain that
# silently ignored some filter combinations (e.g. condition + buy with
# no zipcode/season fell through to the unfiltered default).
# Table 1 (buy suggestions) honours zipcode, condition and buy;
# table 2 (sell suggestions) honours zipcode and season.
data_view = data
if f_zipcode:
    data_view = data_view[data_view['zipcode'].isin(f_zipcode)]
if f_condition:
    data_view = data_view[data_view['condition'].isin(f_condition)]
if f_buy:
    data_view = data_view[data_view['buy'].isin(f_buy)]

df_view = df
if f_zipcode:
    df_view = df_view[df_view['zipcode'].isin(f_zipcode)]
if f_season:
    df_view = df_view[df_view['season'].isin(f_season)]

st.dataframe(data_view, height=400, width=700)
st.dataframe(df_view)
st.header('Mapa com as indicações de compra')
is_check = st.checkbox('Show Map')
if is_check:
    selected_price_range = st.slider('Select the price range', min_price, max_price, median_price)
    buy_select = st.multiselect('Buy option', df1['buy'].unique())

    # Narrow the portfolio by the chosen price ceiling and, when one is
    # selected, by the buy recommendation.  The previous version duplicated
    # the entire map-drawing code in two branches and ignored the price
    # slider whenever no buy option was selected.
    houses = df1[df1['price'] < selected_price_range]
    if buy_select:
        houses = houses[houses['buy'].isin(buy_select)]
    houses = houses[['id', 'zipcode', 'price', 'median_price', 'condition', 'lat', 'long']]

    # draw map: one point per house, coloured by condition, sized by price
    fig = px.scatter_mapbox(
        houses,
        lat='lat',
        lon='long',
        color="condition",
        size="price",
        color_continuous_scale=px.colors.cyclical.IceFire,
        size_max=15,
        zoom=10 )
    fig.update_layout(mapbox_style="open-street-map")
    fig.update_layout(height=600, margin={"r":0,"t":0,"l":0,"b":0})
    st.plotly_chart(fig)
st.title('Business Hypothesis')
# H1
st.header('H1: Imóveis que possuem vista para água, são 30% mais caros, na média')
st.text('Falsa! Imóveis com vista para a agua são 200% mais caros na mádia')
aux = df1[['price','waterfront']].groupby('waterfront').mean().reset_index()
fig = plt.figure(figsize=(9,3))
barplot('waterfront','price',aux)
st.pyplot(fig)
#H2
st.header('H2: Imóveis com data de construção menor que 1955, são 50% mais baratos, na média')
st.text('Falsa! Imóveis com data de construção menot do que 1955 são aproximadamente 1,6% mais baratos')
aux2 = df1[['price','yr_built']].copy()
aux2['yr_built'] = aux2['yr_built'].apply(lambda x: '<= 1955' if x <= 1955 else '> 1955')
aux = aux2[['price','yr_built']].groupby('yr_built').mean().reset_index()
fig2 = plt.figure(figsize=(9,3))
barplot('yr_built','price',aux)
st.pyplot(fig2)
# Evolution over the year
st.header('Evolution over the years')
aux = df1[['price','yr_built']].loc[df1['yr_built'] <= 1955].groupby('yr_built').mean().reset_index()
aux2 = df1[['price','yr_built']].loc[df1['yr_built'] > 1955].groupby('yr_built').mean().reset_index()
fig_ = plt.figure(figsize=(15,7))
plt.subplot(2,1,1)
barplot('yr_built','price', aux)
plt.xticks(rotation=60);
plt.title('Yr_built <= 1955')
plt.subplot(2,1,2)
barplot('yr_built','price',aux2)
plt.xticks(rotation=60);
plt.title('Yr_built > 1955')
plt.tight_layout()
st.pyplot(fig_)
#H3
st.header('H3: Imóveis sem porão possuem area total (sqrt_lot), são 50% maiores do que com porão')
st.text('Falsa! Imóveis sem porão possuem uma area total 23% maior')
aux = df1[['sqft_basement','sqft_lot']].copy()
aux['sqft_basement'] = aux['sqft_basement'].apply(lambda x: 'yes' if x != 0 else 'no')
aux1 = aux[['sqft_basement','sqft_lot']].groupby('sqft_basement').mean().reset_index()
aux1.sort_values(by='sqft_lot', ascending=True, inplace=True)
fig3 = plt.figure(figsize=(9,3))
barplot('sqft_basement','sqft_lot',aux1)
st.pyplot(fig3)
#4
st.header('H4: O crescimento do preço dos imóveis YoY ( Year over Year ) é de 10%')
st.text('Falsa O crescimento do preço dos imoveis YoY é de 2%')
aux = df1[['price','year']].loc[df1['month'] == 5].copy()
aux1 = aux[['price','year']].groupby('year').mean().reset_index()
fig4 = plt.figure(figsize=(9,3))
barplot('year','price',aux1)
st.pyplot(fig4)
#5
st.header('H5: Imóveis com 3 banheiros tem um crescimento MoM ( Month over Month ) de 15%')
st.text('Falsa! Imóveis com 3 banheiros não possuem um crescimento MoM de 15%')
aux = df1[['price','month']].loc[df1['bathrooms'] == 3].groupby(['month']).mean().reset_index()
aux['growth'] = aux['price'].pct_change()
fig5 = plt.figure(figsize=(9,3))
plt.subplot(2,1,1)
plt.plot('month','price', data=aux)
plt.ylabel('Price')
plt.subplot(2,1,2)
barplot('month','growth',aux)
st.pyplot(fig5)
#6
st.header('H6: Imóveis com 3 ou mais banheiros são 30% mais caros, na média')
st.text('Falsa! Impoveis com 3 ou mais banheiros são 100% mais caros na média')
aux = df1[['bathrooms','price']].copy()
aux['bathrooms'] = aux['bathrooms'].apply(lambda x: '>= 3' if x >=3 else '< 3')
aux1 = aux[['price','bathrooms']].groupby('bathrooms').mean().reset_index()
fig6 = plt.figure(figsize=(9,3))
barplot('bathrooms','price',aux1)
st.pyplot(fig6)
#7
st.header('H7: Imóveis com condition igual ou maior do que 4 são 40% mais caros, na média')
st.text('Falsa! Imóveis com condition igual ou maior do que 4 são 0,5% mais caros, na média')
aux = df1[['price','condition']].copy()
aux['condition'] = aux['condition'].apply(lambda x: '< 4' if x < 4 else '>= 4')
aux1 = aux[['price','condition']].groupby('condition').mean().reset_index()
fig7 = plt.figure(figsize=(9,3))
barplot('condition','price',aux1)
st.pyplot(fig7)
#8
st.header('H8: Imóveis vendidos no inverno são 30% mais baratos na média do que imóveis vendidos no verão')
st.text('Falsa! Imóveis vendidos no inverno são 4% mais baratos na média do que imóveis vendidos no verão')
aux = df1[['price','season']].loc[(df1['season'] == 'winter') | (df1['season'] == 'summer') ].copy()
aux1 = aux[['price','season']].groupby('season').mean().reset_index()
aux1.sort_values(by='price', ascending=True, inplace=True)
fig8 = plt.figure(figsize=(9,3))
barplot('season','price',aux1)
st.pyplot(fig8)
#9
st.header('H9: Imóveis com mais de 400m2 (m2_living) são 50% mais caros na media')
st.text('Falsa! Imóveis com mais de 400m2 são 230% mais caros na média')
aux = df1[['price','m2_living']].copy()
aux['m2_living'] = aux['m2_living'].apply(lambda x: '< 400' if x < 400 else '> 400')
aux1= aux[['price','m2_living']].groupby('m2_living').mean().reset_index()
fig9 = plt.figure(figsize=(9,3))
barplot('m2_living','price',aux1)
st.pyplot(fig9)
#10
st.header('H10: Imóveis com menos de 100m2 tem um crescimento Mom ( Month over Month ) de 20%')
st.text('Falsa! Imóveis com menos de 100m2 não possuem um crescimento MoM de 20%')
aux = df1[['price','month']].loc[df1['m2_living'] < 100 ].groupby('month').mean().reset_index()
aux['growth'] = aux['price'].pct_change()
fig10 = plt.figure(figsize=(9,3))
plt.subplot(2,1,1)
plt.plot('month','price', data=aux)
plt.ylabel('Price')
plt.subplot(2,1,2)
barplot('month','growth',aux)
st.pyplot(fig10)
#11
st.header('H11: Imóveis com 4 ou mais quartos são 50% mais caros, na média')
st.text('Verdadeira! Imóveis com 4 ou mais quartos são 50% mais caros, na média')
aux = df1[['bedrooms','price']].copy()
aux['bedrooms'] = aux['bedrooms'].apply(lambda x: '< 4' if x < 4 else '>= 4')
aux1= aux[['price','bedrooms']].groupby('bedrooms').mean().reset_index()
fig11 = plt.figure(figsize=(9,3))
barplot('bedrooms','price',aux1)
st.pyplot(fig11)
| 42.769737 | 199 | 0.659745 | import pandas as pd
import numpy as np
import streamlit as st
import plotly.express as px
import ipywidgets as widgets
from ipywidgets import fixed
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style('whitegrid')
st.set_page_config(layout='wide')
@st.cache(allow_output_mutation=True)
def get_data(path):
data = pd.read_csv(path)
return data
def barplot(a,b, aux):
plot = sns.barplot(x=a, y=b, data=aux, edgecolor='k', palette='Blues')
sns.despine()
return plot
path = 'data/df_sugestions01.csv'
path2 = 'data/df_sugestions02.csv'
path3 = 'data/df_full.csv'
data = get_data(path)
df = get_data(path2)
df1 = get_data(path3)
st.sidebar.write()
f_zipcode = st.sidebar.multiselect('Select Zipcode', data['zipcode'].unique())
f_condition = st.sidebar.multiselect('Select Condition', data['condition'].sort_values(ascending=True).unique())
f_buy = st.sidebar.multiselect('Select buy option', data['buy'].unique())
f_season = st.sidebar.multiselect('Select season', df['season'].unique())
min_price = int(df['price'].min())
max_price = int(df['price'].max())
median_price = int(df['price'].median())
st.title('House Rocket')
st.write('A House Rocket é uma empresa focada na compra e venda de imóveis, buscando avaliar e encontrar bons negócios para constituir seu portfólio e oferecer também bons'
' negocios para seus clientes. Diante disso foi realizada uma análise onde diversos imóveis foram explorados e avaliados buscando o que poderia se tornar uma boa oportunidade para a empresa'
' e alguns insights interessantes foram descobertos, algo que se tornará de extremo valor caso seja bem utilizado.'
'Para detalhes mais técnicos e visualização do projeto completo acessar:' ' [GitHub](https://github.com/Leonardodsch/house-rocket-insights)')
st.title('Business Questions')
st.write('As tabelas são interativas e podem ser filtradas a partir das opções na barra lateral, permitindo assim que os imóveis'
' possam ser exibidos de acordo com a preferência.')
st.header(' Quais são os imóveis que a House Rocket deveria comprar e por qual preço ?')
st.write(' Na primeita tabela estão os imóveis agrupados por região (zipcode), com os preços médios de cada região. Estes são avaliados juntamente com o valor'
' da coluna condition de cada imóvel, para assim ser feita uma sugestão de compra ou não')
st.header(' Uma vez a casa comprada, qual o melhor momento para vendê-las e por qual preço ?')
st.write('Na segunda tabela é possivel filtrar os imóveis pela região mas também pela sazonalidade, o que permite ver as melhores opções de compra em cada estação do ano'
' e o valor da venda baseado nas premissas de assumidas no começo do projeto')
if (f_zipcode != []) & (f_condition == []) & (f_buy == []) & (f_season == []):
st.write(data.loc[data['zipcode'].isin(f_zipcode)])
st.write(df.loc[(df['zipcode'].isin(f_zipcode))])
elif (f_condition != []) & (f_zipcode != []) & (f_buy != []) & (f_season != []):
st.write(data.loc[(data['condition'].isin(f_condition)) & (data['zipcode'].isin(f_zipcode)) & (data['buy'].isin(f_buy))])
st.write(df.loc[(df['season'].isin(f_season)) & (df['zipcode'].isin(f_zipcode))])
elif (f_condition != []) & (f_zipcode == []) & (f_buy == []) & (f_season == []):
st.write(data.loc[data['condition'].isin(f_condition)])
st.dataframe(df)
elif (f_buy != []) & (f_zipcode == []) & (f_condition == []) & (f_season == []):
st.write(data.loc[data['buy'].isin(f_buy)])
st.dataframe(df)
elif (f_condition != []) & (f_zipcode != []) & (f_buy == []) & (f_season != []):
st.write(data.loc[(data['condition'].isin(f_condition)) & (data['zipcode'].isin(f_zipcode))])
st.write(df.loc[(df['season'].isin(f_season)) & (df['zipcode'].isin(f_zipcode))])
elif (f_condition == []) & (f_zipcode != []) & (f_buy != []) & (f_season == []):
st.write(data.loc[(data['zipcode'].isin(f_zipcode)) & (data['buy'].isin(f_buy))])
st.write(df.loc[(df['zipcode'].isin(f_zipcode))])
elif (f_season != []) & (f_zipcode == []) & (f_buy == []) & (f_condition == []):
st.dataframe(data, height=400, width=700)
st.write(df.loc[(df['season'].isin(f_season))])
elif (f_season != []) & (f_zipcode == []) & (f_buy != []) & (f_condition == []):
st.write(data.loc[data['buy'].isin(f_buy)])
st.write(df.loc[df['season'].isin(f_season)])
elif (f_season != []) & (f_zipcode == []) & (f_buy == []) & (f_condition != []):
st.write(data.loc[data['condition'].isin(f_condition)])
st.write(df.loc[df['season'].isin(f_season)])
elif (f_season != []) & (f_zipcode == []) & (f_buy != []) & (f_condition != []):
st.write(data.loc[data['condition'].isin(f_condition) & (data['buy'].isin(f_buy))])
st.write(df.loc[df['season'].isin(f_season)])
elif (f_zipcode != []) & (f_condition == []) & (f_buy == []) & (f_season != []):
st.write(data.loc[data['zipcode'].isin(f_zipcode)])
st.write(df.loc[(df['season'].isin(f_season)) & (df['zipcode'].isin(f_zipcode))])
elif (f_condition == []) & (f_zipcode != []) & (f_buy != []) & (f_season != []):
st.write(data.loc[(data['zipcode'].isin(f_zipcode)) & (data['buy'].isin(f_buy))])
st.write(df.loc[(df['season'].isin(f_season)) & (df['zipcode'].isin(f_zipcode))])
elif (f_condition != []) & (f_zipcode != []) & (f_buy == []) & (f_season == []):
st.write(data.loc[(data['condition'].isin(f_condition)) & (data['zipcode'].isin(f_zipcode))])
st.write(df.loc[(df['zipcode'].isin(f_zipcode))])
elif (f_condition != []) & (f_zipcode != []) & (f_buy != []) & (f_season == []):
st.write(data.loc[(data['condition'].isin(f_condition)) & (data['zipcode'].isin(f_zipcode)) & (data['buy'].isin(f_buy))])
st.write(df.loc[(df['zipcode'].isin(f_zipcode))])
else:
data = data.copy()
df = df.copy()
st.dataframe(data, height=400, width=700)
st.dataframe(df)
st.header('Mapa com as indicações de compra')
is_check = st.checkbox('Show Map')
if is_check:
selected_price_range = st.slider('Select the price range', min_price, max_price, median_price)
buy_select = st.multiselect('Buy option', df1['buy'].unique())
if (buy_select != []):
houses = df1[(df1['price'] < selected_price_range) & (df1['buy'].isin(buy_select))][['id','zipcode','price','median_price','condition', 'lat', 'long']]
fig = px.scatter_mapbox(
houses,
lat='lat',
lon='long',
color="condition",
size="price",
color_continuous_scale=px.colors.cyclical.IceFire,
size_max=15,
zoom=10 )
fig.update_layout(mapbox_style="open-street-map")
fig.update_layout(height=600, margin={"r":0,"t":0,"l":0,"b":0})
st.plotly_chart(fig)
else:
houses = df1[['id','zipcode','price','median_price','condition', 'lat', 'long']].copy()
fig = px.scatter_mapbox(
houses,
lat='lat',
lon='long',
color="condition",
size="price",
color_continuous_scale=px.colors.cyclical.IceFire,
size_max=15,
zoom=10 )
fig.update_layout(mapbox_style="open-street-map")
fig.update_layout(height=600, margin={"r":0,"t":0,"l":0,"b":0})
st.plotly_chart(fig)
st.title('Business Hypothesis')
st.header('H1: Imóveis que possuem vista para água, são 30% mais caros, na média')
st.text('Falsa! Imóveis com vista para a agua são 200% mais caros na mádia')
aux = df1[['price','waterfront']].groupby('waterfront').mean().reset_index()
fig = plt.figure(figsize=(9,3))
barplot('waterfront','price',aux)
st.pyplot(fig)
st.header('H2: Imóveis com data de construção menor que 1955, são 50% mais baratos, na média')
st.text('Falsa! Imóveis com data de construção menot do que 1955 são aproximadamente 1,6% mais baratos')
aux2 = df1[['price','yr_built']].copy()
aux2['yr_built'] = aux2['yr_built'].apply(lambda x: '<= 1955' if x <= 1955 else '> 1955')
aux = aux2[['price','yr_built']].groupby('yr_built').mean().reset_index()
fig2 = plt.figure(figsize=(9,3))
barplot('yr_built','price',aux)
st.pyplot(fig2)
st.header('Evolution over the years')
aux = df1[['price','yr_built']].loc[df1['yr_built'] <= 1955].groupby('yr_built').mean().reset_index()
aux2 = df1[['price','yr_built']].loc[df1['yr_built'] > 1955].groupby('yr_built').mean().reset_index()
fig_ = plt.figure(figsize=(15,7))
plt.subplot(2,1,1)
barplot('yr_built','price', aux)
plt.xticks(rotation=60);
plt.title('Yr_built <= 1955')
plt.subplot(2,1,2)
barplot('yr_built','price',aux2)
plt.xticks(rotation=60);
plt.title('Yr_built > 1955')
plt.tight_layout()
st.pyplot(fig_)
st.header('H3: Imóveis sem porão possuem area total (sqrt_lot), são 50% maiores do que com porão')
st.text('Falsa! Imóveis sem porão possuem uma area total 23% maior')
aux = df1[['sqft_basement','sqft_lot']].copy()
aux['sqft_basement'] = aux['sqft_basement'].apply(lambda x: 'yes' if x != 0 else 'no')
aux1 = aux[['sqft_basement','sqft_lot']].groupby('sqft_basement').mean().reset_index()
aux1.sort_values(by='sqft_lot', ascending=True, inplace=True)
fig3 = plt.figure(figsize=(9,3))
barplot('sqft_basement','sqft_lot',aux1)
st.pyplot(fig3)
st.header('H4: O crescimento do preço dos imóveis YoY ( Year over Year ) é de 10%')
st.text('Falsa O crescimento do preço dos imoveis YoY é de 2%')
aux = df1[['price','year']].loc[df1['month'] == 5].copy()
aux1 = aux[['price','year']].groupby('year').mean().reset_index()
fig4 = plt.figure(figsize=(9,3))
barplot('year','price',aux1)
st.pyplot(fig4)
st.header('H5: Imóveis com 3 banheiros tem um crescimento MoM ( Month over Month ) de 15%')
st.text('Falsa! Imóveis com 3 banheiros não possuem um crescimento MoM de 15%')
aux = df1[['price','month']].loc[df1['bathrooms'] == 3].groupby(['month']).mean().reset_index()
aux['growth'] = aux['price'].pct_change()
fig5 = plt.figure(figsize=(9,3))
plt.subplot(2,1,1)
plt.plot('month','price', data=aux)
plt.ylabel('Price')
plt.subplot(2,1,2)
barplot('month','growth',aux)
st.pyplot(fig5)
st.header('H6: Imóveis com 3 ou mais banheiros são 30% mais caros, na média')
st.text('Falsa! Impoveis com 3 ou mais banheiros são 100% mais caros na média')
aux = df1[['bathrooms','price']].copy()
aux['bathrooms'] = aux['bathrooms'].apply(lambda x: '>= 3' if x >=3 else '< 3')
aux1 = aux[['price','bathrooms']].groupby('bathrooms').mean().reset_index()
fig6 = plt.figure(figsize=(9,3))
barplot('bathrooms','price',aux1)
st.pyplot(fig6)
st.header('H7: Imóveis com condition igual ou maior do que 4 são 40% mais caros, na média')
st.text('Falsa! Imóveis com condition igual ou maior do que 4 são 0,5% mais caros, na média')
aux = df1[['price','condition']].copy()
aux['condition'] = aux['condition'].apply(lambda x: '< 4' if x < 4 else '>= 4')
aux1 = aux[['price','condition']].groupby('condition').mean().reset_index()
fig7 = plt.figure(figsize=(9,3))
barplot('condition','price',aux1)
st.pyplot(fig7)
st.header('H8: Imóveis vendidos no inverno são 30% mais baratos na média do que imóveis vendidos no verão')
st.text('Falsa! Imóveis vendidos no inverno são 4% mais baratos na média do que imóveis vendidos no verão')
aux = df1[['price','season']].loc[(df1['season'] == 'winter') | (df1['season'] == 'summer') ].copy()
aux1 = aux[['price','season']].groupby('season').mean().reset_index()
aux1.sort_values(by='price', ascending=True, inplace=True)
fig8 = plt.figure(figsize=(9,3))
barplot('season','price',aux1)
st.pyplot(fig8)
st.header('H9: Imóveis com mais de 400m2 (m2_living) são 50% mais caros na media')
st.text('Falsa! Imóveis com mais de 400m2 são 230% mais caros na média')
aux = df1[['price','m2_living']].copy()
aux['m2_living'] = aux['m2_living'].apply(lambda x: '< 400' if x < 400 else '> 400')
aux1= aux[['price','m2_living']].groupby('m2_living').mean().reset_index()
fig9 = plt.figure(figsize=(9,3))
barplot('m2_living','price',aux1)
st.pyplot(fig9)
st.header('H10: Imóveis com menos de 100m2 tem um crescimento Mom ( Month over Month ) de 20%')
st.text('Falsa! Imóveis com menos de 100m2 não possuem um crescimento MoM de 20%')
aux = df1[['price','month']].loc[df1['m2_living'] < 100 ].groupby('month').mean().reset_index()
aux['growth'] = aux['price'].pct_change()
fig10 = plt.figure(figsize=(9,3))
plt.subplot(2,1,1)
plt.plot('month','price', data=aux)
plt.ylabel('Price')
plt.subplot(2,1,2)
barplot('month','growth',aux)
st.pyplot(fig10)
st.header('H11: Imóveis com 4 ou mais quartos são 50% mais caros, na média')
st.text('Verdadeira! Imóveis com 4 ou mais quartos são 50% mais caros, na média')
aux = df1[['bedrooms','price']].copy()
aux['bedrooms'] = aux['bedrooms'].apply(lambda x: '< 4' if x < 4 else '>= 4')
aux1= aux[['price','bedrooms']].groupby('bedrooms').mean().reset_index()
fig11 = plt.figure(figsize=(9,3))
barplot('bedrooms','price',aux1)
st.pyplot(fig11)
| true | true |
f71ea7c37ca8c9223d397b63be289fc1ae452dd6 | 10,645 | py | Python | extractor.py | vivdiwakar/BambooHR | c1471d4b743aace11cb39efca42be6250d37dc6e | [
"BSD-3-Clause"
] | 1 | 2019-05-15T07:25:01.000Z | 2019-05-15T07:25:01.000Z | extractor.py | vivdiwakar/BambooHR | c1471d4b743aace11cb39efca42be6250d37dc6e | [
"BSD-3-Clause"
] | null | null | null | extractor.py | vivdiwakar/BambooHR | c1471d4b743aace11cb39efca42be6250d37dc6e | [
"BSD-3-Clause"
] | 1 | 2021-08-04T20:44:48.000Z | 2021-08-04T20:44:48.000Z | import argparse
import datetime
import sys
import requests
from os import makedirs
from os.path import dirname, exists
from re import search, sub, escape
import xmltodict
# Setup the CLI arguments parser
parser = argparse.ArgumentParser()
parser.add_argument('auth', help='User API auth key.', type=str)
parser.add_argument('company', help='Company name within BambooHR.', type=str)
parser.add_argument('dest', help='Full path to CSV and artifacts destination.', type=str)
args = parser.parse_args()
epochNow = datetime.datetime.today().strftime('%Y%m%d_%s')
APIPrefix = 'https://api.bamboohr.com/api/gateway.php/' + args.company + '/v1'
userTables = ['jobInfo', 'employmentStatus', 'emergencyContacts', 'compensation', 'customBankDetails',
'customRSADetails', 'employeedependents']
def fetchFromAPI(url, outform):
try:
results = requests.get(url, headers={'Accept': 'application/json'}, auth=(args.auth, ":x"))
if results.status_code == 200:
if outform == 'json':
return results.json()
elif outform == 'xml':
return results.text
else:
sys.stderr.write('API Request error on "' + url + '"; exiting...' + "\n")
exit(1)
except (requests.ConnectionError, requests.exceptions.HTTPError, requests.exceptions.Timeout) as e:
sys.stderr.write('ERROR: ' + str(e) + '; exiting...' + "\n")
exit(1)
def fetchBinaryFile(url, destination):
try:
binary = requests.get(url, headers={'Accept': 'application/json'}, auth=(args.auth, ":x"))
directory = dirname(destination)
if not exists(directory):
makedirs(directory)
with open(destination, 'wb') as f:
f.write(binary.content)
f.close()
except (requests.ConnectionError, requests.exceptions.HTTPError, requests.exceptions.Timeout) as e:
sys.stderr.write('ERROR: ' + str(e) + '; exiting...' + "\n")
exit(1)
def openFileHandler(fileName):
try:
fh = open(fileName, 'a')
return fh
except (PermissionError, OSError, IOError) as e:
sys.stderr.write('ERROR: ' + str(e) + '; exiting...' + "\n")
exit(1)
def processAttrValue(String):
if str(String) == "None" or str(String) == "":
return '-,'
else:
if search("'", str(String)):
return sub("'", '', str(String)) + ','
elif search(",", str(String)):
return sub(r'(.*)', r'"\1"', str(String)) + ','
else:
return str(String) + ','
def checkHeaderForAttribute(fileName, keyword):
try:
fh = open(fileName, 'r')
firstLine = fh.readline()
fh.close()
if search(keyword, firstLine):
return True
else:
return False
except FileNotFoundError:
return False
except (PermissionError, OSError, IOError) as e:
sys.stderr.write('ERROR: ' + str(e) + '; exiting...' + "\n")
exit(1)
def processAPIInfo(httpReturn, allKeys, subKeyList):
csvOutput = ''
if isinstance(httpReturn, dict):
csvOutput = processAttrValue(employee)
for key in allKeys:
if key in subKeyList.keys():
for tag in subKeyList[key]:
csvOutput += processAttrValue(httpReturn[key][tag])
else:
csvOutput += processAttrValue(httpReturn[key])
else:
index = -1
for index in range(len(httpReturn) - 1):
csvOutput += processAPIInfo(httpReturn[index], allKeys, subKeyList) + '\n'
csvOutput += processAPIInfo(httpReturn[(index + 1)], allKeys, subKeyList)
return csvOutput
def writeCSVToFile(fetchInfo, tableName, topKeyList, subKeyList):
allKeys = topKeyList[:]
for parKey in sorted(subKeyList.keys()):
allKeys.append(parKey)
fileName = args.dest + '/' + epochNow + '_' + tableName + '.csv'
headerPresent = checkHeaderForAttribute(fileName, 'displayName')
statusCSV = openFileHandler(fileName)
if headerPresent == False:
header = 'displayName,' + str(','.join(map(str, topKeyList)))
for child in sorted(subKeyList.keys()):
header += ',' + (str(','.join(map(str, subKeyList[child]))))
statusCSV.write(header + "\n")
statusCSV.write(processAPIInfo(fetchInfo, allKeys, subKeyList).rstrip(',') + "\n")
statusCSV.close()
def exec_jobInfo(tableName):
jobInfoKeys = ['jobTitle', 'reportsTo', 'location', 'division', 'department', 'date']
fetchInfo = fetchFromAPI(APIPrefix + '/employees/' + str(employeeID) + '/tables/' + tableName, 'json')
if len(fetchInfo) > 0:
writeCSVToFile(fetchInfo, tableName, jobInfoKeys, {})
def exec_employmentStatus(tableName):
statusKeys = ['employmentStatus', 'employeeId', 'date']
fetchInfo = fetchFromAPI(APIPrefix + '/employees/' + str(employeeID) + '/tables/' + tableName, 'json')
if len(fetchInfo) > 0:
writeCSVToFile(fetchInfo, tableName, statusKeys, {})
def exec_emergencyContacts(tableName):
contactKeys = ['employeeId', 'name', 'relationship', 'homePhone', 'addressLine1', 'addressLine2', 'mobilePhone',
'email', 'zipcode', 'city', 'state', 'country', 'workPhone', 'workPhoneExtension']
fetchInfo = fetchFromAPI(APIPrefix + '/employees/' + str(employeeID) + '/tables/' + tableName, 'json')
if len(fetchInfo) > 0:
writeCSVToFile(fetchInfo, tableName, contactKeys, {})
def exec_compensation(tableName):
compKeys = ['type', 'payPeriod', 'employeeId', 'startDate']
subKeys = {'rate': ['currency', 'value']}
fetchInfo = fetchFromAPI(APIPrefix + '/employees/' + str(employeeID) + '/tables/' + tableName, 'json')
if len(fetchInfo) > 0:
writeCSVToFile(fetchInfo, tableName, compKeys, subKeys)
def exec_customBankDetails(tableName):
bankKeys = ['employeeId', 'customBankName', 'customAccountNumber']
fetchInfo = fetchFromAPI(APIPrefix + '/employees/' + str(employeeID) + '/tables/' + tableName, 'json')
if len(fetchInfo) > 0:
writeCSVToFile(fetchInfo, tableName, bankKeys, {})
def exec_customRSADetails(tableName):
rsaKeys = ['employeeId', 'customPFAName', 'customRSANumber']
fetchInfo = fetchFromAPI(APIPrefix + '/employees/' + str(employeeID) + '/tables/' + tableName, 'json')
if len(fetchInfo) > 0:
writeCSVToFile(fetchInfo, tableName, rsaKeys, {})
def exec_employeedependents(tableName):
depKeys = ['employeeId', 'firstName', 'middleName', 'lastName', 'relationship', 'gender', 'dateOfBirth',
'addressLine1', 'addressLine2', 'city', 'state', 'zipCode', 'homePhone', 'country', 'isUsCitizen',
'isStudent']
fetchInfo = fetchFromAPI(APIPrefix + '/' + tableName + '/?employeeid=' + str(employeeID), 'json')
if len(fetchInfo['Employee Dependents']) > 0:
writeCSVToFile(fetchInfo['Employee Dependents'], tableName, depKeys, {})
def processDict(arg, catName):
spaces = [' ']
return sub(u'(?u)[' + escape(''.join(spaces)) + ']', '_',
str(args.dest + catName + '/' + arg['dateCreated'] + '_' + arg['name']))
def downloadDocuments(employeeID):
fetchInfo = fetchFromAPI(APIPrefix + '/employees/' + str(employeeID) + '/files/view', 'xml')
obj = xmltodict.parse(fetchInfo)
for i in range(len(obj['employee']['category'])):
catName = obj['employee']['category'][i]['name']
try:
if isinstance(obj['employee']['category'][i]['file'], list):
for ind in range(len(obj['employee']['category'][i]['file'])):
filename = processDict(obj['employee']['category'][i]['file'][ind], catName)
fetchBinaryFile(APIPrefix + '/employees/' + str(employeeID) + '/files/' +
str(obj['employee']['category'][i]['file'][ind]['@id']) + '/', filename)
elif isinstance(obj['employee']['category'][i]['file'], dict):
filename = processDict(obj['employee']['category'][i]['file'], catName)
fetchBinaryFile(APIPrefix + '/employees/' + str(employeeID) + '/files/' +
str(obj['employee']['category'][i]['file']['@id']) + '/', filename)
else:
print(type(obj['employee']['category'][i]['file']))
except KeyError:
pass
# Key sets
userKeys = ['id', 'address1', 'address2', 'age', 'bestEmail', 'city', 'country', 'dateOfBirth',
'employeeNumber', 'employmentHistoryStatus', 'firstName', 'fullName1', 'fullName2', 'fullName3', 'fullName4',
'fullName5', 'gender', 'hireDate', 'homeEmail', 'homePhone', 'jobTitle', 'lastChanged', 'department',
'lastName', 'location', 'maritalStatus', 'middleName', 'mobilePhone', 'payChangeReason', 'payGroupId', 'payRate',
'payRateEffectiveDate', 'payType', 'paidPer', 'payPeriod', 'ssn', 'state', 'stateCode', 'supervisor',
'supervisorEId', 'terminationDate', 'workEmail', 'workPhone', 'workPhonePlusExtension', 'workPhoneExtension',
'zipcode', 'isPhotoUploaded', 'employmentStatus', 'nickname', 'photoUploaded', 'customBenefitDue', 'division',
'customBenefitDue', 'customCompany', 'customDateofConfirmation', 'customGrade1', 'customLagosGrade', 'customLevel',
'customNationalInsuranceNumber', 'customNationality', 'customNHFNumber', 'customNIC', 'customNigeriaMobilePhone',
'customNon-DomStatus', 'customPakistanMobilePhone', 'customRwandaMobilePhone', 'customStateofOrigin',
'customTaxIDNumber', 'customUKWorkPermit', 'supervisorId', 'displayName']
# Fetch the list of user IDs
userIDs = []
userIDGet = fetchFromAPI(APIPrefix + '/employees/directory', 'json')
for employee in userIDGet['employees']:
userIDs.append(employee['id'])
# for employeeID in userIDs:
for employeeID in userIDs:
# Do not run for ID 671 - Viv Diwakar
if employeeID != 671:
userInfoGet = fetchFromAPI(APIPrefix + '/employees/' + str(employeeID) + '?fields='
+ ','.join(map(str, userKeys)), 'json')
employee = userInfoGet['displayName']
writeCSVToFile(userInfoGet, 'employees', userKeys[:-1], {})
downloadDocuments(employeeID)
userPicUploaded = fetchFromAPI(APIPrefix + '/employees/' + str(employeeID) + '?fields=isPhotoUploaded', 'json')
if userPicUploaded['isPhotoUploaded'] == 'true':
fetchBinaryFile(APIPrefix + '/employees/' + str(employeeID) + '/photo/small',
sub(',', '', str(args.dest + '/photos/photo_employeeID_' + str(employeeID) + '_'
+ sub(' ', '_', employee) + '.jpg')))
for table in userTables:
locals()[str('exec_' + table)](table)
| 42.242063 | 119 | 0.626867 | import argparse
import datetime
import sys
import requests
from os import makedirs
from os.path import dirname, exists
from re import search, sub, escape
import xmltodict
parser = argparse.ArgumentParser()
parser.add_argument('auth', help='User API auth key.', type=str)
parser.add_argument('company', help='Company name within BambooHR.', type=str)
parser.add_argument('dest', help='Full path to CSV and artifacts destination.', type=str)
args = parser.parse_args()
epochNow = datetime.datetime.today().strftime('%Y%m%d_%s')
APIPrefix = 'https://api.bamboohr.com/api/gateway.php/' + args.company + '/v1'
userTables = ['jobInfo', 'employmentStatus', 'emergencyContacts', 'compensation', 'customBankDetails',
'customRSADetails', 'employeedependents']
def fetchFromAPI(url, outform):
try:
results = requests.get(url, headers={'Accept': 'application/json'}, auth=(args.auth, ":x"))
if results.status_code == 200:
if outform == 'json':
return results.json()
elif outform == 'xml':
return results.text
else:
sys.stderr.write('API Request error on "' + url + '"; exiting...' + "\n")
exit(1)
except (requests.ConnectionError, requests.exceptions.HTTPError, requests.exceptions.Timeout) as e:
sys.stderr.write('ERROR: ' + str(e) + '; exiting...' + "\n")
exit(1)
def fetchBinaryFile(url, destination):
try:
binary = requests.get(url, headers={'Accept': 'application/json'}, auth=(args.auth, ":x"))
directory = dirname(destination)
if not exists(directory):
makedirs(directory)
with open(destination, 'wb') as f:
f.write(binary.content)
f.close()
except (requests.ConnectionError, requests.exceptions.HTTPError, requests.exceptions.Timeout) as e:
sys.stderr.write('ERROR: ' + str(e) + '; exiting...' + "\n")
exit(1)
def openFileHandler(fileName):
try:
fh = open(fileName, 'a')
return fh
except (PermissionError, OSError, IOError) as e:
sys.stderr.write('ERROR: ' + str(e) + '; exiting...' + "\n")
exit(1)
def processAttrValue(String):
if str(String) == "None" or str(String) == "":
return '-,'
else:
if search("'", str(String)):
return sub("'", '', str(String)) + ','
elif search(",", str(String)):
return sub(r'(.*)', r'"\1"', str(String)) + ','
else:
return str(String) + ','
def checkHeaderForAttribute(fileName, keyword):
try:
fh = open(fileName, 'r')
firstLine = fh.readline()
fh.close()
if search(keyword, firstLine):
return True
else:
return False
except FileNotFoundError:
return False
except (PermissionError, OSError, IOError) as e:
sys.stderr.write('ERROR: ' + str(e) + '; exiting...' + "\n")
exit(1)
def processAPIInfo(httpReturn, allKeys, subKeyList):
csvOutput = ''
if isinstance(httpReturn, dict):
csvOutput = processAttrValue(employee)
for key in allKeys:
if key in subKeyList.keys():
for tag in subKeyList[key]:
csvOutput += processAttrValue(httpReturn[key][tag])
else:
csvOutput += processAttrValue(httpReturn[key])
else:
index = -1
for index in range(len(httpReturn) - 1):
csvOutput += processAPIInfo(httpReturn[index], allKeys, subKeyList) + '\n'
csvOutput += processAPIInfo(httpReturn[(index + 1)], allKeys, subKeyList)
return csvOutput
def writeCSVToFile(fetchInfo, tableName, topKeyList, subKeyList):
allKeys = topKeyList[:]
for parKey in sorted(subKeyList.keys()):
allKeys.append(parKey)
fileName = args.dest + '/' + epochNow + '_' + tableName + '.csv'
headerPresent = checkHeaderForAttribute(fileName, 'displayName')
statusCSV = openFileHandler(fileName)
if headerPresent == False:
header = 'displayName,' + str(','.join(map(str, topKeyList)))
for child in sorted(subKeyList.keys()):
header += ',' + (str(','.join(map(str, subKeyList[child]))))
statusCSV.write(header + "\n")
statusCSV.write(processAPIInfo(fetchInfo, allKeys, subKeyList).rstrip(',') + "\n")
statusCSV.close()
def exec_jobInfo(tableName):
jobInfoKeys = ['jobTitle', 'reportsTo', 'location', 'division', 'department', 'date']
fetchInfo = fetchFromAPI(APIPrefix + '/employees/' + str(employeeID) + '/tables/' + tableName, 'json')
if len(fetchInfo) > 0:
writeCSVToFile(fetchInfo, tableName, jobInfoKeys, {})
def exec_employmentStatus(tableName):
statusKeys = ['employmentStatus', 'employeeId', 'date']
fetchInfo = fetchFromAPI(APIPrefix + '/employees/' + str(employeeID) + '/tables/' + tableName, 'json')
if len(fetchInfo) > 0:
writeCSVToFile(fetchInfo, tableName, statusKeys, {})
def exec_emergencyContacts(tableName):
contactKeys = ['employeeId', 'name', 'relationship', 'homePhone', 'addressLine1', 'addressLine2', 'mobilePhone',
'email', 'zipcode', 'city', 'state', 'country', 'workPhone', 'workPhoneExtension']
fetchInfo = fetchFromAPI(APIPrefix + '/employees/' + str(employeeID) + '/tables/' + tableName, 'json')
if len(fetchInfo) > 0:
writeCSVToFile(fetchInfo, tableName, contactKeys, {})
def exec_compensation(tableName):
compKeys = ['type', 'payPeriod', 'employeeId', 'startDate']
subKeys = {'rate': ['currency', 'value']}
fetchInfo = fetchFromAPI(APIPrefix + '/employees/' + str(employeeID) + '/tables/' + tableName, 'json')
if len(fetchInfo) > 0:
writeCSVToFile(fetchInfo, tableName, compKeys, subKeys)
def exec_customBankDetails(tableName):
bankKeys = ['employeeId', 'customBankName', 'customAccountNumber']
fetchInfo = fetchFromAPI(APIPrefix + '/employees/' + str(employeeID) + '/tables/' + tableName, 'json')
if len(fetchInfo) > 0:
writeCSVToFile(fetchInfo, tableName, bankKeys, {})
def exec_customRSADetails(tableName):
rsaKeys = ['employeeId', 'customPFAName', 'customRSANumber']
fetchInfo = fetchFromAPI(APIPrefix + '/employees/' + str(employeeID) + '/tables/' + tableName, 'json')
if len(fetchInfo) > 0:
writeCSVToFile(fetchInfo, tableName, rsaKeys, {})
def exec_employeedependents(tableName):
depKeys = ['employeeId', 'firstName', 'middleName', 'lastName', 'relationship', 'gender', 'dateOfBirth',
'addressLine1', 'addressLine2', 'city', 'state', 'zipCode', 'homePhone', 'country', 'isUsCitizen',
'isStudent']
fetchInfo = fetchFromAPI(APIPrefix + '/' + tableName + '/?employeeid=' + str(employeeID), 'json')
if len(fetchInfo['Employee Dependents']) > 0:
writeCSVToFile(fetchInfo['Employee Dependents'], tableName, depKeys, {})
def processDict(arg, catName):
spaces = [' ']
return sub(u'(?u)[' + escape(''.join(spaces)) + ']', '_',
str(args.dest + catName + '/' + arg['dateCreated'] + '_' + arg['name']))
def downloadDocuments(employeeID):
fetchInfo = fetchFromAPI(APIPrefix + '/employees/' + str(employeeID) + '/files/view', 'xml')
obj = xmltodict.parse(fetchInfo)
for i in range(len(obj['employee']['category'])):
catName = obj['employee']['category'][i]['name']
try:
if isinstance(obj['employee']['category'][i]['file'], list):
for ind in range(len(obj['employee']['category'][i]['file'])):
filename = processDict(obj['employee']['category'][i]['file'][ind], catName)
fetchBinaryFile(APIPrefix + '/employees/' + str(employeeID) + '/files/' +
str(obj['employee']['category'][i]['file'][ind]['@id']) + '/', filename)
elif isinstance(obj['employee']['category'][i]['file'], dict):
filename = processDict(obj['employee']['category'][i]['file'], catName)
fetchBinaryFile(APIPrefix + '/employees/' + str(employeeID) + '/files/' +
str(obj['employee']['category'][i]['file']['@id']) + '/', filename)
else:
print(type(obj['employee']['category'][i]['file']))
except KeyError:
pass
userKeys = ['id', 'address1', 'address2', 'age', 'bestEmail', 'city', 'country', 'dateOfBirth',
'employeeNumber', 'employmentHistoryStatus', 'firstName', 'fullName1', 'fullName2', 'fullName3', 'fullName4',
'fullName5', 'gender', 'hireDate', 'homeEmail', 'homePhone', 'jobTitle', 'lastChanged', 'department',
'lastName', 'location', 'maritalStatus', 'middleName', 'mobilePhone', 'payChangeReason', 'payGroupId', 'payRate',
'payRateEffectiveDate', 'payType', 'paidPer', 'payPeriod', 'ssn', 'state', 'stateCode', 'supervisor',
'supervisorEId', 'terminationDate', 'workEmail', 'workPhone', 'workPhonePlusExtension', 'workPhoneExtension',
'zipcode', 'isPhotoUploaded', 'employmentStatus', 'nickname', 'photoUploaded', 'customBenefitDue', 'division',
'customBenefitDue', 'customCompany', 'customDateofConfirmation', 'customGrade1', 'customLagosGrade', 'customLevel',
'customNationalInsuranceNumber', 'customNationality', 'customNHFNumber', 'customNIC', 'customNigeriaMobilePhone',
'customNon-DomStatus', 'customPakistanMobilePhone', 'customRwandaMobilePhone', 'customStateofOrigin',
'customTaxIDNumber', 'customUKWorkPermit', 'supervisorId', 'displayName']
userIDs = []
userIDGet = fetchFromAPI(APIPrefix + '/employees/directory', 'json')
for employee in userIDGet['employees']:
userIDs.append(employee['id'])
for employeeID in userIDs:
if employeeID != 671:
userInfoGet = fetchFromAPI(APIPrefix + '/employees/' + str(employeeID) + '?fields='
+ ','.join(map(str, userKeys)), 'json')
employee = userInfoGet['displayName']
writeCSVToFile(userInfoGet, 'employees', userKeys[:-1], {})
downloadDocuments(employeeID)
userPicUploaded = fetchFromAPI(APIPrefix + '/employees/' + str(employeeID) + '?fields=isPhotoUploaded', 'json')
if userPicUploaded['isPhotoUploaded'] == 'true':
fetchBinaryFile(APIPrefix + '/employees/' + str(employeeID) + '/photo/small',
sub(',', '', str(args.dest + '/photos/photo_employeeID_' + str(employeeID) + '_'
+ sub(' ', '_', employee) + '.jpg')))
for table in userTables:
locals()[str('exec_' + table)](table)
| true | true |
f71ea8648cb914829cbd5e2b6998226a113ea3c8 | 2,831 | py | Python | payment.py | trytonus/trytond-magento | f27e8d136e5e222fdf86b679d10d468de38262eb | [
"BSD-3-Clause"
] | 3 | 2015-10-07T15:51:40.000Z | 2016-04-06T09:00:57.000Z | payment.py | trytonus/trytond-magento | f27e8d136e5e222fdf86b679d10d468de38262eb | [
"BSD-3-Clause"
] | 19 | 2015-07-28T14:24:24.000Z | 2016-07-13T06:02:35.000Z | payment.py | trytonus/trytond-magento | f27e8d136e5e222fdf86b679d10d468de38262eb | [
"BSD-3-Clause"
] | 15 | 2015-07-28T05:54:17.000Z | 2016-05-27T12:23:29.000Z | # -*- coding: utf-8 -*-
from trytond.pool import PoolMeta
from trytond.model import fields, ModelSQL, ModelView
from trytond.transaction import Transaction
__metaclass__ = PoolMeta
__all__ = ['MagentoPaymentGateway', 'Payment']
class MagentoPaymentGateway(ModelSQL, ModelView):
    """Mapping between a Magento payment gateway and its Tryton counterpart."""
    __name__ = 'magento.instance.payment_gateway'
    _rec_name = 'title'

    name = fields.Char("Name", required=True, select=True)
    title = fields.Char('Title', required=True, select=True)
    gateway = fields.Many2One(
        'payment_gateway.gateway', 'Gateway', required=True,
        ondelete='RESTRICT', select=True,
    )
    channel = fields.Many2One(
        'sale.channel', 'Magento Channel', readonly=True, select=True,
        domain=[('source', '=', 'magento')]
    )

    @classmethod
    def __setup__(cls):
        """Register the (name, channel) uniqueness constraint."""
        super(MagentoPaymentGateway, cls).__setup__()
        cls._sql_constraints += [
            (
                'name_channel_unique', 'unique(name, channel)',
                'Payment gateway already exist for this channel'
            )
        ]

    @classmethod
    def create_all_using_magento_data(cls, magento_data):
        """Return one gateway record per entry in ``magento_data``.

        Existing records (matched by name within the current channel) are
        reused; missing ones are created.
        """
        return [
            cls.find_using_magento_data(entry)
            or cls.create_using_magento_data(entry)
            for entry in magento_data
        ]

    @classmethod
    def create_using_magento_data(cls, gateway_data):
        """Create a record for gateway data sent by magento.

        Must be provided by a concrete subclass/module.
        """
        raise NotImplementedError

    @classmethod
    def find_using_magento_data(cls, gateway_data):
        """Return the gateway matching name + current channel, or None.

        None is returned both when no record matches and when the match is
        ambiguous (more than one record), mirroring the strict single-record
        lookup this mapping expects.
        """
        matches = cls.search([
            ('name', '=', gateway_data['name']),
            ('channel', '=', Transaction().context['current_channel']),
        ])
        if len(matches) == 1:
            return matches[0]
        return None
class Payment:
    """Extend ``sale.payment`` with the originating Magento payment id."""
    __name__ = "sale.payment"

    # Identifier of the corresponding payment record on the Magento side.
    magento_id = fields.Integer('Magento ID', readonly=True)

    @classmethod
    def __setup__(cls):
        """
        Setup the class before adding to pool
        """
        super(Payment, cls).__setup__()
        # TODO: Add validation to make sure payment magento id per channel
        # is unique!
| 30.117021 | 75 | 0.604733 |
from trytond.pool import PoolMeta
from trytond.model import fields, ModelSQL, ModelView
from trytond.transaction import Transaction
__metaclass__ = PoolMeta
__all__ = ['MagentoPaymentGateway', 'Payment']
class MagentoPaymentGateway(ModelSQL, ModelView):
__name__ = 'magento.instance.payment_gateway'
_rec_name = 'title'
name = fields.Char("Name", required=True, select=True)
title = fields.Char('Title', required=True, select=True)
gateway = fields.Many2One(
'payment_gateway.gateway', 'Gateway', required=True,
ondelete='RESTRICT', select=True,
)
channel = fields.Many2One(
'sale.channel', 'Magento Channel', readonly=True, select=True,
domain=[('source', '=', 'magento')]
)
@classmethod
def __setup__(cls):
super(MagentoPaymentGateway, cls).__setup__()
cls._sql_constraints += [
(
'name_channel_unique', 'unique(name, channel)',
'Payment gateway already exist for this channel'
)
]
@classmethod
def create_all_using_magento_data(cls, magento_data):
gateways = []
for data in magento_data:
gateway = cls.find_using_magento_data(data)
if gateway:
gateways.append(gateway)
else:
gateways.append(cls.create_using_magento_data(data))
return gateways
@classmethod
def create_using_magento_data(cls, gateway_data):
raise NotImplementedError
@classmethod
def find_using_magento_data(cls, gateway_data):
try:
gateway, = cls.search([
('name', '=', gateway_data['name']),
('channel', '=', Transaction().context['current_channel']),
])
except ValueError:
return None
else:
return gateway
class Payment:
__name__ = "sale.payment"
magento_id = fields.Integer('Magento ID', readonly=True)
@classmethod
def __setup__(cls):
super(Payment, cls).__setup__()
| true | true |
f71ea87e24c61c899af7fdfebf66e69cf9fcd73b | 1,841 | py | Python | neutron/db/migration/alembic_migrations/agent_init_ops.py | NeCTAR-RC/neutron | acf78cc3c88aff638180819419a65145a9a79695 | [
"Apache-2.0"
] | 1 | 2019-01-13T04:42:21.000Z | 2019-01-13T04:42:21.000Z | neutron/db/migration/alembic_migrations/agent_init_ops.py | NeCTAR-RC/neutron | acf78cc3c88aff638180819419a65145a9a79695 | [
"Apache-2.0"
] | null | null | null | neutron/db/migration/alembic_migrations/agent_init_ops.py | NeCTAR-RC/neutron | acf78cc3c88aff638180819419a65145a9a79695 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Initial operations for agent management extension
# This module only manages the 'agents' table. Binding tables are created
# in the modules for relevant resources
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the ``agents`` table for the agent management extension."""
    columns = [
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('agent_type', sa.String(length=255), nullable=False),
        sa.Column('binary', sa.String(length=255), nullable=False),
        sa.Column('topic', sa.String(length=255), nullable=False),
        sa.Column('host', sa.String(length=255), nullable=False),
        sa.Column('admin_state_up', sa.Boolean(), nullable=False,
                  server_default=sa.sql.true()),
        sa.Column('created_at', sa.DateTime(), nullable=False),
        sa.Column('started_at', sa.DateTime(), nullable=False),
        sa.Column('heartbeat_timestamp', sa.DateTime(), nullable=False),
        sa.Column('description', sa.String(length=255), nullable=True),
        sa.Column('configurations', sa.String(length=4095), nullable=False),
    ]
    constraints = [
        sa.PrimaryKeyConstraint('id'),
        # An agent of a given type may be registered at most once per host.
        sa.UniqueConstraint('agent_type', 'host',
                            name='uniq_agents0agent_type0host'),
    ]
    op.create_table('agents', *(columns + constraints))
| 42.813953 | 78 | 0.681152 |
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'agents',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('agent_type', sa.String(length=255), nullable=False),
sa.Column('binary', sa.String(length=255), nullable=False),
sa.Column('topic', sa.String(length=255), nullable=False),
sa.Column('host', sa.String(length=255), nullable=False),
sa.Column('admin_state_up', sa.Boolean(), nullable=False,
server_default=sa.sql.true()),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('started_at', sa.DateTime(), nullable=False),
sa.Column('heartbeat_timestamp', sa.DateTime(), nullable=False),
sa.Column('description', sa.String(length=255), nullable=True),
sa.Column('configurations', sa.String(length=4095), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('agent_type', 'host',
name='uniq_agents0agent_type0host'))
| true | true |
f71ea8a17abddd1dcc7b6d5c987d32eb1bff55b7 | 11,642 | py | Python | nidm/experiment/tests/test_query.py | tvanerp/PyNIDM | 6a94875969c6bc5247b09d7d2793ed979b18ab3f | [
"Apache-2.0"
] | null | null | null | nidm/experiment/tests/test_query.py | tvanerp/PyNIDM | 6a94875969c6bc5247b09d7d2793ed979b18ab3f | [
"Apache-2.0"
] | null | null | null | nidm/experiment/tests/test_query.py | tvanerp/PyNIDM | 6a94875969c6bc5247b09d7d2793ed979b18ab3f | [
"Apache-2.0"
] | null | null | null | import pytest
from nidm.experiment import Project, Session, AssessmentAcquisition, AssessmentObject, Acquisition, AcquisitionObject, Query
from nidm.core import Constants
from rdflib import Namespace,URIRef
import prov.model as pm
from os import remove
import pprint
from prov.model import ProvDocument, QualifiedName
from prov.model import Namespace as provNamespace
import json
import urllib.request
from pathlib import Path
# when set to true, this will test example NIDM files downloaded from
# the GitHub dbkeator/simple2_NIDM_examples repo
#
# DBK: this is a bit unsafe as the TTL files in the github repo above can change and the UUID will change since they are randomly
# generated at this point. It's probably more robust to explicitly create these files for the time being and explicitly set the
# UUID in the test file:
# For example: kwargs={Constants.NIDM_PROJECT_NAME:"FBIRN_PhaseIII",Constants.NIDM_PROJECT_IDENTIFIER:1200,Constants.NIDM_PROJECT_DESCRIPTION:"Test investigation2"}
# project = Project(uuid="_654321",attributes=kwargs)
USE_GITHUB_DATA = False
def test_GetProjectMetadata():
    """Serialize two projects to turtle files (metadata query is still WIP).

    The GetProjectMetadata assertions remain commented out upstream; for now
    this test only exercises serialization of two distinct projects.
    """
    kwargs = {Constants.NIDM_PROJECT_NAME: "FBIRN_PhaseII",
              Constants.NIDM_PROJECT_IDENTIFIER: 9610,
              Constants.NIDM_PROJECT_DESCRIPTION: "Test investigation"}
    project = Project(uuid="_123456", attributes=kwargs)
    # save a turtle file
    with open("test.ttl", 'w') as f:
        f.write(project.serializeTurtle())

    kwargs = {Constants.NIDM_PROJECT_NAME: "FBIRN_PhaseIII",
              Constants.NIDM_PROJECT_IDENTIFIER: 1200,
              Constants.NIDM_PROJECT_DESCRIPTION: "Test investigation2"}
    project = Project(uuid="_654321", attributes=kwargs)
    # save a turtle file
    with open("test2.ttl", 'w') as f:
        f.write(project.serializeTurtle())

    # WIP: test = Query.GetProjectMetadata(["test.ttl", "test2.ttl"])
    # with assertions on both project UUIDs, identifiers, names and
    # descriptions once GetProjectMetadata is implemented.

    # Bug fix: the original cleaned up only test2.ttl and leaked test.ttl.
    remove("test.ttl")
    remove("test2.ttl")
def test_GetProjects():
    """A serialized project's UUID should be returned by GetProjectsUUID."""
    attrs = {Constants.NIDM_PROJECT_NAME: "FBIRN_PhaseII",
             Constants.NIDM_PROJECT_IDENTIFIER: 9610,
             Constants.NIDM_PROJECT_DESCRIPTION: "Test investigation"}
    project = Project(uuid="_123456", attributes=attrs)
    # Persist the graph as turtle so the query layer can read it back.
    with open("test.ttl", 'w') as ttl_file:
        ttl_file.write(project.serializeTurtle())
    project_list = Query.GetProjectsUUID(["test.ttl"])
    remove("test.ttl")
    assert URIRef(Constants.NIIRI + "_123456") in project_list
def test_GetParticipantIDs():
    """Participant IDs attached via qualified associations are queryable."""
    attrs = {Constants.NIDM_PROJECT_NAME: "FBIRN_PhaseII",
             Constants.NIDM_PROJECT_IDENTIFIER: 9610,
             Constants.NIDM_PROJECT_DESCRIPTION: "Test investigation"}
    project = Project(uuid="_123456", attributes=attrs)
    session = Session(uuid="_13579", project=project)
    acq = Acquisition(uuid="_15793", session=session)
    acq2 = Acquisition(uuid="_15795", session=session)
    # Attach one participant to each acquisition.
    for acquisition, subject_id in ((acq, "9999"), (acq2, "8888")):
        person = acquisition.add_person(
            attributes={Constants.NIDM_SUBJECTID: subject_id})
        acquisition.add_qualified_association(
            person=person, role=Constants.NIDM_PARTICIPANT)
    # Persist the graph and query the participant IDs back out of it.
    with open("test.ttl", 'w') as ttl_file:
        ttl_file.write(project.serializeTurtle())
    participant_list = Query.GetParticipantIDs(["test.ttl"])
    remove("test.ttl")
    assert participant_list['ID'].str.contains('9999').any()
    assert participant_list['ID'].str.contains('8888').any()
def test_GetProjectInstruments():
    """Assessment types on a project are listed by GetProjectInstruments."""
    attrs = {Constants.NIDM_PROJECT_NAME: "FBIRN_PhaseII",
             Constants.NIDM_PROJECT_IDENTIFIER: 9610,
             Constants.NIDM_PROJECT_DESCRIPTION: "Test investigation"}
    project = Project(uuid="_123456", attributes=attrs)
    session = Session(project)
    nidm_ns = pm.Namespace("nidm", Constants.NIDM)
    # One assessment acquisition per instrument type.
    for instrument in ("NorthAmericanAdultReadingTest",
                       "PositiveAndNegativeSyndromeScale"):
        acquisition = AssessmentAcquisition(session)
        AssessmentObject(
            acquisition,
            attributes={pm.PROV_TYPE: pm.QualifiedName(nidm_ns, instrument)})
    with open("test.ttl", 'w') as ttl_file:
        ttl_file.write(project.serializeTurtle())
    assessment_list = Query.GetProjectInstruments(["test.ttl"], "_123456")
    # remove("test.ttl")
    assert URIRef(Constants.NIDM + "NorthAmericanAdultReadingTest") \
        in assessment_list['assessment_type'].to_list()
    assert URIRef(Constants.NIDM + "PositiveAndNegativeSyndromeScale") \
        in assessment_list['assessment_type'].to_list()
'''
The test data file could/should have the following project meta data. Taken from
https://raw.githubusercontent.com/incf-nidash/nidm/master/nidm/nidm-experiment/terms/nidm-experiment.owl
- descrption
- fileName
- license
- source
- title
- hadNumericalValue ???
- BathSolution ???
- CellType
- ChannelNumber
- ElectrodeImpedance
- GroupLabel
- HollowElectrodeSolution
- hadImageContrastType
- hadImageUsageType
- NumberOfChannels
- AppliedFilter
- SolutionFlowSpeed
- RecordingLocation
Returns the
'''
def saveTestFile(file_name, data):
    """Build a project (uuid derived from ``file_name``) and persist it.

    Returns the project's qualified name ("nidm:_123_<file_name>").
    """
    return saveProject(file_name,
                       Project(uuid="_123_" + file_name, attributes=data))
def saveProject(file_name, project):
    """Serialize ``project`` as turtle into ``file_name``.

    Returns the project's qualified name ("nidm:_123_<file_name>").
    """
    turtle = project.serializeTurtle()
    with open(file_name, 'w') as out:
        out.write(turtle)
    return "nidm:_123_" + file_name
def makeProjectTestFile(filename):
    """Write a richly-attributed test project ("FBIRN_PhaseII") to ``filename``.

    Returns the qualified name ("nidm:_123_<filename>") of the saved project.

    Fix: removed the unused local ``DCTYPES`` namespace declaration.
    """
    kwargs = {Constants.NIDM_PROJECT_NAME: "FBIRN_PhaseII",  # this is the "title"
              Constants.NIDM_PROJECT_IDENTIFIER: 9610,
              Constants.NIDM_PROJECT_DESCRIPTION: "Test investigation",
              Constants.NIDM_FILENAME: "testfile.ttl",
              Constants.NIDM_PROJECT_LICENSE: "MIT Licence",
              Constants.NIDM_PROJECT_SOURCE: "Educational Source",
              Constants.NIDM_HAD_NUMERICAL_VALUE: "numval???",
              Constants.NIDM_BATH_SOLUTION: "bath",
              Constants.NIDM_CELL_TYPE: "ctype",
              Constants.NIDM_CHANNEL_NUMBER: "5",
              Constants.NIDM_ELECTRODE_IMPEDANCE: ".01",
              Constants.NIDM_GROUP_LABEL: "group 123",
              Constants.NIDM_HOLLOW_ELECTRODE_SOLUTION: "water",
              Constants.NIDM_HAD_IMAGE_CONTRACT_TYPE: "off",
              Constants.NIDM_HAD_IMAGE_USAGE_TYPE: "abcd",
              # NUBMER is the (misspelled) constant name as declared upstream.
              Constants.NIDM_NUBMER_OF_CHANNELS: "11",
              Constants.NIDM_APPLIED_FILTER: "on",
              Constants.NIDM_SOLUTION_FLOW_SPEED: "2.8",
              Constants.NIDM_RECORDING_LOCATION: "lab"
              }
    return saveTestFile(filename, kwargs)
def makeProjectTestFile2(filename):
    """Write a second test project ("TEST B") with one session, one
    assessment acquisition and one associated participant to ``filename``.

    Returns the qualified name ("nidm:_123_<filename>") of the saved project.

    Fix: removed the unused local ``DCTYPES`` namespace declaration and a
    dead commented-out ``add_acquisition`` line.
    """
    kwargs = {Constants.NIDM_PROJECT_NAME: "TEST B",  # this is the "title"
              Constants.NIDM_PROJECT_IDENTIFIER: 1234,
              Constants.NIDM_PROJECT_DESCRIPTION: "More Scans",
              Constants.NIDM_FILENAME: "testfile2.ttl",
              Constants.NIDM_PROJECT_LICENSE: "Creative Commons",
              Constants.NIDM_PROJECT_SOURCE: "Other",
              Constants.NIDM_HAD_NUMERICAL_VALUE: "numval???",
              Constants.NIDM_BATH_SOLUTION: "bath",
              Constants.NIDM_CELL_TYPE: "ctype",
              Constants.NIDM_CHANNEL_NUMBER: "5",
              Constants.NIDM_ELECTRODE_IMPEDANCE: ".01",
              Constants.NIDM_GROUP_LABEL: "group 123",
              Constants.NIDM_HOLLOW_ELECTRODE_SOLUTION: "water",
              Constants.NIDM_HAD_IMAGE_CONTRACT_TYPE: "off",
              Constants.NIDM_HAD_IMAGE_USAGE_TYPE: "abcd",
              # NUBMER is the (misspelled) constant name as declared upstream.
              Constants.NIDM_NUBMER_OF_CHANNELS: "11",
              Constants.NIDM_APPLIED_FILTER: "on",
              Constants.NIDM_SOLUTION_FLOW_SPEED: "2.8",
              Constants.NIDM_RECORDING_LOCATION: "lab"
              }
    project = Project(uuid="_123_" + filename, attributes=kwargs)
    s1 = Session(project)
    a1 = AssessmentAcquisition(session=s1)
    p1 = a1.add_person("p1", attributes={Constants.NIDM_GIVEN_NAME: "George",
                                         Constants.NIDM_AGE: 22})
    a1.add_qualified_association(person=p1, role=Constants.NIDM_PARTICIPANT)
    return saveProject(filename, project)
def test_GetProjectsMetadata():
    """GetProjectsMetadata reports each project's stored (non-computed) data.

    The CMU ABIDE assertions only run when USE_GITHUB_DATA is enabled.
    """
    p1 = makeProjectTestFile("testfile.ttl")
    p2 = makeProjectTestFile2("testfile2.ttl")
    files = ["testfile.ttl", "testfile2.ttl"]

    if USE_GITHUB_DATA and not Path('./cmu_a.nidm.ttl').is_file():
        urllib.request.urlretrieve(
            "https://raw.githubusercontent.com/dbkeator/simple2_NIDM_examples/master/datasets.datalad.org/abide/RawDataBIDS/CMU_a/nidm.ttl",
            "cmu_a.nidm.ttl"
        )
        files.append("cmu_a.nidm.ttl")

    parsed = Query.GetProjectsMetadata(files)

    if USE_GITHUB_DATA:
        # The CMU project is whichever id is neither p1 nor p2.
        p3 = None
        for project_id in parsed['projects']:
            if project_id not in (p1, p2):
                p3 = project_id
        assert parsed['projects'][p3][str(Constants.NIDM_PROJECT_NAME)] == "ABIDE CMU_a Site"
def test_GetProjectsComputedMetadata():
    """GetProjectsComputedMetadata derives subject counts, age range, gender.

    The CMU ABIDE assertions only run when USE_GITHUB_DATA is enabled.
    """
    p1 = makeProjectTestFile("testfile.ttl")
    p2 = makeProjectTestFile2("testfile2.ttl")
    files = ["testfile.ttl", "testfile2.ttl"]

    if USE_GITHUB_DATA and not Path('./cmu_a.nidm.ttl').is_file():
        urllib.request.urlretrieve(
            "https://raw.githubusercontent.com/dbkeator/simple2_NIDM_examples/master/datasets.datalad.org/abide/RawDataBIDS/CMU_a/nidm.ttl",
            "cmu_a.nidm.ttl"
        )
        files.append("cmu_a.nidm.ttl")

    parsed = Query.GetProjectsComputedMetadata(files)

    if USE_GITHUB_DATA:
        # The CMU project is whichever id is neither p1 nor p2.
        p3 = None
        for project_id in parsed['projects']:
            if project_id not in (p1, p2):
                p3 = project_id
        cmu = parsed['projects'][p3]
        assert cmu[str(Constants.NIDM_PROJECT_NAME)] == "ABIDE CMU_a Site"
        assert cmu[Query.matchPrefix(str(Constants.NIDM_NUMBER_OF_SUBJECTS))] == 14
        assert cmu["age_min"] == 21
        assert cmu["age_max"] == 33
        assert cmu[str(Constants.NIDM_GENDER)] == ['1', '2']
| 40.006873 | 165 | 0.707954 | import pytest
from nidm.experiment import Project, Session, AssessmentAcquisition, AssessmentObject, Acquisition, AcquisitionObject, Query
from nidm.core import Constants
from rdflib import Namespace,URIRef
import prov.model as pm
from os import remove
import pprint
from prov.model import ProvDocument, QualifiedName
from prov.model import Namespace as provNamespace
import json
import urllib.request
from pathlib import Path
# UUID in the test file:
# For example: kwargs={Constants.NIDM_PROJECT_NAME:"FBIRN_PhaseIII",Constants.NIDM_PROJECT_IDENTIFIER:1200,Constants.NIDM_PROJECT_DESCRIPTION:"Test investigation2"}
# project = Project(uuid="_654321",attributes=kwargs)
USE_GITHUB_DATA = False
def test_GetProjectMetadata():
kwargs={Constants.NIDM_PROJECT_NAME:"FBIRN_PhaseII",Constants.NIDM_PROJECT_IDENTIFIER:9610,Constants.NIDM_PROJECT_DESCRIPTION:"Test investigation"}
project = Project(uuid="_123456",attributes=kwargs)
#save a turtle file
with open("test.ttl",'w') as f:
f.write(project.serializeTurtle())
kwargs={Constants.NIDM_PROJECT_NAME:"FBIRN_PhaseIII",Constants.NIDM_PROJECT_IDENTIFIER:1200,Constants.NIDM_PROJECT_DESCRIPTION:"Test investigation2"}
project = Project(uuid="_654321",attributes=kwargs)
#save a turtle file
with open("test2.ttl",'w') as f:
f.write(project.serializeTurtle())
#WIP test = Query.GetProjectMetadata(["test.ttl", "test2.ttl"])
#assert URIRef(Constants.NIDM + "_654321") in test
#assert URIRef(Constants.NIDM + "_123456") in test
#assert URIRef(Constants.NIDM_PROJECT_IDENTIFIER + "1200") in test
#assert URIRef(Constants.NIDM_PROJECT_IDENTIFIER + "9610") in test
#assert URIRef((Constants.NIDM_PROJECT_NAME + "FBIRN_PhaseII")) in test
#assert URIRef((Constants.NIDM_PROJECT_NAME + "FBIRN_PhaseIII")) in test
#assert URIRef((Constants.NIDM_PROJECT_DESCRIPTION + "Test investigation")) in test
#assert URIRef((Constants.NIDM_PROJECT_DESCRIPTION + "Test investigation2")) in test
remove("test2.ttl")
def test_GetProjects():
kwargs={Constants.NIDM_PROJECT_NAME:"FBIRN_PhaseII",Constants.NIDM_PROJECT_IDENTIFIER:9610,Constants.NIDM_PROJECT_DESCRIPTION:"Test investigation"}
project = Project(uuid="_123456",attributes=kwargs)
#save a turtle file
with open("test.ttl",'w') as f:
f.write(project.serializeTurtle())
project_list = Query.GetProjectsUUID(["test.ttl"])
remove("test.ttl")
assert URIRef(Constants.NIIRI + "_123456") in project_list
def test_GetParticipantIDs():
kwargs={Constants.NIDM_PROJECT_NAME:"FBIRN_PhaseII",Constants.NIDM_PROJECT_IDENTIFIER:9610,Constants.NIDM_PROJECT_DESCRIPTION:"Test investigation"}
project = Project(uuid="_123456",attributes=kwargs)
session = Session(uuid="_13579",project=project)
acq = Acquisition(uuid="_15793",session=session)
acq2 = Acquisition(uuid="_15795",session=session)
person=acq.add_person(attributes=({Constants.NIDM_SUBJECTID:"9999"}))
acq.add_qualified_association(person=person,role=Constants.NIDM_PARTICIPANT)
person2=acq2.add_person(attributes=({Constants.NIDM_SUBJECTID:"8888"}))
acq2.add_qualified_association(person=person2,role=Constants.NIDM_PARTICIPANT)
#save a turtle file
with open("test.ttl",'w') as f:
f.write(project.serializeTurtle())
participant_list = Query.GetParticipantIDs(["test.ttl"])
remove("test.ttl")
assert (participant_list['ID'].str.contains('9999').any())
assert (participant_list['ID'].str.contains('8888').any())
def test_GetProjectInstruments():
kwargs={Constants.NIDM_PROJECT_NAME:"FBIRN_PhaseII",Constants.NIDM_PROJECT_IDENTIFIER:9610,Constants.NIDM_PROJECT_DESCRIPTION:"Test investigation"}
project = Project(uuid="_123456",attributes=kwargs)
session = Session(project)
acq = AssessmentAcquisition(session)
kwargs={pm.PROV_TYPE:pm.QualifiedName(pm.Namespace("nidm",Constants.NIDM),"NorthAmericanAdultReadingTest")}
acq_obj = AssessmentObject(acq,attributes=kwargs)
acq2 = AssessmentAcquisition(session)
kwargs={pm.PROV_TYPE:pm.QualifiedName(pm.Namespace("nidm",Constants.NIDM),"PositiveAndNegativeSyndromeScale")}
acq_obj2 = AssessmentObject(acq2,attributes=kwargs)
#save a turtle file
with open("test.ttl",'w') as f:
f.write(project.serializeTurtle())
assessment_list = Query.GetProjectInstruments(["test.ttl"],"_123456")
#remove("test.ttl")
assert URIRef(Constants.NIDM + "NorthAmericanAdultReadingTest") in assessment_list['assessment_type'].to_list()
assert URIRef(Constants.NIDM + "PositiveAndNegativeSyndromeScale") in assessment_list['assessment_type'].to_list()
def saveTestFile(file_name, data):
project = Project(uuid="_123_" + file_name, attributes=data)
return saveProject(file_name, project)
def saveProject(file_name, project):
# save a turtle file
with open(file_name, 'w') as f:
f.write(project.serializeTurtle())
return "nidm:_123_{}".format(file_name)
def makeProjectTestFile(filename):
DCTYPES = Namespace("http://purl.org/dc/dcmitype/")
kwargs = {Constants.NIDM_PROJECT_NAME: "FBIRN_PhaseII", # this is the "title"
Constants.NIDM_PROJECT_IDENTIFIER: 9610,
Constants.NIDM_PROJECT_DESCRIPTION: "Test investigation",
Constants.NIDM_FILENAME: "testfile.ttl",
Constants.NIDM_PROJECT_LICENSE: "MIT Licence",
Constants.NIDM_PROJECT_SOURCE: "Educational Source",
Constants.NIDM_HAD_NUMERICAL_VALUE: "numval???",
Constants.NIDM_BATH_SOLUTION: "bath",
Constants.NIDM_CELL_TYPE: "ctype",
Constants.NIDM_CHANNEL_NUMBER: "5",
Constants.NIDM_ELECTRODE_IMPEDANCE: ".01",
Constants.NIDM_GROUP_LABEL: "group 123",
Constants.NIDM_HOLLOW_ELECTRODE_SOLUTION: "water",
Constants.NIDM_HAD_IMAGE_CONTRACT_TYPE: "off",
Constants.NIDM_HAD_IMAGE_USAGE_TYPE: "abcd",
Constants.NIDM_NUBMER_OF_CHANNELS: "11",
Constants.NIDM_APPLIED_FILTER: "on",
Constants.NIDM_SOLUTION_FLOW_SPEED: "2.8",
Constants.NIDM_RECORDING_LOCATION: "lab"
}
return saveTestFile(filename, kwargs)
def makeProjectTestFile2(filename):
DCTYPES = Namespace("http://purl.org/dc/dcmitype/")
kwargs = {Constants.NIDM_PROJECT_NAME: "TEST B", # this is the "title"
Constants.NIDM_PROJECT_IDENTIFIER: 1234,
Constants.NIDM_PROJECT_DESCRIPTION: "More Scans",
Constants.NIDM_FILENAME: "testfile2.ttl",
Constants.NIDM_PROJECT_LICENSE: "Creative Commons",
Constants.NIDM_PROJECT_SOURCE: "Other",
Constants.NIDM_HAD_NUMERICAL_VALUE: "numval???",
Constants.NIDM_BATH_SOLUTION: "bath",
Constants.NIDM_CELL_TYPE: "ctype",
Constants.NIDM_CHANNEL_NUMBER: "5",
Constants.NIDM_ELECTRODE_IMPEDANCE: ".01",
Constants.NIDM_GROUP_LABEL: "group 123",
Constants.NIDM_HOLLOW_ELECTRODE_SOLUTION: "water",
Constants.NIDM_HAD_IMAGE_CONTRACT_TYPE: "off",
Constants.NIDM_HAD_IMAGE_USAGE_TYPE: "abcd",
Constants.NIDM_NUBMER_OF_CHANNELS: "11",
Constants.NIDM_APPLIED_FILTER: "on",
Constants.NIDM_SOLUTION_FLOW_SPEED: "2.8",
Constants.NIDM_RECORDING_LOCATION: "lab"
}
project = Project(uuid="_123_" + filename, attributes=kwargs)
s1 = Session(project)
a1 = AssessmentAcquisition(session=s1)
# = s1.add_acquisition("a1", attributes={"http://ncicb.nci.nih.gov/xml/owl/EVS/Thesaurus.owl#Age" : 22})
p1 = a1.add_person("p1", attributes={Constants.NIDM_GIVEN_NAME:"George", Constants.NIDM_AGE: 22})
a1.add_qualified_association(person=p1, role=Constants.NIDM_PARTICIPANT)
return saveProject(filename, project)
def test_GetProjectsMetadata():
p1 = makeProjectTestFile("testfile.ttl")
p2 = makeProjectTestFile2("testfile2.ttl")
files = ["testfile.ttl", "testfile2.ttl"]
if USE_GITHUB_DATA and not Path('./cmu_a.nidm.ttl').is_file():
urllib.request.urlretrieve (
"https://raw.githubusercontent.com/dbkeator/simple2_NIDM_examples/master/datasets.datalad.org/abide/RawDataBIDS/CMU_a/nidm.ttl",
"cmu_a.nidm.ttl"
)
files.append("cmu_a.nidm.ttl")
parsed = Query.GetProjectsMetadata(files)
# assert parsed['projects'][p1][str(Constants.NIDM_PROJECT_DESCRIPTION)] == "Test investigation"
# assert parsed['projects'][p2][str(Constants.NIDM_PROJECT_DESCRIPTION)] == "More Scans"
# we shouldn't have the computed metadata in this result
if USE_GITHUB_DATA:
for project_id in parsed['projects']:
if project_id != p1 and project_id != p2:
p3 = project_id
assert parsed['projects'][p3][str(Constants.NIDM_PROJECT_NAME)] == "ABIDE CMU_a Site"
def test_GetProjectsComputedMetadata():
p1 = makeProjectTestFile("testfile.ttl")
p2 = makeProjectTestFile2("testfile2.ttl")
files = ["testfile.ttl", "testfile2.ttl"]
if USE_GITHUB_DATA and not Path('./cmu_a.nidm.ttl').is_file():
urllib.request.urlretrieve (
"https://raw.githubusercontent.com/dbkeator/simple2_NIDM_examples/master/datasets.datalad.org/abide/RawDataBIDS/CMU_a/nidm.ttl",
"cmu_a.nidm.ttl"
)
files.append("cmu_a.nidm.ttl")
parsed = Query.GetProjectsComputedMetadata(files)
if USE_GITHUB_DATA:
for project_id in parsed['projects']:
if project_id != p1 and project_id != p2:
p3 = project_id
assert parsed['projects'][p3][str(Constants.NIDM_PROJECT_NAME)] == "ABIDE CMU_a Site"
assert parsed['projects'][p3][Query.matchPrefix(str(Constants.NIDM_NUMBER_OF_SUBJECTS))] == 14
assert parsed['projects'][p3]["age_min"] == 21
assert parsed['projects'][p3]["age_max"] == 33
assert parsed['projects'][p3][str(Constants.NIDM_GENDER)] == ['1', '2']
| true | true |
f71eaa69788aff0d572f50f2d2f88af0daf622b4 | 624 | py | Python | pizdyuk/test.py | DeathAdder1999/Pizdyuk | 3fd7c71508c79b36e3cc801d78cd1a87eee5aa0b | [
"Apache-2.0"
] | 1 | 2021-05-06T20:23:08.000Z | 2021-05-06T20:23:08.000Z | pizdyuk/test.py | aufdnb/Pizdyuk | 75096ffa54df831eb05360d7b39f49000d466f80 | [
"Apache-2.0"
] | null | null | null | pizdyuk/test.py | aufdnb/Pizdyuk | 75096ffa54df831eb05360d7b39f49000d466f80 | [
"Apache-2.0"
] | null | null | null | import csv
import random
from pzd_constants import DATE_FORMAT
from datetime import datetime, timedelta
# Last (date, price) row observed in the existing CSV; used as the seed for
# the synthetic data generated below.
date = None
price = 0
# Scan the whole file just to capture its final row.
# NOTE(review): if the CSV is empty, `date` stays None and the generation
# loop below raises TypeError on `date + timedelta(...)` — confirm the input
# file is always non-empty.
with open('stock_data/aapl.csv', mode="r") as f:
    reader = csv.reader(f, delimiter=',')
    for row in reader:
        date = datetime.strptime(row[0], DATE_FORMAT)
        price = float(row[1])
# Overwrite the file ("w+" truncates) with 359 synthetic one-second ticks
# continuing from the last observed price: a random walk of +/-1 per step,
# rounded to 2 decimals.
with open('stock_data/aapl.csv', mode="w+") as f:
    writer = csv.writer(f, delimiter=",")
    for i in range(1, 360):
        date = date + timedelta(seconds=1)
        price = round(random.uniform(price - 1, price + 1), 2)
        writer.writerow([datetime.strftime(date, DATE_FORMAT), price])
import random
from pzd_constants import DATE_FORMAT
from datetime import datetime, timedelta
date = None
price = 0
with open('stock_data/aapl.csv', mode="r") as f:
reader = csv.reader(f, delimiter=',')
for row in reader:
date = datetime.strptime(row[0], DATE_FORMAT)
price = float(row[1])
with open('stock_data/aapl.csv', mode="w+") as f:
writer = csv.writer(f, delimiter=",")
for i in range(1, 360):
date = date + timedelta(seconds=1)
price = round(random.uniform(price - 1, price + 1), 2)
writer.writerow([datetime.strftime(date, DATE_FORMAT), price]) | true | true |
f71eaaf92d5fa9575d3e1b1f9dadd5a505d8934d | 1,578 | py | Python | pirates/minigame/Distributed7StudTable.py | itsyaboyrocket/pirates | 6ca1e7d571c670b0d976f65e608235707b5737e3 | [
"BSD-3-Clause"
] | 3 | 2021-02-25T06:38:13.000Z | 2022-03-22T07:00:15.000Z | pirates/minigame/Distributed7StudTable.py | itsyaboyrocket/pirates | 6ca1e7d571c670b0d976f65e608235707b5737e3 | [
"BSD-3-Clause"
] | null | null | null | pirates/minigame/Distributed7StudTable.py | itsyaboyrocket/pirates | 6ca1e7d571c670b0d976f65e608235707b5737e3 | [
"BSD-3-Clause"
] | 1 | 2021-02-25T06:38:17.000Z | 2021-02-25T06:38:17.000Z | # uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.minigame.Distributed7StudTable
from pirates.minigame import PlayingCardGlobals
from pirates.minigame import DistributedPokerTable
from direct.interval.IntervalGlobal import *
from pandac.PandaModules import Point3, Vec3
from pirates.piratesbase import PLocalizer
class Distributed7StudTable(DistributedPokerTable.DistributedPokerTable):
    """Client-side distributed table for seven-card stud poker."""
    __module__ = __name__

    def __init__(self, cr):
        # Seven-card stud: six betting rounds, seven hole cards, no
        # community cards.
        DistributedPokerTable.DistributedPokerTable.__init__(
            self, cr, '7stud', numRounds=6)
        self.maxCommunityCards = 0
        self.maxHandCards = 7
        self.gameType = 1

    def getGameType(self):
        """Identify this table's game as seven-card stud."""
        return PlayingCardGlobals.SevenStud

    def getInteractText(self):
        """Localized prompt shown when targeting the table."""
        return PLocalizer.InteractTable7StudPoker

    def getSitDownText(self):
        """Localized prompt shown when sitting down at the table."""
        return PLocalizer.PokerSitDown7StudPoker

    def dealerAnim(self, round):
        """Return the dealing animation Sequence for the given round."""
        anim = Sequence()
        if round == 0:
            # Round 0 only resets the local player's UI; nothing is dealt.
            if self.isLocalAvatarSeated():
                self.gui.disableAction()
                self.gui.clearTable()
                for pocket_card in self.PocketCards:
                    pocket_card.hide()
        elif round == 1:
            anim.append(self.dealPlayerCards(numCards=3))
        elif round in (2, 3, 4, 5):
            anim.append(self.dealPlayerCards(numCards=1))
        return anim

    def checkForVisiblePair(self):
        """Delegate visible-pair detection to the seven-stud rule helper."""
        return self.sevenStudCheckForVisiblePair(self.playerHands)
from pirates.minigame import PlayingCardGlobals
from pirates.minigame import DistributedPokerTable
from direct.interval.IntervalGlobal import *
from pandac.PandaModules import Point3, Vec3
from pirates.piratesbase import PLocalizer
class Distributed7StudTable(DistributedPokerTable.DistributedPokerTable):
__module__ = __name__
def __init__(self, cr):
DistributedPokerTable.DistributedPokerTable.__init__(self, cr, '7stud', numRounds=6)
self.maxCommunityCards = 0
self.maxHandCards = 7
self.gameType = 1
def getGameType(self):
return PlayingCardGlobals.SevenStud
def getInteractText(self):
return PLocalizer.InteractTable7StudPoker
def getSitDownText(self):
return PLocalizer.PokerSitDown7StudPoker
def dealerAnim(self, round):
deals = Sequence()
if round == 0:
if self.isLocalAvatarSeated():
self.gui.disableAction()
self.gui.clearTable()
for card in self.PocketCards:
card.hide()
if round == 1:
deals.append(self.dealPlayerCards(numCards=3))
if round in [2, 3, 4, 5]:
deals.append(self.dealPlayerCards(numCards=1))
return deals
def checkForVisiblePair(self):
return self.sevenStudCheckForVisiblePair(self.playerHands) | true | true |
f71eab8b25014501aa6d123e70fba4506c095cea | 10,366 | py | Python | youtube_dl/extractor/svt.py | NessDan/youtube-dl | 62280188e6fa692f1dd1253eb21eb4b7a5e5fc20 | [
"Unlicense"
] | 24 | 2017-03-17T10:27:12.000Z | 2022-02-16T05:55:50.000Z | youtube_dl/extractor/svt.py | NessDan/youtube-dl | 62280188e6fa692f1dd1253eb21eb4b7a5e5fc20 | [
"Unlicense"
] | 7 | 2017-07-26T08:15:27.000Z | 2018-09-20T12:56:53.000Z | youtube_dl/extractor/svt.py | NessDan/youtube-dl | 62280188e6fa692f1dd1253eb21eb4b7a5e5fc20 | [
"Unlicense"
] | 3 | 2017-03-17T10:27:13.000Z | 2019-01-28T01:19:17.000Z | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_urllib_parse_urlparse,
)
from ..utils import (
determine_ext,
dict_get,
int_or_none,
try_get,
urljoin,
compat_str,
)
class SVTBaseIE(InfoExtractor):
    """Shared extraction logic for SVT (Sveriges Television) services."""
    # SVT content is generally geo-restricted to Sweden.
    _GEO_COUNTRIES = ['SE']
    def _extract_video(self, video_info, video_id):
        """Build a youtube-dl info dict from SVT's video-info JSON.

        Collects formats (HLS/HDS/DASH/progressive), subtitles and episode
        metadata; raises a geo-restriction error when no format is usable
        and the rights block marks the video as blocked outside Sweden.
        """
        is_live = dict_get(video_info, ('live', 'simulcast'), default=False)
        # Live HLS streams must not use the native m3u8 downloader.
        m3u8_protocol = 'm3u8' if is_live else 'm3u8_native'
        formats = []
        for vr in video_info['videoReferences']:
            player_type = vr.get('playerType') or vr.get('format')
            vurl = vr['url']
            ext = determine_ext(vurl)
            if ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    vurl, video_id,
                    ext='mp4', entry_protocol=m3u8_protocol,
                    m3u8_id=player_type, fatal=False))
            elif ext == 'f4m':
                formats.extend(self._extract_f4m_formats(
                    vurl + '?hdcore=3.3.0', video_id,
                    f4m_id=player_type, fatal=False))
            elif ext == 'mpd':
                # Only the HbbTV DASH manifest variant is handled here.
                if player_type == 'dashhbbtv':
                    formats.extend(self._extract_mpd_formats(
                        vurl, video_id, mpd_id=player_type, fatal=False))
            else:
                # Progressive/unknown container: keep the URL untouched.
                formats.append({
                    'format_id': player_type,
                    'url': vurl,
                })
        if not formats and video_info.get('rights', {}).get('geoBlockedSweden'):
            self.raise_geo_restricted(
                'This video is only available in Sweden',
                countries=self._GEO_COUNTRIES)
        self._sort_formats(formats)
        subtitles = {}
        subtitle_references = dict_get(video_info, ('subtitles', 'subtitleReferences'))
        if isinstance(subtitle_references, list):
            for sr in subtitle_references:
                subtitle_url = sr.get('url')
                subtitle_lang = sr.get('language', 'sv')
                if subtitle_url:
                    if determine_ext(subtitle_url) == 'm3u8':
                        # TODO(yan12125): handle WebVTT in m3u8 manifests
                        continue
                    subtitles.setdefault(subtitle_lang, []).append({'url': subtitle_url})
        title = video_info.get('title')
        series = video_info.get('programTitle')
        season_number = int_or_none(video_info.get('season'))
        episode = video_info.get('episodeTitle')
        episode_number = int_or_none(video_info.get('episodeNumber'))
        duration = int_or_none(dict_get(video_info, ('materialLength', 'contentDuration')))
        age_limit = None
        # skip_false_values=False keeps an explicit False (-> age_limit 0)
        # distinct from a missing key (-> age_limit stays None/unknown).
        adult = dict_get(
            video_info, ('inappropriateForChildren', 'blockedForChildren'),
            skip_false_values=False)
        if adult is not None:
            age_limit = 18 if adult else 0
        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'subtitles': subtitles,
            'duration': duration,
            'age_limit': age_limit,
            'series': series,
            'season_number': season_number,
            'episode': episode,
            'episode_number': episode_number,
            'is_live': is_live,
        }
class SVTIE(SVTBaseIE):
    """Extractor for videos embedded via svt.se widget (wd) URLs."""
    _VALID_URL = r'https?://(?:www\.)?svt\.se/wd\?(?:.*?&)?widgetId=(?P<widget_id>\d+)&.*?\barticleId=(?P<id>\d+)'
    _TEST = {
        # Fix: the query string had been mangled by HTML-entity decoding
        # ("&sect" -> "§"), corrupting "&sectionId" into "§ionId".
        'url': 'http://www.svt.se/wd?widgetId=23991&sectionId=541&articleId=2900353&type=embed&contextSectionId=123&autostart=false',
        'md5': '33e9a5d8f646523ce0868ecfb0eed77d',
        'info_dict': {
            'id': '2900353',
            'ext': 'mp4',
            'title': 'Stjärnorna skojar till det - under SVT-intervjun',
            'duration': 27,
            'age_limit': 0,
        },
    }
    @staticmethod
    def _extract_url(webpage):
        """Return the first embedded SVT widget URL found in *webpage*,
        or None when the page embeds no SVT widget."""
        mobj = re.search(
            r'(?:<iframe src|href)="(?P<url>%s[^"]*)"' % SVTIE._VALID_URL, webpage)
        if mobj:
            return mobj.group('url')
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        widget_id = mobj.group('widget_id')
        article_id = mobj.group('id')
        info = self._download_json(
            'http://www.svt.se/wd?widgetId=%s&articleId=%s&format=json&type=embed&output=json' % (widget_id, article_id),
            article_id)
        info_dict = self._extract_video(info['video'], article_id)
        # The article context carries the human-readable title.
        info_dict['title'] = info['context']['title']
        return info_dict
class SVTPlayBaseIE(SVTBaseIE):
    """Base for extractors that parse SVT Play's embedded page state."""
    # Matches the inline `root['__svtplay'] = {...};` assignment embedded in
    # SVT Play pages; the JSON payload is captured in the "json" group.
    _SVTPLAY_RE = r'root\s*\[\s*(["\'])_*svtplay\1\s*\]\s*=\s*(?P<json>{.+?})\s*;\s*\n'
class SVTPlayIE(SVTPlayBaseIE):
    """Extractor for individual videos on SVT Play and Öppet arkiv."""
    IE_DESC = 'SVT Play and Öppet arkiv'
    _VALID_URL = r'https?://(?:www\.)?(?:svtplay|oppetarkiv)\.se/(?:video|klipp|kanaler)/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'http://www.svtplay.se/video/5996901/flygplan-till-haile-selassie/flygplan-till-haile-selassie-2',
        'md5': '2b6704fe4a28801e1a098bbf3c5ac611',
        'info_dict': {
            'id': '5996901',
            'ext': 'mp4',
            'title': 'Flygplan till Haile Selassie',
            'duration': 3527,
            'thumbnail': r're:^https?://.*[\.-]jpg$',
            'age_limit': 0,
            'subtitles': {
                'sv': [{
                    'ext': 'wsrt',
                }]
            },
        },
    }, {
        # geo restricted to Sweden
        'url': 'http://www.oppetarkiv.se/video/5219710/trollflojten',
        'only_matching': True,
    }, {
        'url': 'http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg',
        'only_matching': True,
    }, {
        'url': 'https://www.svtplay.se/kanaler/svt1',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # First attempt: the __svtplay JSON blob embedded in the markup.
        data = self._parse_json(
            self._search_regex(
                self._SVTPLAY_RE, webpage, 'embedded data', default='{}',
                group='json'),
            video_id, fatal=False)
        thumbnail = self._og_search_thumbnail(webpage)
        def adjust_title(info):
            # Decorate titles of live streams with the standard live marker.
            if info['is_live']:
                info['title'] = self._live_title(info['title'])
        if data:
            video_info = try_get(
                data, lambda x: x['context']['dispatcher']['stores']['VideoTitlePageStore']['data']['video'],
                dict)
            if video_info:
                info_dict = self._extract_video(video_info, video_id)
                info_dict.update({
                    'title': data['context']['dispatcher']['stores']['MetaStore']['title'],
                    'thumbnail': thumbnail,
                })
                adjust_title(info_dict)
                return info_dict
        # Fallback: read the data-video-id attribute and query the video API.
        video_id = self._search_regex(
            r'<video[^>]+data-video-id=["\']([\da-zA-Z-]+)',
            webpage, 'video id', default=None)
        if video_id:
            data = self._download_json(
                'https://api.svt.se/videoplayer-api/video/%s' % video_id,
                video_id, headers=self.geo_verification_headers())
            info_dict = self._extract_video(data, video_id)
            if not info_dict.get('title'):
                # Drop everything from the first " | " separator onwards.
                info_dict['title'] = re.sub(
                    r'\s*\|\s*.+?$', '',
                    info_dict.get('episode') or self._og_search_title(webpage))
            adjust_title(info_dict)
            return info_dict
class SVTSeriesIE(SVTPlayBaseIE):
    """Playlist extractor for full series (or single seasons) on SVT Play."""
    _VALID_URL = r'https?://(?:www\.)?svtplay\.se/(?P<id>[^/?&#]+)'
    _TESTS = [{
        'url': 'https://www.svtplay.se/rederiet',
        'info_dict': {
            'id': 'rederiet',
            'title': 'Rederiet',
            'description': 'md5:505d491a58f4fcf6eb418ecab947e69e',
        },
        'playlist_mincount': 318,
    }, {
        'url': 'https://www.svtplay.se/rederiet?tab=sasong2',
        'info_dict': {
            'id': 'rederiet-sasong2',
            'title': 'Rederiet - Säsong 2',
            'description': 'md5:505d491a58f4fcf6eb418ecab947e69e',
        },
        'playlist_count': 12,
    }]
    @classmethod
    def suitable(cls, url):
        # Defer to the more specific single-video extractors first.
        return False if SVTIE.suitable(url) or SVTPlayIE.suitable(url) else super(SVTSeriesIE, cls).suitable(url)
    def _real_extract(self, url):
        series_id = self._match_id(url)
        # A "?tab=<season-slug>" query restricts extraction to one season.
        qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
        season_slug = qs.get('tab', [None])[0]
        if season_slug:
            series_id += '-%s' % season_slug
        webpage = self._download_webpage(
            url, series_id, 'Downloading series page')
        root = self._parse_json(
            self._search_regex(
                self._SVTPLAY_RE, webpage, 'content', group='json'),
            series_id)
        season_name = None
        entries = []
        for season in root['relatedVideoContent']['relatedVideosAccordion']:
            if not isinstance(season, dict):
                continue
            if season_slug:
                if season.get('slug') != season_slug:
                    continue
                season_name = season.get('name')
            videos = season.get('videos')
            if not isinstance(videos, list):
                continue
            for video in videos:
                content_url = video.get('contentUrl')
                if not content_url or not isinstance(content_url, compat_str):
                    continue
                entries.append(
                    self.url_result(
                        urljoin(url, content_url),
                        ie=SVTPlayIE.ie_key(),
                        video_title=video.get('title')
                    ))
        metadata = root.get('metaData')
        if not isinstance(metadata, dict):
            metadata = {}
        title = metadata.get('title')
        season_name = season_name or season_slug
        # Compose "<Series> - <Season>" when a season was requested.
        if title and season_name:
            title = '%s - %s' % (title, season_name)
        elif season_slug:
            title = season_slug
        return self.playlist_result(
            entries, series_id, title, metadata.get('description'))
| 35.138983 | 133 | 0.54071 |
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_urllib_parse_urlparse,
)
from ..utils import (
determine_ext,
dict_get,
int_or_none,
try_get,
urljoin,
compat_str,
)
class SVTBaseIE(InfoExtractor):
_GEO_COUNTRIES = ['SE']
def _extract_video(self, video_info, video_id):
is_live = dict_get(video_info, ('live', 'simulcast'), default=False)
m3u8_protocol = 'm3u8' if is_live else 'm3u8_native'
formats = []
for vr in video_info['videoReferences']:
player_type = vr.get('playerType') or vr.get('format')
vurl = vr['url']
ext = determine_ext(vurl)
if ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
vurl, video_id,
ext='mp4', entry_protocol=m3u8_protocol,
m3u8_id=player_type, fatal=False))
elif ext == 'f4m':
formats.extend(self._extract_f4m_formats(
vurl + '?hdcore=3.3.0', video_id,
f4m_id=player_type, fatal=False))
elif ext == 'mpd':
if player_type == 'dashhbbtv':
formats.extend(self._extract_mpd_formats(
vurl, video_id, mpd_id=player_type, fatal=False))
else:
formats.append({
'format_id': player_type,
'url': vurl,
})
if not formats and video_info.get('rights', {}).get('geoBlockedSweden'):
self.raise_geo_restricted(
'This video is only available in Sweden',
countries=self._GEO_COUNTRIES)
self._sort_formats(formats)
subtitles = {}
subtitle_references = dict_get(video_info, ('subtitles', 'subtitleReferences'))
if isinstance(subtitle_references, list):
for sr in subtitle_references:
subtitle_url = sr.get('url')
subtitle_lang = sr.get('language', 'sv')
if subtitle_url:
if determine_ext(subtitle_url) == 'm3u8':
continue
subtitles.setdefault(subtitle_lang, []).append({'url': subtitle_url})
title = video_info.get('title')
series = video_info.get('programTitle')
season_number = int_or_none(video_info.get('season'))
episode = video_info.get('episodeTitle')
episode_number = int_or_none(video_info.get('episodeNumber'))
duration = int_or_none(dict_get(video_info, ('materialLength', 'contentDuration')))
age_limit = None
adult = dict_get(
video_info, ('inappropriateForChildren', 'blockedForChildren'),
skip_false_values=False)
if adult is not None:
age_limit = 18 if adult else 0
return {
'id': video_id,
'title': title,
'formats': formats,
'subtitles': subtitles,
'duration': duration,
'age_limit': age_limit,
'series': series,
'season_number': season_number,
'episode': episode,
'episode_number': episode_number,
'is_live': is_live,
}
class SVTIE(SVTBaseIE):
_VALID_URL = r'https?://(?:www\.)?svt\.se/wd\?(?:.*?&)?widgetId=(?P<widget_id>\d+)&.*?\barticleId=(?P<id>\d+)'
_TEST = {
'url': 'http://www.svt.se/wd?widgetId=23991§ionId=541&articleId=2900353&type=embed&contextSectionId=123&autostart=false',
'md5': '33e9a5d8f646523ce0868ecfb0eed77d',
'info_dict': {
'id': '2900353',
'ext': 'mp4',
'title': 'Stjärnorna skojar till det - under SVT-intervjun',
'duration': 27,
'age_limit': 0,
},
}
@staticmethod
def _extract_url(webpage):
mobj = re.search(
r'(?:<iframe src|href)="(?P<url>%s[^"]*)"' % SVTIE._VALID_URL, webpage)
if mobj:
return mobj.group('url')
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
widget_id = mobj.group('widget_id')
article_id = mobj.group('id')
info = self._download_json(
'http://www.svt.se/wd?widgetId=%s&articleId=%s&format=json&type=embed&output=json' % (widget_id, article_id),
article_id)
info_dict = self._extract_video(info['video'], article_id)
info_dict['title'] = info['context']['title']
return info_dict
class SVTPlayBaseIE(SVTBaseIE):
_SVTPLAY_RE = r'root\s*\[\s*(["\'])_*svtplay\1\s*\]\s*=\s*(?P<json>{.+?})\s*;\s*\n'
class SVTPlayIE(SVTPlayBaseIE):
IE_DESC = 'SVT Play and Öppet arkiv'
_VALID_URL = r'https?://(?:www\.)?(?:svtplay|oppetarkiv)\.se/(?:video|klipp|kanaler)/(?P<id>[^/?
_TESTS = [{
'url': 'http://www.svtplay.se/video/5996901/flygplan-till-haile-selassie/flygplan-till-haile-selassie-2',
'md5': '2b6704fe4a28801e1a098bbf3c5ac611',
'info_dict': {
'id': '5996901',
'ext': 'mp4',
'title': 'Flygplan till Haile Selassie',
'duration': 3527,
'thumbnail': r're:^https?://.*[\.-]jpg$',
'age_limit': 0,
'subtitles': {
'sv': [{
'ext': 'wsrt',
}]
},
},
}, {
# geo restricted to Sweden
'url': 'http://www.oppetarkiv.se/video/5219710/trollflojten',
'only_matching': True,
}, {
'url': 'http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg',
'only_matching': True,
}, {
'url': 'https://www.svtplay.se/kanaler/svt1',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
data = self._parse_json(
self._search_regex(
self._SVTPLAY_RE, webpage, 'embedded data', default='{}',
group='json'),
video_id, fatal=False)
thumbnail = self._og_search_thumbnail(webpage)
def adjust_title(info):
if info['is_live']:
info['title'] = self._live_title(info['title'])
if data:
video_info = try_get(
data, lambda x: x['context']['dispatcher']['stores']['VideoTitlePageStore']['data']['video'],
dict)
if video_info:
info_dict = self._extract_video(video_info, video_id)
info_dict.update({
'title': data['context']['dispatcher']['stores']['MetaStore']['title'],
'thumbnail': thumbnail,
})
adjust_title(info_dict)
return info_dict
video_id = self._search_regex(
r'<video[^>]+data-video-id=["\']([\da-zA-Z-]+)',
webpage, 'video id', default=None)
if video_id:
data = self._download_json(
'https://api.svt.se/videoplayer-api/video/%s' % video_id,
video_id, headers=self.geo_verification_headers())
info_dict = self._extract_video(data, video_id)
if not info_dict.get('title'):
info_dict['title'] = re.sub(
r'\s*\|\s*.+?$', '',
info_dict.get('episode') or self._og_search_title(webpage))
adjust_title(info_dict)
return info_dict
class SVTSeriesIE(SVTPlayBaseIE):
_VALID_URL = r'https?://(?:www\.)?svtplay\.se/(?P<id>[^/?&#]+)'
_TESTS = [{
'url': 'https://www.svtplay.se/rederiet',
'info_dict': {
'id': 'rederiet',
'title': 'Rederiet',
'description': 'md5:505d491a58f4fcf6eb418ecab947e69e',
},
'playlist_mincount': 318,
}, {
'url': 'https://www.svtplay.se/rederiet?tab=sasong2',
'info_dict': {
'id': 'rederiet-sasong2',
'title': 'Rederiet - Säsong 2',
'description': 'md5:505d491a58f4fcf6eb418ecab947e69e',
},
'playlist_count': 12,
}]
@classmethod
def suitable(cls, url):
return False if SVTIE.suitable(url) or SVTPlayIE.suitable(url) else super(SVTSeriesIE, cls).suitable(url)
def _real_extract(self, url):
series_id = self._match_id(url)
qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
season_slug = qs.get('tab', [None])[0]
if season_slug:
series_id += '-%s' % season_slug
webpage = self._download_webpage(
url, series_id, 'Downloading series page')
root = self._parse_json(
self._search_regex(
self._SVTPLAY_RE, webpage, 'content', group='json'),
series_id)
season_name = None
entries = []
for season in root['relatedVideoContent']['relatedVideosAccordion']:
if not isinstance(season, dict):
continue
if season_slug:
if season.get('slug') != season_slug:
continue
season_name = season.get('name')
videos = season.get('videos')
if not isinstance(videos, list):
continue
for video in videos:
content_url = video.get('contentUrl')
if not content_url or not isinstance(content_url, compat_str):
continue
entries.append(
self.url_result(
urljoin(url, content_url),
ie=SVTPlayIE.ie_key(),
video_title=video.get('title')
))
metadata = root.get('metaData')
if not isinstance(metadata, dict):
metadata = {}
title = metadata.get('title')
season_name = season_name or season_slug
if title and season_name:
title = '%s - %s' % (title, season_name)
elif season_slug:
title = season_slug
return self.playlist_result(
entries, series_id, title, metadata.get('description'))
| true | true |
f71eabb5fc9c2151ea1e3e4278d18a8e8b93c07f | 1,032 | py | Python | RecoLocalTracker/SubCollectionProducers/python/test/MCsplitStripsCustomize_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | RecoLocalTracker/SubCollectionProducers/python/test/MCsplitStripsCustomize_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | RecoLocalTracker/SubCollectionProducers/python/test/MCsplitStripsCustomize_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | #
# With this customization the ClusterMCsplitStrips module will be substituted
# for the standard clusterizer. If a cluster is matched to more than one simTrack
# it will be split into the corresponding true clusters.
#
import FWCore.ParameterSet.Config as cms
def splitMCmerged(process):
    """Swap the standard strip clusterizer for the MC-splitting variant.

    The original clusterizer output stays available as
    siStripClustersUnsplit; siStripClusters is re-defined by the loaded
    cfi (it is used again below after being deleted here). The statement
    order matters: clone, remember the sequence position, remove/delete,
    then load and re-insert at the same position.
    """
    process.siStripClustersUnsplit = process.siStripClusters.clone()
    # Remember where the clusterizer sat so the replacement sequence can be
    # inserted at the same position.
    stripClusIndex = process.striptrackerlocalreco.index(process.siStripClusters)
    process.striptrackerlocalreco.remove(process.siStripClusters)
    del process.siStripClusters
    process.load('RecoLocalTracker.SubCollectionProducers.test.ClusterMCsplitStrips_cfi')
    process.siStripClustersMCsplit = cms.Sequence(process.siStripClustersUnsplit*process.siStripClusters)
    process.striptrackerlocalreco.insert(stripClusIndex,process.siStripClustersMCsplit)
    # Override the chargePerCM cut in stripCPE
    process.StripCPEfromTrackAngleESProducer.parameters.maxChgOneMIP = cms.double(-6000.)
    return(process)
| 44.869565 | 146 | 0.782946 |
import FWCore.ParameterSet.Config as cms
def splitMCmerged(process):
process.siStripClustersUnsplit = process.siStripClusters.clone()
stripClusIndex = process.striptrackerlocalreco.index(process.siStripClusters)
process.striptrackerlocalreco.remove(process.siStripClusters)
del process.siStripClusters
process.load('RecoLocalTracker.SubCollectionProducers.test.ClusterMCsplitStrips_cfi')
process.siStripClustersMCsplit = cms.Sequence(process.siStripClustersUnsplit*process.siStripClusters)
process.striptrackerlocalreco.insert(stripClusIndex,process.siStripClustersMCsplit)
process.StripCPEfromTrackAngleESProducer.parameters.maxChgOneMIP = cms.double(-6000.)
return(process)
| true | true |
f71eac7b3cc16b05e2e80303287fcdacd9ff87af | 9,986 | py | Python | src/cmudict_parser/SentenceToIPA.py | stefantaubert/cmudict-parser | 8f5d1b191a41929f1ce8c7acf391c23c08d2be15 | [
"MIT"
] | null | null | null | src/cmudict_parser/SentenceToIPA.py | stefantaubert/cmudict-parser | 8f5d1b191a41929f1ce8c7acf391c23c08d2be15 | [
"MIT"
] | 14 | 2020-12-01T08:45:16.000Z | 2021-06-01T08:00:39.000Z | src/cmudict_parser/SentenceToIPA.py | stefantaubert/cmudict-parser | 8f5d1b191a41929f1ce8c7acf391c23c08d2be15 | [
"MIT"
] | null | null | null | """
Remarks:
https://github.com/cmusphinx/cmudict is newer than 0.7b! It has for example 'declarative' but is has unfortunately no MIT-license.
"""
import string
from logging import getLogger
from typing import Callable, Dict, List, Optional, Tuple, Union
# Characters treated as word delimiters/decoration: every ASCII punctuation
# character plus the newline.
PUNCTUATION_AND_LINEBREAK = f"{string.punctuation}\n"
# Module-level word -> IPA memo shared by all calls with use_caching=True;
# reset it with clear_cache().
IPA_CACHE: Dict[str, str] = {}
def clear_cache() -> None:
  """Drop all memoized word -> IPA transcriptions from IPA_CACHE."""
  IPA_CACHE.clear()
def sentence_to_ipa(dict: Dict[str, str], sentence: str, replace_unknown_with: Optional[Union[str, Callable[[str], str]]], use_caching: bool) -> str:
  """Transcribe every space-separated token of *sentence* to IPA.

  Each token is transcribed independently (optionally through the
  module-level cache) and the results are re-joined with single spaces.
  """
  transcribe = get_ipa_of_word_in_sentence_cache if use_caching else get_ipa_of_word_in_sentence
  tokens = sentence.split(" ")
  return " ".join(transcribe(dict, token, replace_unknown_with) for token in tokens)
def get_ipa_of_word_in_sentence_cache(dict: Dict[str, str], word: str, replace_unknown_with: Optional[Union[str, Callable[[str], str]]]) -> str:
  """Memoizing wrapper around get_ipa_of_word_in_sentence.

  Results are stored in the module-level IPA_CACHE keyed by the raw word
  only, so the cache must be cleared (clear_cache) if the dictionary or
  replace_unknown_with changes between calls.
  """
  # EAFP: one dict lookup instead of a membership test plus a subscript.
  # The previous `global IPA_CACHE` was unnecessary - the cache is only
  # mutated in place, never rebound.
  try:
    return IPA_CACHE[word]
  except KeyError:
    ipa = get_ipa_of_word_in_sentence(dict, word, replace_unknown_with)
    IPA_CACHE[word] = ipa
    return ipa
def get_ipa_of_word_in_sentence(dict: Dict[str, str], word: str, replace_unknown_with: Optional[Union[str, Callable[[str], str]]]) -> str:
  """Transcribe one whitespace-delimited token to IPA.

  Tokens containing any punctuation or newline character are routed
  through the punctuation-aware path; clean tokens are transcribed
  directly.
  """
  # Fix: removed the stale `global IPA_CACHE` statement - this function
  # never reads or writes the cache.
  if any(char in PUNCTUATION_AND_LINEBREAK for char in word):
    return get_ipa_of_word_with_punctuation(dict, word, replace_unknown_with)
  return get_ipa_of_word_without_punctuation_or_unknown_words(dict, word, replace_unknown_with)
def get_ipa_of_word_with_punctuation(dict: Dict[str, str], word: str, replace_unknown_with: Optional[Union[str, Callable[[str], str]]]) -> str:
  """Transcribe a token that contains punctuation.

  Leading punctuation is stripped first; a punctuation-only token passes
  through unchanged. Otherwise the remainder is split into its word core
  and trailing punctuation, and the pieces are transcribed and reglued.
  """
  remainder, leading_punct = extract_punctuation_before_word(word)
  if remainder == "":
    return leading_punct
  core, trailing_punct = extract_punctuation_after_word_except_hyphen_or_apostrophe(remainder)
  return ipa_of_punctuation_and_words_combined(dict, leading_punct, core, trailing_punct, replace_unknown_with)
def extract_punctuation_before_word(word: str) -> Tuple[str, str]:
  """Split leading punctuation/newlines off *word*.

  Returns (rest_of_word, leading_punctuation). Scans with an index and
  slices once instead of re-slicing the string on every iteration, so the
  split is O(n) rather than O(n^2) for long punctuation runs.
  """
  # Same character set as the module-level PUNCTUATION_AND_LINEBREAK,
  # recomputed locally so the function is self-contained.
  stop_chars = string.punctuation + "\n"
  idx = 0
  while idx < len(word) and word[idx] in stop_chars:
    idx += 1
  return word[idx:], word[:idx]
def extract_punctuation_after_word_except_hyphen_or_apostrophe(word: str) -> Tuple[str, str]:
  """Split *word* into its word part and the trailing punctuation.

  The word part is the longest prefix of letters, apostrophes and hyphens;
  everything from the first other character onward is returned as the
  punctuation part. An index scan keeps this O(n) instead of the previous
  O(n^2) repeated re-slicing.
  """
  idx = 0
  while idx < len(word) and (word[idx].isalpha() or word[idx] in "'-"):
    idx += 1
  return word[:idx], word[idx:]
def ipa_of_punctuation_and_words_combined(dict: Dict[str, str], punctuation_before_word: str, word_without_punctuation: str, punctuation_after_word: str, replace_unknown_with: Optional[Union[str, Callable[[str], str]]]) -> str:
  """Transcribe a word while deciding which surrounding apostrophes belong
  to the word itself (e.g. 'tis, goin', 'em) rather than to punctuation.

  The branches run from the most to the least specific dictionary match:
  apostrophes on both sides, apostrophe at the beginning, apostrophe at
  the end, hyphenated compound, then the bare word. Matched apostrophes
  are consumed from the punctuation strings.
  """
  assert word_without_punctuation != "" and word_without_punctuation[0].isalpha()
  word_without_punctuation, char_at_end, word_with_apo_at_beginning, word_with_apo_at_end, word_with_apo_at_end_and_beginning = word_with_apo(
    word_without_punctuation)
  if punctuation_before_word != "" and punctuation_before_word[-1] == "'" and char_at_end == "'" and word_with_apo_at_end_and_beginning.upper() in dict:
    # "'word'" as a whole is a dictionary entry; consume the leading apostrophe.
    punctuation_before_word = punctuation_before_word[:-1]
    ipa_of_word_without_punct = get_ipa_of_word_without_punctuation_or_unknown_words(
      dict, word_with_apo_at_end_and_beginning, replace_unknown_with)
  elif punctuation_before_word != "" and word_with_apo_at_beginning.upper() in dict and punctuation_before_word[-1] == "'":
    # "'word" is a dictionary entry; any trailing char is appended verbatim.
    punctuation_before_word = punctuation_before_word[:-1]
    ipa_of_word_without_punct = f"{get_ipa_of_word_without_punctuation_or_unknown_words(dict, word_with_apo_at_beginning, replace_unknown_with)}{char_at_end}"
  elif word_with_apo_at_end.upper() in dict and char_at_end == "'":
    # "word'" is a dictionary entry (e.g. goin').
    ipa_of_word_without_punct = get_ipa_of_word_without_punctuation_or_unknown_words(
      dict, word_with_apo_at_end, replace_unknown_with)
  elif "-" in word_without_punctuation and not word_without_punctuation.upper() in dict:
    # Hyphenated compound unknown as a whole: transcribe piecewise.
    ipa_of_word_without_punct = f"{get_ipa_of_words_with_hyphen(dict, word_without_punctuation, replace_unknown_with)}{char_at_end}"
  else:
    ipa_of_word_without_punct = f"{get_ipa_of_word_without_punctuation_or_unknown_words(dict, word_without_punctuation, replace_unknown_with)}{char_at_end}"
  return value_depending_on_is_alphabetic_value_in_punctuation_after_word(dict, punctuation_before_word, ipa_of_word_without_punct, punctuation_after_word, replace_unknown_with)
def word_with_apo(word_without_punctuation: str) -> Tuple[str, str, str, str, str]:
  """Split off a trailing hyphen/apostrophe and build apostrophe variants.

  Returns a 5-tuple: (core, trailing_char, 'core, core', 'core') where
  trailing_char is "-" or "'" when the input ends with one, else "".
  """
  core = word_without_punctuation
  trailing = ""
  if core[-1] in "-'":
    trailing = core[-1]
    core = core[:-1]
  return core, trailing, f"'{core}", f"{core}'", f"'{core}'"
def value_depending_on_is_alphabetic_value_in_punctuation_after_word(dict: Dict[str, str], punctuation_before_word: str, ipa_of_word_without_punct: str, punctuation_after_word: str, replace_unknown_with: Optional[Union[str, Callable[[str], str]]]) -> str:
  """Glue the transcribed word back together with its surrounding text,
  recursing on the trailing part when it still contains letters."""
  contains_letters = any(ch.isalpha() for ch in punctuation_after_word)
  if contains_letters:
    tail = get_ipa_of_word_with_punctuation(dict, punctuation_after_word, replace_unknown_with)
  else:
    tail = punctuation_after_word
  return f"{punctuation_before_word}{ipa_of_word_without_punct}{tail}"
def get_ipa_of_words_with_hyphen(dict: Dict[str, str], word: str, replace_unknown_with: Optional[Union[str, Callable[[str], str]]]) -> str:
  """Transcribe a hyphenated word.

  Tries progressively shorter windows of hyphen-joined segments against
  the dictionary (longest first). When nothing matches, every segment is
  transcribed on its own and rejoined with hyphens.
  """
  segments = word.split("-")
  for window_len in range(len(segments), 0, -1):
    combined_ipa = find_combination_of_certain_length_in_dict(
      dict, segments, window_len, replace_unknown_with)
    if combined_ipa is not None:
      return combined_ipa
  return "-".join(
    get_ipa_of_word_without_punctuation_or_unknown_words(dict, segment, replace_unknown_with)
    for segment in segments)
def find_combination_of_certain_length_in_dict(dict: Dict[str, str], parts: List[str], length_of_combination: int, replace_unknown_with: Optional[Union[str, Callable[[str], str]]]) -> Optional[str]:
  """Slide a window of *length_of_combination* segments over *parts*,
  looking for a rejoined hyphenated word that the dictionary knows.

  On the first hit, the segments before and after the window are
  transcribed recursively and spliced around the dictionary entry.
  Returns None when no window of this length matches.
  """
  assert all_keys_are_upper(dict)
  for startword_pos in range(len(parts) - length_of_combination + 1):
    combination = recombine_word(parts, startword_pos, startword_pos + length_of_combination)
    # Edge apostrophes may be quotation marks rather than part of the
    # word; keep only those the dictionary actually knows about.
    word, apos_before, apos_after = strip_apos_at_beginning_and_end_if_they_do_not_belong_to_word(
      dict, combination)
    if word.upper() in dict:
      word_before, hyphen_before = word_and_hyphen_before_or_after(parts, 0, startword_pos)
      word_after, hyphen_after = word_and_hyphen_before_or_after(
        parts, startword_pos + length_of_combination, len(parts))
      return f"{get_ipa_of_word_in_sentence(dict, word_before, replace_unknown_with)}{hyphen_before}{apos_before}{dict[word.upper()]}{apos_after}{hyphen_after}{get_ipa_of_word_in_sentence(dict, word_after, replace_unknown_with)}"
  return None
def strip_apos_at_beginning_and_end_if_they_do_not_belong_to_word(dict: Dict[str, str], word: str) -> Tuple[str, str, str]:
  """Strip edge apostrophes, then hand one back per side when the
  dictionary knows the word with that apostrophe attached.

  Returns (word, remaining_leading_apostrophes, remaining_trailing_apostrophes).
  """
  word, apos_before = strip_apos(word, 0)
  word, apos_after = strip_apos(word, -1)
  # Trailing side first, matching the lookup order of the dictionary keys.
  if apos_after != "" and f"{word}'".upper() in dict:
    word = f"{word}'"
    apos_after = apos_after[:-1]
  if apos_before != "" and f"'{word}".upper() in dict:
    word = f"'{word}"
    apos_before = apos_before[:-1]
  return word, apos_before, apos_after
def strip_apos(word: str, pos: int) -> Tuple[str, str]:
  """Strip a run of apostrophes from one end of *word*.

  ``pos`` must be 0 (strip from the front) or -1 (strip from the back).
  Returns (stripped_word, removed_apostrophes). Counts the run first and
  slices once, replacing the previous O(n^2) repeated re-slicing.
  """
  assert pos in (0, -1)
  count = 0
  if pos == 0:
    while count < len(word) and word[count] == "'":
      count += 1
    return word[count:], "'" * count
  while count < len(word) and word[-1 - count] == "'":
    count += 1
  return word[:len(word) - count], "'" * count
def word_and_hyphen_before_or_after(parts: List[str], startpos: int, endpos: int) -> Tuple[str, str]:
  """Rejoin parts[startpos:endpos] plus the hyphen connecting it to the
  matched window; both results are empty when the range is empty."""
  if endpos == 0 or startpos == len(parts):
    return "", ""
  return recombine_word(parts, startpos, endpos), "-"
def recombine_word(parts: List[str], startpos: int, endpos: int) -> str:
  """Join parts[startpos:endpos] back into a single hyphenated word."""
  assert 0 <= startpos < endpos <= len(parts)
  return "-".join(parts[startpos:endpos])
def get_ipa_of_word_without_punctuation_or_unknown_words(dict: Dict[str, str], word: str, replace_unknown_with: Optional[Union[str, Callable[[str], str]]]) -> str:
  """Transcribe a single clean word.

  Lookup order: empty string passes through; a dictionary hit wins; an
  all-uppercase alphabetic word is spelled out letter by letter (acronym
  handling); otherwise the word is unknown and replace_unknown_with
  decides the result: None keeps the word verbatim, a str repeats that
  (0- or 1-char) string per letter, and a callable is applied to the word.
  """
  assert all_keys_are_upper(dict)
  if word == "":
    return ""
  if word.upper() in dict:
    return dict[word.upper()]
  if word_is_really_upper(word):
    return big_letters_to_ipa(dict, word)
  if replace_unknown_with is None:
    return word
  if isinstance(replace_unknown_with, str):
    return replace_unknown_with_is_string(word, replace_unknown_with)
  return replace_unknown_with(word)
def replace_unknown_with_is_string(word: str, replace_unknown_with: str) -> str:
  """Replace an unknown word by repeating the replacement character once
  per letter, logging a warning about the substitution.

  Raises ValueError when the replacement is longer than one character.
  """
  assert isinstance(replace_unknown_with, str)
  if len(replace_unknown_with) >= 2:
    raise ValueError("Parameter replace_unknown_with can only be 0 or 1 char.")
  replacement = replace_unknown_with * len(word)
  getLogger(__name__).warning(f"Replaced {word} with {replacement}")
  return replacement
def word_is_really_upper(word: str) -> bool:
  """True iff *word* is non-empty, purely alphabetic and all uppercase."""
  return word.isalpha() and word.isupper()
def big_letters_to_ipa(dict: Dict[str, str], word: str) -> str:
  """Spell out an all-uppercase word (acronym) letter by letter.

  Every single letter must itself be a dictionary key; the per-letter
  transcriptions are concatenated with no separator.
  """
  assert all_keys_are_upper(dict)
  assert word_is_really_upper(word) or word == ""
  ipa = ""
  for char in word:
    assert char in dict
    ipa += dict[char]
  return ipa
def all_keys_are_upper(dict: Dict[str, str]) -> bool:
  """Return True iff every key of *dict* is uppercase.

  Vacuously True for an empty mapping. Uses the idiomatic all() with a
  generator instead of the previous manual loop; iterating the dict
  directly iterates its keys.
  """
  return all(key.isupper() for key in dict)
| 46.446512 | 255 | 0.763168 |
import string
from logging import getLogger
from typing import Callable, Dict, List, Optional, Tuple, Union
PUNCTUATION_AND_LINEBREAK = f"{string.punctuation}\n"
IPA_CACHE: Dict[str, str] = {}
def clear_cache() -> None:
IPA_CACHE.clear()
def sentence_to_ipa(dict: Dict[str, str], sentence: str, replace_unknown_with: Optional[Union[str, Callable[[str], str]]], use_caching: bool) -> str:
words = sentence.split(" ")
if use_caching:
ipa_words = [get_ipa_of_word_in_sentence_cache(
dict, word, replace_unknown_with) for word in words]
else:
ipa_words = [get_ipa_of_word_in_sentence(dict, word, replace_unknown_with) for word in words]
res = " ".join(ipa_words)
return res
def get_ipa_of_word_in_sentence_cache(dict: Dict[str, str], word: str, replace_unknown_with: Optional[Union[str, Callable[[str], str]]]) -> str:
global IPA_CACHE
if word in IPA_CACHE:
return IPA_CACHE[word]
else:
ipa = get_ipa_of_word_in_sentence(dict, word, replace_unknown_with)
IPA_CACHE[word] = ipa
return ipa
def get_ipa_of_word_in_sentence(dict: Dict[str, str], word: str, replace_unknown_with: Optional[Union[str, Callable[[str], str]]]) -> str:
global IPA_CACHE
if any(char in PUNCTUATION_AND_LINEBREAK for char in word):
ipa = get_ipa_of_word_with_punctuation(dict, word, replace_unknown_with)
else:
ipa = get_ipa_of_word_without_punctuation_or_unknown_words(dict, word, replace_unknown_with)
return ipa
def get_ipa_of_word_with_punctuation(dict: Dict[str, str], word: str, replace_unknown_with: Optional[Union[str, Callable[[str], str]]]) -> str:
word, punctuation_before_word = extract_punctuation_before_word(word)
if word == "":
return punctuation_before_word
word_without_punctuation, punctuation_after_word = extract_punctuation_after_word_except_hyphen_or_apostrophe(
word)
return ipa_of_punctuation_and_words_combined(dict, punctuation_before_word, word_without_punctuation, punctuation_after_word, replace_unknown_with)
def extract_punctuation_before_word(word: str) -> Tuple[str, str]:
punctuation_before_word = ""
while word != "" and (word[0] in PUNCTUATION_AND_LINEBREAK):
punctuation_before_word += word[0]
word = word[1:]
return word, punctuation_before_word
def extract_punctuation_after_word_except_hyphen_or_apostrophe(word: str) -> Tuple[str, str]:
punctuation_after_word = word
word_without_punctuation = ""
while punctuation_after_word != "" and (punctuation_after_word[0].isalpha() or punctuation_after_word[0] in "'-"):
word_without_punctuation += punctuation_after_word[0]
punctuation_after_word = punctuation_after_word[1:]
return word_without_punctuation, punctuation_after_word
def ipa_of_punctuation_and_words_combined(dict: Dict[str, str], punctuation_before_word: str, word_without_punctuation: str, punctuation_after_word: str, replace_unknown_with: Optional[Union[str, Callable[[str], str]]]) -> str:
assert word_without_punctuation != "" and word_without_punctuation[0].isalpha()
word_without_punctuation, char_at_end, word_with_apo_at_beginning, word_with_apo_at_end, word_with_apo_at_end_and_beginning = word_with_apo(
word_without_punctuation)
if punctuation_before_word != "" and punctuation_before_word[-1] == "'" and char_at_end == "'" and word_with_apo_at_end_and_beginning.upper() in dict:
punctuation_before_word = punctuation_before_word[:-1]
ipa_of_word_without_punct = get_ipa_of_word_without_punctuation_or_unknown_words(
dict, word_with_apo_at_end_and_beginning, replace_unknown_with)
elif punctuation_before_word != "" and word_with_apo_at_beginning.upper() in dict and punctuation_before_word[-1] == "'":
punctuation_before_word = punctuation_before_word[:-1]
ipa_of_word_without_punct = f"{get_ipa_of_word_without_punctuation_or_unknown_words(dict, word_with_apo_at_beginning, replace_unknown_with)}{char_at_end}"
elif word_with_apo_at_end.upper() in dict and char_at_end == "'":
ipa_of_word_without_punct = get_ipa_of_word_without_punctuation_or_unknown_words(
dict, word_with_apo_at_end, replace_unknown_with)
elif "-" in word_without_punctuation and not word_without_punctuation.upper() in dict:
ipa_of_word_without_punct = f"{get_ipa_of_words_with_hyphen(dict, word_without_punctuation, replace_unknown_with)}{char_at_end}"
else:
ipa_of_word_without_punct = f"{get_ipa_of_word_without_punctuation_or_unknown_words(dict, word_without_punctuation, replace_unknown_with)}{char_at_end}"
return value_depending_on_is_alphabetic_value_in_punctuation_after_word(dict, punctuation_before_word, ipa_of_word_without_punct, punctuation_after_word, replace_unknown_with)
def word_with_apo(word_without_punctuation: str) -> Tuple[str, str, str, str, str]:
    """Detach a trailing hyphen/apostrophe and build apostrophe-wrapped
    lookup variants of the word.

    Returns a 5-tuple: (core word, detached trailing char or "", word with
    a leading apostrophe, word with a trailing apostrophe, word wrapped in
    apostrophes on both sides).
    """
    core = word_without_punctuation
    tail = ""
    if core[-1] in "-'":
        core, tail = core[:-1], core[-1]
    return core, tail, f"'{core}", f"{core}'", f"'{core}'"
def value_depending_on_is_alphabetic_value_in_punctuation_after_word(dict: Dict[str, str], punctuation_before_word: str, ipa_of_word_without_punct: str, punctuation_after_word: str, replace_unknown_with: Optional[Union[str, Callable[[str], str]]]) -> str:
    """Glue the leading punctuation, the word's IPA and the trailing text
    together.

    Trailing text that contains any letter is itself transcribed (it holds
    another word); purely non-alphabetic trailing punctuation is appended
    verbatim.
    """
    contains_letter = any(ch.isalpha() for ch in punctuation_after_word)
    if contains_letter:
        tail = get_ipa_of_word_with_punctuation(dict, punctuation_after_word, replace_unknown_with)
    else:
        tail = punctuation_after_word
    return f"{punctuation_before_word}{ipa_of_word_without_punct}{tail}"
def get_ipa_of_words_with_hyphen(dict: Dict[str, str], word: str, replace_unknown_with: Optional[Union[str, Callable[[str], str]]]) -> str:
    """Transcribe a hyphenated word by finding the longest run of adjacent
    hyphen-separated parts that exists in the dictionary as one entry.

    Falls back to transcribing every part on its own when no combination
    of parts is a known entry.
    """
    parts = word.split("-")
    ipa = ""
    # Try the longest combination first, shrinking the window by one part
    # per iteration; find_combination_of_certain_length_in_dict returns
    # None when no combination of that length is in the dictionary.
    for length_of_combination in range(len(parts), 0, -1):
        ipa = find_combination_of_certain_length_in_dict(
            dict, parts, length_of_combination, replace_unknown_with)
        if ipa is not None:
            break
    if ipa is None:
        # No combination matched: transcribe each part individually and
        # re-join with the original hyphens.
        unknown_list = [get_ipa_of_word_without_punctuation_or_unknown_words(
            dict, part, replace_unknown_with) for part in parts]
        ipa = "-".join(unknown_list)
    return ipa
def find_combination_of_certain_length_in_dict(dict: Dict[str, str], parts: List[str], length_of_combination: int, replace_unknown_with: Optional[Union[str, Callable[[str], str]]]) -> Optional[str]:
    """Slide a window of ``length_of_combination`` hyphen-parts over
    ``parts``; on the first window whose re-joined word is a dictionary
    entry, return the full transcription (prefix + match + suffix).

    Returns None when no window of that length matches.
    """
    assert all_keys_are_upper(dict)
    for startword_pos in range(len(parts) - length_of_combination + 1):
        combination = recombine_word(parts, startword_pos, startword_pos + length_of_combination)
        # Apostrophes that merely wrap the combination are peeled off
        # before the lookup and re-attached around the matched IPA.
        word, apos_before, apos_after = strip_apos_at_beginning_and_end_if_they_do_not_belong_to_word(
            dict, combination)
        if word.upper() in dict:
            word_before, hyphen_before = word_and_hyphen_before_or_after(parts, 0, startword_pos)
            word_after, hyphen_after = word_and_hyphen_before_or_after(
                parts, startword_pos + length_of_combination, len(parts))
            # Recurse on the unmatched prefix/suffix via the sentence-level
            # transcriber so nested hyphenations are handled too.
            return f"{get_ipa_of_word_in_sentence(dict, word_before, replace_unknown_with)}{hyphen_before}{apos_before}{dict[word.upper()]}{apos_after}{hyphen_after}{get_ipa_of_word_in_sentence(dict, word_after, replace_unknown_with)}"
    return None
def strip_apos_at_beginning_and_end_if_they_do_not_belong_to_word(dict: Dict[str, str], word: str) -> Tuple[str, str, str]:
    """Strip leading/trailing apostrophes from *word*, then give one back
    on either side if the apostrophe-carrying form is itself a dictionary
    entry.

    Returns (word, apostrophes stripped at the front, apostrophes stripped
    at the end).
    """
    word, apos_before = strip_apos(word, 0)
    word, apos_after = strip_apos(word, -1)
    # "word'" is its own dictionary entry: keep one trailing apostrophe on
    # the word instead of treating it as punctuation.
    if f"{word}'".upper() in dict and apos_after != "":
        word = f"{word}'"
        apos_after = apos_after[:-1]
    # Same reasoning for a leading apostrophe ("'word").
    if f"'{word}".upper() in dict and apos_before != "":
        word = f"'{word}"
        apos_before = apos_before[:-1]
    return word, apos_before, apos_after
def strip_apos(word: str, pos: int) -> Tuple[str, str]:
    """Strip every leading (pos == 0) or trailing (pos == -1) apostrophe.

    Returns (stripped word, the run of apostrophes that was removed).
    """
    assert pos == 0 or pos == -1
    stripped = word
    removed = 0
    while stripped != "" and stripped[pos] == "'":
        removed += 1
        stripped = stripped[1:] if pos == 0 else stripped[:-1]
    return stripped, "'" * removed
def word_and_hyphen_before_or_after(parts: List[str], startpos: int, endpos: int) -> Tuple[str, str]:
    """Return the hyphen-joined words in parts[startpos:endpos] plus the
    connecting "-", or ("", "") when the range denotes nothing before or
    after the matched combination.
    """
    range_is_empty = endpos == 0 or startpos == len(parts)
    if range_is_empty:
        return "", ""
    return recombine_word(parts, startpos, endpos), "-"
def recombine_word(parts: List[str], startpos: int, endpos: int) -> str:
    """Re-join parts[startpos:endpos] with hyphens into a single word."""
    assert 0 <= startpos < endpos <= len(parts)
    return "-".join(parts[startpos:endpos])
def get_ipa_of_word_without_punctuation_or_unknown_words(dict: Dict[str, str], word: str, replace_unknown_with: Optional[Union[str, Callable[[str], str]]]) -> str:
    """Look up *word* in the pronunciation dictionary.

    Fallbacks for unknown words, in order: an all-caps word is spelled out
    letter by letter; otherwise the word is returned unchanged
    (replace_unknown_with is None), replaced by one placeholder char per
    letter (str), or passed to a user-supplied callback.
    """
    assert all_keys_are_upper(dict)
    if word == "":
        return ""
    if word.upper() in dict:
        return dict[word.upper()]
    if word_is_really_upper(word):
        # Treat an unknown all-caps word as an acronym: spell the letters.
        return big_letters_to_ipa(dict, word)
    if replace_unknown_with is None:
        return word
    if isinstance(replace_unknown_with, str):
        return replace_unknown_with_is_string(word, replace_unknown_with)
    return replace_unknown_with(word)
def replace_unknown_with_is_string(word: str, replace_unknown_with: str) -> str:
    """Replace *word*, which is missing from the pronunciation dictionary,
    with one placeholder character per letter.

    Args:
        word: The unknown word being replaced.
        replace_unknown_with: The placeholder, at most one character; an
            empty string erases the word entirely.

    Returns:
        ``replace_unknown_with`` repeated ``len(word)`` times.

    Raises:
        ValueError: If ``replace_unknown_with`` is longer than one char.
    """
    assert isinstance(replace_unknown_with, str)
    if len(replace_unknown_with) >= 2:
        raise ValueError("Parameter replace_unknown_with can only be 0 or 1 char.")
    res = len(word) * replace_unknown_with
    logger = getLogger(__name__)
    # Lazy %-style arguments instead of an f-string, so the message is
    # only formatted when the warning is actually emitted.
    logger.warning("Replaced %s with %s", word, res)
    return res
def word_is_really_upper(word: str) -> bool:
    """True only for non-empty, purely alphabetic, all-upper-case words."""
    return word.isalpha() and word.isupper()
def big_letters_to_ipa(dict: Dict[str, str], word: str) -> str:
    """Spell an all-caps word out letter by letter, concatenating the IPA
    of each single letter from the dictionary.
    """
    assert all_keys_are_upper(dict)
    assert word_is_really_upper(word) or word == ""
    letter_sounds = []
    for letter in word:
        assert letter in dict
        letter_sounds.append(dict[letter])
    return "".join(letter_sounds)
def all_keys_are_upper(dict: Dict[str, str]) -> bool:
    """Return True when every key of *dict* is fully upper-case.

    Used as a sanity assertion: the pronunciation dictionary is expected
    to be keyed by upper-cased words. True for an empty dict.
    """
    # Idiomatic short-circuiting scan instead of the manual loop.
    return all(key.isupper() for key in dict)
| true | true |
f71eac9488b5831e389ba288de7a4535c5b2afd7 | 1,304 | py | Python | controlapp/monitorapp/src/serverapp/serversocket/reptilesserversocket.py | kuspen/reptiles-monitor | ccb4a96e5b0091228a10eaa0e6fbf1a72795ca91 | [
"MIT"
] | null | null | null | controlapp/monitorapp/src/serverapp/serversocket/reptilesserversocket.py | kuspen/reptiles-monitor | ccb4a96e5b0091228a10eaa0e6fbf1a72795ca91 | [
"MIT"
] | null | null | null | controlapp/monitorapp/src/serverapp/serversocket/reptilesserversocket.py | kuspen/reptiles-monitor | ccb4a96e5b0091228a10eaa0e6fbf1a72795ca91 | [
"MIT"
] | null | null | null | import sys
import socket
sys.path.append('../')
import common.define
BUF_SIZE = 2048
class ReptilesServerSocket():
    """Blocking TCP server-socket wrapper: bind/listen/accept plus
    send/receive loops over the single accepted client connection.
    """
    def __init__(self, sock=None):
        # Wrap an existing socket if given; otherwise create a new TCP one.
        if sock is None:
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        else:
            self.sock = sock
    def setsocket(self):
        # Allow immediate rebinding of the address after a restart.
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    def bind(self):
        # Address and port come from the shared common.define module.
        self.sock.bind((common.define.SERVER_IP_ADDR, common.define.SERVER_PORT))
    def listen(self, num=1):
        self.sock.listen(num)
    def accept(self):
        # Blocks until a client connects; stores the per-client connection
        # used by send()/receive().
        self.conn, self.addr = self.sock.accept()
    def send(self, msg):
        # Loop until the whole message is written: socket.send() may write
        # fewer bytes than requested; 0 means the peer closed.
        totalsent = 0
        msglen = len(msg)
        while totalsent < msglen:
            sent = self.conn.send(msg[totalsent:])
            if sent == 0:
                raise RuntimeError("socket connection broken")
            totalsent = totalsent + sent
    def receive(self, msglen):
        # Accumulate chunks until exactly msglen bytes have been read.
        chunks = []
        bytes_recd = 0
        while bytes_recd < msglen:
            # NOTE(review): literal 2048 here instead of the module-level
            # BUF_SIZE constant — presumably they should agree; confirm.
            chunk = self.conn.recv(min(msglen - bytes_recd, 2048))
            if chunk == b'':
                raise RuntimeError("socket connection broken")
            chunks.append(chunk)
            bytes_recd = bytes_recd + len(chunk)
        return b''.join(chunks)
| 26.08 | 81 | 0.58819 | import sys
import socket
sys.path.append('../')
import common.define
BUF_SIZE = 2048
class ReptilesServerSocket():
def __init__(self, sock=None):
if sock is None:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
else:
self.sock = sock
def setsocket(self):
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
def bind(self):
self.sock.bind((common.define.SERVER_IP_ADDR, common.define.SERVER_PORT))
def listen(self, num=1):
self.sock.listen(num)
def accept(self):
self.conn, self.addr = self.sock.accept()
def send(self, msg):
totalsent = 0
msglen = len(msg)
while totalsent < msglen:
sent = self.conn.send(msg[totalsent:])
if sent == 0:
raise RuntimeError("socket connection broken")
totalsent = totalsent + sent
def receive(self, msglen):
chunks = []
bytes_recd = 0
while bytes_recd < msglen:
chunk = self.conn.recv(min(msglen - bytes_recd, 2048))
if chunk == b'':
raise RuntimeError("socket connection broken")
chunks.append(chunk)
bytes_recd = bytes_recd + len(chunk)
return b''.join(chunks)
| true | true |
f71eacb0cebaf99c989c8497d1bdf211436cdebe | 774 | py | Python | python_zabbix/client.py | zhenghuaHe/stu_python | e0937070248269527661ccf32e5bea048170ac17 | [
"Apache-2.0"
] | null | null | null | python_zabbix/client.py | zhenghuaHe/stu_python | e0937070248269527661ccf32e5bea048170ac17 | [
"Apache-2.0"
] | null | null | null | python_zabbix/client.py | zhenghuaHe/stu_python | e0937070248269527661ccf32e5bea048170ac17 | [
"Apache-2.0"
] | null | null | null | # -*- coding=utf-8 -*-
import socket
import psutil
import json
# Create the connection
# Build a TCP socket object
sk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = socket.gethostname()
port = 8888
# Connect to the server
sk.connect((host, port))
# Gather host information
# Host name
hostname = socket.getfqdn(socket.gethostname())
# Host IP address
host_ip = socket.gethostbyname(hostname)
# Memory usage percentage
host_memory = str(psutil.virtual_memory().percent)
# CPU usage percentage
host_cpu = str(psutil.cpu_percent(0))
# Locally logged-in users
host_user = str(psutil.users())
# Collect everything into a dict (keys are Chinese labels shown to the operator)
info = {"主机名:": hostname,"主机IP地址:": host_ip,"内存使用率:": host_memory,"CPU使用率:": host_cpu,"登录用户详情:": host_user}
result = json.dumps(info)
# Send the JSON payload
# sk.send(bytes(dict))
sk.send(result.encode('utf8'))
# Receive the reply
# Read at most 1024 bytes
msg = sk.recv(1024)
print(msg.decode('utf-8'))
# Close the connection
sk.close()
import socket
import psutil
import json
sk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = socket.gethostname()
port = 8888
sk.connect((host, port))
hostname = socket.getfqdn(socket.gethostname())
host_ip = socket.gethostbyname(hostname)
host_memory = str(psutil.virtual_memory().percent)
host_cpu = str(psutil.cpu_percent(0))
host_user = str(psutil.users())
info = {"主机名:": hostname,"主机IP地址:": host_ip,"内存使用率:": host_memory,"CPU使用率:": host_cpu,"登录用户详情:": host_user}
result = json.dumps(info)
sk.send(result.encode('utf8'))
msg = sk.recv(1024)
print(msg.decode('utf-8'))
sk.close() | true | true |
f71ead2ecdc6c76a593c5eab8b227fec6b66241f | 3,111 | py | Python | helloworld/helloworld/settings.py | yprateek136/complete_web_developement | e74faad9a9f0708a12df085d1c08170c4b6a7691 | [
"MIT"
] | null | null | null | helloworld/helloworld/settings.py | yprateek136/complete_web_developement | e74faad9a9f0708a12df085d1c08170c4b6a7691 | [
"MIT"
] | null | null | null | helloworld/helloworld/settings.py | yprateek136/complete_web_developement | e74faad9a9f0708a12df085d1c08170c4b6a7691 | [
"MIT"
] | null | null | null | """
Django settings for helloworld project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): a real key is committed here — rotate it and load it from
# the environment (e.g. os.environ) before any production deployment.
SECRET_KEY = 'r%rqu$hl0#i4xpvy@9cvj_2(_y+f1q6n%d%klw3tihj($v+3n-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty is acceptable only while DEBUG is True; list real hosts for prod.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'helloworld.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': ['templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'helloworld.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| 25.710744 | 91 | 0.697525 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'r%rqu$hl0#i4xpvy@9cvj_2(_y+f1q6n%d%klw3tihj($v+3n-'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'helloworld.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'helloworld.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| true | true |
f71ead8c9b58fad05373969af02b648b96bc2ab9 | 3,950 | py | Python | tests20/python_client/load/test_workload.py | reyoung/milvus | 7557616fea7ff7a8b093c85a5c17134112fa89f8 | [
"Apache-2.0"
] | null | null | null | tests20/python_client/load/test_workload.py | reyoung/milvus | 7557616fea7ff7a8b093c85a5c17134112fa89f8 | [
"Apache-2.0"
] | null | null | null | tests20/python_client/load/test_workload.py | reyoung/milvus | 7557616fea7ff7a8b093c85a5c17134112fa89f8 | [
"Apache-2.0"
] | null | null | null | import datetime
import pytest
from base.client_base import TestcaseBase
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel
from utils.util_log import test_log as log
from pymilvus_orm import utility
# Load-test tuning knobs: insert iterations and entities per insert batch.
rounds = 100
per_nb = 100000
# Default vector field name and an IVF_SQ8/L2 index configuration.
default_field_name = ct.default_float_vec_field_name
default_index_params = {"index_type": "IVF_SQ8", "metric_type": "L2", "params": {"nlist": 64}}
class TestLoad(TestcaseBase):
    """Load test: bulk-create many collections against a Milvus server."""

    @pytest.mark.tags(CaseLabel.L3)
    def test_load_default(self):
        """Create 50 uniquely named collections and log the server total.

        Cleanup of the original body: two dead local assignments (their
        values were immediately shadowed inside the loop) and a large
        commented-out insert/flush/index/search/query flow were removed;
        recover that flow from version control if the full end-to-end
        load scenario is needed again.
        """
        for i in range(50):
            self.init_collection_wrap(name=f"load_collection2_{i}")
        log.debug(f"total collections: {len(utility.list_collections())}")
| 41.578947 | 102 | 0.586076 | import datetime
import pytest
from base.client_base import TestcaseBase
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel
from utils.util_log import test_log as log
from pymilvus_orm import utility
rounds = 100
per_nb = 100000
default_field_name = ct.default_float_vec_field_name
default_index_params = {"index_type": "IVF_SQ8", "metric_type": "L2", "params": {"nlist": 64}}
class TestLoad(TestcaseBase):
@pytest.mark.tags(CaseLabel.L3)
def test_load_default(self):
name = 'load_test_collection_1'
name2 = 'load_test_collection_2'
for i in range(50):
name = f"load_collection2_{i}"
self.init_collection_wrap(name=name)
log.debug(f"total collections: {len(utility.list_collections())}")
| true | true |
f71ead9063e6e1999ccb1b30dc9112382068d484 | 293 | py | Python | python exercicios/func/funct10.py | gabrielqoliveiraa/bomdia | b5e0fe6aa347a0e31b5960a69fbd6f32df352094 | [
"MIT"
] | null | null | null | python exercicios/func/funct10.py | gabrielqoliveiraa/bomdia | b5e0fe6aa347a0e31b5960a69fbd6f32df352094 | [
"MIT"
] | null | null | null | python exercicios/func/funct10.py | gabrielqoliveiraa/bomdia | b5e0fe6aa347a0e31b5960a69fbd6f32df352094 | [
"MIT"
] | null | null | null | def leiaInt(msg):
    # Flag-and-accumulator loop: keep prompting until a valid integer is
    # typed, then return it.
    ok = False
    valor = 0
    while True:
        n = str(input(msg))
        # str.isnumeric() accepts only unsigned digit strings, so signs
        # and negative numbers are rejected and re-prompted like any
        # other invalid input.
        if n.isnumeric():
            valor = int(n)
            ok = True
        else:
            print('ERRO!')
        if ok:
            break
    return valor
n = leiaInt('Digite um número: ')  # read one validated integer from the user
ok = False
valor = 0
while True:
n = str(input(msg))
if n.isnumeric():
valor = int(n)
ok = True
else:
print('ERRO!')
if ok:
break
return valor
n = leiaInt('Digite um número: ') | true | true |
f71eadd98dbe3ba79d4203630eff5be2409e013c | 3,410 | py | Python | Bake_bot/migrations/0001_initial.py | annfike/Bake_Cake_bot | 9407f99d1832d0bd5be409d1c02a6dfa8c3a4fff | [
"MIT"
] | null | null | null | Bake_bot/migrations/0001_initial.py | annfike/Bake_Cake_bot | 9407f99d1832d0bd5be409d1c02a6dfa8c3a4fff | [
"MIT"
] | null | null | null | Bake_bot/migrations/0001_initial.py | annfike/Bake_Cake_bot | 9407f99d1832d0bd5be409d1c02a6dfa8c3a4fff | [
"MIT"
] | null | null | null | # Generated by Django 3.2.7 on 2021-10-27 09:42
from django.db import migrations, models
import django.db.models.deletion
import phonenumber_field.modelfields
class Migration(migrations.Migration):
    # Auto-generated initial schema (Customer, Order, Product,
    # Product_parameters). Do not hand-edit an applied migration —
    # change the models and generate a new migration instead.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Customer',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('external_id', models.PositiveIntegerField(unique=True, verbose_name='Внешний ID покупателя')),
                ('tg_username', models.CharField(blank=True, default='', max_length=50, verbose_name='Имя покупателя в Телеграме')),
                ('first_name', models.CharField(blank=True, default='', max_length=256, verbose_name='Имя')),
                ('last_name', models.CharField(blank=True, default='', max_length=256, verbose_name='Фамилия')),
                ('phone_number', phonenumber_field.modelfields.PhoneNumberField(max_length=128, region=None)),
                ('GDPR_status', models.BooleanField(default=False, null=True)),
                ('home_address', models.CharField(blank=True, default='', max_length=50, verbose_name='Домашний адрес')),
            ],
            options={
                'verbose_name': 'Покупатель',
                'verbose_name_plural': 'Покупатели',
            },
        ),
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order_number', models.PositiveIntegerField(default=None, null=True, unique=True, verbose_name='Номер заказа')),
                ('order_price', models.PositiveIntegerField(verbose_name='Цена заказа')),
                ('order_date', models.DateTimeField()),
                ('order_status', models.CharField(choices=[('Заявка обрабатывается', 'Заявка обрабатывается'), ('Готовим ваш торт', 'Готовим ваш торт'), ('Продукт в пути', 'Продукт в пути'), ('Продукт у вас', 'Продукт у вас')], default='Заявка обрабатывается', max_length=256)),
            ],
            options={
                'verbose_name': 'Заказ',
                'verbose_name_plural': 'Заказы',
            },
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('product_name', models.CharField(max_length=256)),
            ],
            options={
                'verbose_name': 'Продукт',
                'verbose_name_plural': 'Продукты',
            },
        ),
        migrations.CreateModel(
            name='Product_parameters',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('parameter_name', models.CharField(max_length=256)),
                ('parameter_price', models.PositiveIntegerField(verbose_name='Цена')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Bake_bot.product')),
            ],
            options={
                'verbose_name': 'Параметры продукта',
                'verbose_name_plural': 'Параметры продуктов',
            },
        ),
    ]
| 47.361111 | 278 | 0.58827 |
from django.db import migrations, models
import django.db.models.deletion
import phonenumber_field.modelfields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Customer',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('external_id', models.PositiveIntegerField(unique=True, verbose_name='Внешний ID покупателя')),
('tg_username', models.CharField(blank=True, default='', max_length=50, verbose_name='Имя покупателя в Телеграме')),
('first_name', models.CharField(blank=True, default='', max_length=256, verbose_name='Имя')),
('last_name', models.CharField(blank=True, default='', max_length=256, verbose_name='Фамилия')),
('phone_number', phonenumber_field.modelfields.PhoneNumberField(max_length=128, region=None)),
('GDPR_status', models.BooleanField(default=False, null=True)),
('home_address', models.CharField(blank=True, default='', max_length=50, verbose_name='Домашний адрес')),
],
options={
'verbose_name': 'Покупатель',
'verbose_name_plural': 'Покупатели',
},
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order_number', models.PositiveIntegerField(default=None, null=True, unique=True, verbose_name='Номер заказа')),
('order_price', models.PositiveIntegerField(verbose_name='Цена заказа')),
('order_date', models.DateTimeField()),
('order_status', models.CharField(choices=[('Заявка обрабатывается', 'Заявка обрабатывается'), ('Готовим ваш торт', 'Готовим ваш торт'), ('Продукт в пути', 'Продукт в пути'), ('Продукт у вас', 'Продукт у вас')], default='Заявка обрабатывается', max_length=256)),
],
options={
'verbose_name': 'Заказ',
'verbose_name_plural': 'Заказы',
},
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('product_name', models.CharField(max_length=256)),
],
options={
'verbose_name': 'Продукт',
'verbose_name_plural': 'Продукты',
},
),
migrations.CreateModel(
name='Product_parameters',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('parameter_name', models.CharField(max_length=256)),
('parameter_price', models.PositiveIntegerField(verbose_name='Цена')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Bake_bot.product')),
],
options={
'verbose_name': 'Параметры продукта',
'verbose_name_plural': 'Параметры продуктов',
},
),
]
| true | true |
f71eae5bad1890b78020354c67dece0739586089 | 1,359 | py | Python | blog/migrations/0001_initial.py | elasyaf/djangblog | d064662ad5eb6642022d957c99d434f96fc9fb51 | [
"Unlicense"
] | null | null | null | blog/migrations/0001_initial.py | elasyaf/djangblog | d064662ad5eb6642022d957c99d434f96fc9fb51 | [
"Unlicense"
] | null | null | null | blog/migrations/0001_initial.py | elasyaf/djangblog | d064662ad5eb6642022d957c99d434f96fc9fb51 | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-27 15:28
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial schema (Blog, Category and the Blog->Category
    # foreign key). Do not hand-edit an applied migration — change the
    # models and generate a new migration instead.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Blog',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100, unique=True)),
                ('slug', models.SlugField(max_length=100, unique=True)),
                ('body', models.TextField()),
                ('posted', models.DateTimeField(auto_now_add=True, db_index=True)),
            ],
        ),
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(db_index=True, max_length=100)),
                ('slug', models.SlugField(max_length=100)),
            ],
        ),
        migrations.AddField(
            model_name='blog',
            name='category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Category'),
        ),
    ]
| 33.146341 | 114 | 0.577631 |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Blog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, unique=True)),
('slug', models.SlugField(max_length=100, unique=True)),
('body', models.TextField()),
('posted', models.DateTimeField(auto_now_add=True, db_index=True)),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(db_index=True, max_length=100)),
('slug', models.SlugField(max_length=100)),
],
),
migrations.AddField(
model_name='blog',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Category'),
),
]
| true | true |
f71eaf00550df0365a2be759b57becb21a74722f | 509 | py | Python | university_dost/users/tests/test_tasks.py | dhavalsavalia/university_dost | ef6c78239dd648542b68b610528e0b9a23a94295 | [
"MIT"
] | null | null | null | university_dost/users/tests/test_tasks.py | dhavalsavalia/university_dost | ef6c78239dd648542b68b610528e0b9a23a94295 | [
"MIT"
] | null | null | null | university_dost/users/tests/test_tasks.py | dhavalsavalia/university_dost | ef6c78239dd648542b68b610528e0b9a23a94295 | [
"MIT"
] | 1 | 2020-06-05T09:29:09.000Z | 2020-06-05T09:29:09.000Z | import pytest
from celery.result import EagerResult
from university_dost.users.tasks import get_users_count
from university_dost.users.tests.factories import UserFactory
pytestmark = pytest.mark.django_db
def test_user_count(settings):
    """A basic test to execute the get_users_count Celery task."""
    # Three users exist, so the task should count exactly 3.
    UserFactory.create_batch(3)
    # Run the task synchronously in-process instead of via a worker.
    settings.CELERY_TASK_ALWAYS_EAGER = True
    task_result = get_users_count.delay()
    # Eager mode wraps the task's return value in an EagerResult.
    assert isinstance(task_result, EagerResult)
    assert task_result.result == 3
| 29.941176 | 66 | 0.795678 | import pytest
from celery.result import EagerResult
from university_dost.users.tasks import get_users_count
from university_dost.users.tests.factories import UserFactory
pytestmark = pytest.mark.django_db
def test_user_count(settings):
UserFactory.create_batch(3)
settings.CELERY_TASK_ALWAYS_EAGER = True
task_result = get_users_count.delay()
assert isinstance(task_result, EagerResult)
assert task_result.result == 3
| true | true |
f71eafe0c6ec1bcc5a035105c312dffa9e35645e | 158 | py | Python | Concursera - USP/Semana 2/Conversordetemperatura.py | Pablopfrj/Cursos-de-Python | 805f1fa42d41e842df66d24420fed0f5c0cdc740 | [
"MIT"
] | null | null | null | Concursera - USP/Semana 2/Conversordetemperatura.py | Pablopfrj/Cursos-de-Python | 805f1fa42d41e842df66d24420fed0f5c0cdc740 | [
"MIT"
] | null | null | null | Concursera - USP/Semana 2/Conversordetemperatura.py | Pablopfrj/Cursos-de-Python | 805f1fa42d41e842df66d24420fed0f5c0cdc740 | [
"MIT"
] | null | null | null | temperaturaF = input ("Qual temperatura desejada? ")
K = float(temperaturaF)
temperarturaC = 5*(K - 32)/9
print ('A temepratura celsius é ',temperarturaC) | 22.571429 | 53 | 0.721519 | temperaturaF = input ("Qual temperatura desejada? ")
K = float(temperaturaF)
temperarturaC = 5*(K - 32)/9
print ('A temepratura celsius é ',temperarturaC) | true | true |
f71eb1bcceacb04d1e517066e97c304ca359d409 | 444 | py | Python | Set0/p0_3.py | izzy-el/mitbrazil-intro-python | 193d552832393d193eb24d6881be0ab2a37b41d1 | [
"MIT"
] | null | null | null | Set0/p0_3.py | izzy-el/mitbrazil-intro-python | 193d552832393d193eb24d6881be0ab2a37b41d1 | [
"MIT"
] | null | null | null | Set0/p0_3.py | izzy-el/mitbrazil-intro-python | 193d552832393d193eb24d6881be0ab2a37b41d1 | [
"MIT"
] | null | null | null | kwh_used = 1000
out = 0
if(kwh_used < 500):
out += 500 * 0.45
elif(kwh_used >= 500 and kwh_used < 1500):
out += 500 * 0.45 + ((kwh_used - 500) * 0.74)
elif(kwh_used >= 1500 and kwh_used < 2500):
out += 500 * 0.45 + ((kwh_used - 500) * 0.74) + ((kwh_used - 1500) * 1.25)
elif(kwh_used >= 2500):
out += 500 * 0.45 + ((kwh_used - 500) * 0.74) + ((kwh_used - 1500) * 1.25) + ((kwh_used - 2500) * 2)
out += out * 0.2
print(out) | 31.714286 | 104 | 0.547297 | kwh_used = 1000
out = 0
if(kwh_used < 500):
out += 500 * 0.45
elif(kwh_used >= 500 and kwh_used < 1500):
out += 500 * 0.45 + ((kwh_used - 500) * 0.74)
elif(kwh_used >= 1500 and kwh_used < 2500):
out += 500 * 0.45 + ((kwh_used - 500) * 0.74) + ((kwh_used - 1500) * 1.25)
elif(kwh_used >= 2500):
out += 500 * 0.45 + ((kwh_used - 500) * 0.74) + ((kwh_used - 1500) * 1.25) + ((kwh_used - 2500) * 2)
out += out * 0.2
print(out) | true | true |
f71eb2893c929f8c7aacf04acd0eedb825e80d13 | 2,653 | py | Python | PyStationB/docs/conf.py | BrunoKM/station-b-libraries | ea3591837e4a33f0bef789d905467754c27913b3 | [
"MIT"
] | 6 | 2021-09-29T15:46:55.000Z | 2021-12-14T18:39:51.000Z | PyStationB/docs/conf.py | BrunoKM/station-b-libraries | ea3591837e4a33f0bef789d905467754c27913b3 | [
"MIT"
] | null | null | null | PyStationB/docs/conf.py | BrunoKM/station-b-libraries | ea3591837e4a33f0bef789d905467754c27913b3 | [
"MIT"
] | 3 | 2021-09-27T10:35:20.000Z | 2021-10-02T17:53:07.000Z | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# -- Project information -----------------------------------------------------
project = "PyStationB"
copyright = "2021, Station B, Microsoft Research Cambridge"
author = "Station B, Microsoft Research Cambridge"
# The full version, including alpha/beta/rc tags
release = "0.0.1"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.todo",
"sphinx.ext.mathjax",
"sphinx.ext.napoleon",
"sphinx_rtd_theme",
# "sphinx.ext.autosectionlabel",
]
napoleon_google_docstring = True
napoleon_use_param = False
napoleon_use_rtype = True
napoleon_attr_annotations = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = [] # type: ignore
# Display Todos
todo_include_todos = True
# Module index will look better with this option
modindex_common_prefix = ["abex."]
| 36.847222 | 93 | 0.640407 |
project = "PyStationB"
copyright = "2021, Station B, Microsoft Research Cambridge"
author = "Station B, Microsoft Research Cambridge"
release = "0.0.1"
extensions = [
"sphinx.ext.todo",
"sphinx.ext.mathjax",
"sphinx.ext.napoleon",
"sphinx_rtd_theme",
]
napoleon_google_docstring = True
napoleon_use_param = False
napoleon_use_rtype = True
napoleon_attr_annotations = True
templates_path = ["_templates"]
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
html_theme = "sphinx_rtd_theme"
html_static_path = []
todo_include_todos = True
modindex_common_prefix = ["abex."]
| true | true |
f71eb38201529059e853f2ffea8fb7a836decd22 | 4,485 | py | Python | arturia_midi.py | rjuang/flstudio-arturia-keylab-mk2 | 91fe800769bb724010122d5f6a67af7f51418682 | [
"MIT"
] | 19 | 2020-11-25T18:31:57.000Z | 2022-03-09T14:45:18.000Z | arturia_midi.py | rjuang/flstudio-arturia-keylab-mk2 | 91fe800769bb724010122d5f6a67af7f51418682 | [
"MIT"
] | 1 | 2021-01-24T11:31:16.000Z | 2021-06-05T21:39:10.000Z | arturia_midi.py | rjuang/flstudio-arturia-keylab-mk2 | 91fe800769bb724010122d5f6a67af7f51418682 | [
"MIT"
] | 3 | 2021-03-20T03:50:10.000Z | 2021-12-24T00:27:49.000Z | import debug
import device
# Status command to use when sending commands between scripts
INTER_SCRIPT_STATUS_BYTE = 0x00
INTER_SCRIPT_DATA1_BTN_DOWN_CMD = 0x01 # Data2 contains the id of the button
INTER_SCRIPT_DATA1_BTN_UP_CMD = 0x02 # Data2 contains the id of the button
INTER_SCRIPT_DATA1_UPDATE_STATE = 0x03 # Data2 contains the status of the update (INTER_SCRIPT_DATA2_STATE_...)
INTER_SCRIPT_DATA1_BEGIN_PAYLOAD_CMD = 0xFE
INTER_SCRIPT_DATA1_END_PAYLOAD_CMD = 0xFF
PAYLOAD_STATUS_BYTE = 0x01
PLUGIN_PORT_NUM = 10
SYSEX_HEADER = [0xF0, 0x00, 0x20, 0x6B, 0x7F, 0x42]
SYSEX_FOOTER = [0xF7]
INTER_SCRIPT_DATA2_STATE_PAD_RECORD_STOP = 0x00
INTER_SCRIPT_DATA2_STATE_PAD_RECORD_START = 0x01
class MidiEventDispatcher:
    """Routes a MIDI event to a handler chosen by a transform of the event.

    The transform function (supplied at construction) maps an event to a
    lookup key. Each key may be associated with a (handler, filter) pair:
    if the filter predicate accepts the event, the handler is invoked.
    """

    def __init__(self, transform_fn):
        self._transform_fn = transform_fn
        # key -> (handler callable, filter predicate)
        self._dispatch_map = {}

    def SetHandler(self, key, callback_fn, filter_fn=None):
        """Register ``callback_fn`` for events whose transform equals ``key``.

        :param key: value to match against transform_fn(event).
        :param callback_fn: invoked with the event on a match.
        :param filter_fn: optional predicate; the handler only runs when it
            returns true for the event. Omitting it means "always run".
        :return: self, so registrations can be chained.
        """
        predicate = filter_fn if filter_fn is not None else (lambda _event: True)
        self._dispatch_map[key] = (callback_fn, predicate)
        return self

    def SetHandlerForKeys(self, keys, callback_fn, filter_fn=None):
        """Register the same (handler, filter) pair for every key in ``keys``."""
        for key in keys:
            self.SetHandler(key, callback_fn, filter_fn=filter_fn)
        return self

    def Dispatch(self, event):
        """Route ``event`` to its handler.

        :param event: the MIDI event to dispatch.
        :return: True when a handler was registered for the event's key
            (even if the filter dropped it), False otherwise.
        """
        key = self._transform_fn(event)
        entry = self._dispatch_map.get(key)
        if entry is None:
            debug.log("DISPATCHER", "No handler found.", event=event)
            return False
        callback_fn, filter_fn = entry
        if filter_fn(event):
            callback_fn(event)
        else:
            debug.log("DISPATCHER", "Event dropped by filter.", event=event)
        return True
def send_to_device(data):
    """Send a SysEx payload to the Arturia device.

    The payload is wrapped in the Arturia vendor-specific SysEx header and
    the standard SysEx end-of-message footer before transmission.

    :param data: iterable of int byte values forming the SysEx body.
    """
    # debug.log('CMD', 'Sending payload: ' + str(data))
    # Reference regarding SysEx code : # https://forum.arturia.com/index.php?topic=90496.0
    device.midiOutSysex(bytes(SYSEX_HEADER) + bytes(data) + bytes(SYSEX_FOOTER))
def dispatch_message_to_other_scripts(status, data1, data2, payload=None):
    """Send a MIDI-style message (and optional byte payload) to all other scripts.

    Each 3-byte MIDI event is packed into a single int as
    ``status | data1 << 8 | data2 << 16`` and delivered to every dispatch
    receiver. A payload is bracketed by begin/end marker messages and sent
    two bytes per message (in the data1/data2 positions).
    """
    for i in range(device.dispatchReceiverCount()):
        msg = status + (data1 << 8) + (data2 << 16)
        if payload is None:
            device.dispatch(i, msg)
        else:
            # Begin-of-payload marker.
            # NOTE(review): this marker puts INTER_SCRIPT_DATA1_BEGIN_PAYLOAD_CMD
            # (0xFE) in the *status* byte position, while the end marker below
            # uses INTER_SCRIPT_STATUS_BYTE — possibly intentional, but the
            # asymmetry is worth confirming.
            msg = INTER_SCRIPT_DATA1_BEGIN_PAYLOAD_CMD
            msg += (INTER_SCRIPT_DATA1_BEGIN_PAYLOAD_CMD << 8)
            device.dispatch(i, msg)
            # Send payload, two bytes per message; a trailing odd byte goes
            # out with an implicit zero in the data2 position.
            for j in range(0, len(payload), 2):
                msg = PAYLOAD_STATUS_BYTE + (payload[j] << 8)
                if j + 1 < len(payload):
                    msg += (payload[j + 1] << 16)
                device.dispatch(i, msg)
            # End-of-payload marker.
            msg = INTER_SCRIPT_STATUS_BYTE
            msg += (INTER_SCRIPT_DATA1_END_PAYLOAD_CMD << 8)
            device.dispatch(i, msg)
| 41.915888 | 118 | 0.672464 | import debug
import device
INTER_SCRIPT_STATUS_BYTE = 0x00
INTER_SCRIPT_DATA1_BTN_DOWN_CMD = 0x01
INTER_SCRIPT_DATA1_BTN_UP_CMD = 0x02
INTER_SCRIPT_DATA1_UPDATE_STATE = 0x03
INTER_SCRIPT_DATA1_BEGIN_PAYLOAD_CMD = 0xFE
INTER_SCRIPT_DATA1_END_PAYLOAD_CMD = 0xFF
PAYLOAD_STATUS_BYTE = 0x01
PLUGIN_PORT_NUM = 10
SYSEX_HEADER = [0xF0, 0x00, 0x20, 0x6B, 0x7F, 0x42]
SYSEX_FOOTER = [0xF7]
INTER_SCRIPT_DATA2_STATE_PAD_RECORD_STOP = 0x00
INTER_SCRIPT_DATA2_STATE_PAD_RECORD_START = 0x01
class MidiEventDispatcher:
def __init__(self, transform_fn):
self._transform_fn = transform_fn
self._dispatch_map = {}
def SetHandler(self, key, callback_fn, filter_fn=None):
def _default_true_fn(_): return True
if filter_fn is None:
filter_fn = _default_true_fn
self._dispatch_map[key] = (callback_fn, filter_fn)
return self
def SetHandlerForKeys(self, keys, callback_fn, filter_fn=None):
for k in keys:
self.SetHandler(k, callback_fn, filter_fn=filter_fn)
return self
def Dispatch(self, event):
key = self._transform_fn(event)
processed = False
if key in self._dispatch_map:
callback_fn, filter_fn = self._dispatch_map[key]
if filter_fn(event):
callback_fn(event)
processed = True
else:
debug.log("DISPATCHER", "Event dropped by filter.", event=event)
processed = True
else:
debug.log("DISPATCHER", "No handler found.", event=event)
return processed
def send_to_device(data):
s(data) + bytes(SYSEX_FOOTER))
def dispatch_message_to_other_scripts(status, data1, data2, payload=None):
for i in range(device.dispatchReceiverCount()):
msg = status + (data1 << 8) + (data2 << 16)
if payload is None:
device.dispatch(i, msg)
else:
msg = INTER_SCRIPT_DATA1_BEGIN_PAYLOAD_CMD
msg += (INTER_SCRIPT_DATA1_BEGIN_PAYLOAD_CMD << 8)
device.dispatch(i, msg)
for j in range(0, len(payload), 2):
msg = PAYLOAD_STATUS_BYTE + (payload[j] << 8)
if j + 1 < len(payload):
msg += (payload[j + 1] << 16)
device.dispatch(i, msg)
msg = INTER_SCRIPT_STATUS_BYTE
msg += (INTER_SCRIPT_DATA1_END_PAYLOAD_CMD << 8)
device.dispatch(i, msg)
| true | true |
f71eb4e7d27b7bafa25c7ecc98bfe686ddc35042 | 6,389 | py | Python | app/service/send_notification.py | cds-snc/notifier-api | 90b385ec49efbaee7e607516fc7d9f08991af813 | [
"MIT"
] | 41 | 2019-11-28T16:58:41.000Z | 2022-01-28T21:11:16.000Z | app/service/send_notification.py | cds-snc/notification-api | b1c1064f291eb860b494c3fa65ac256ad70bf47c | [
"MIT"
] | 1,083 | 2019-07-08T12:57:24.000Z | 2022-03-08T18:53:40.000Z | app/service/send_notification.py | cds-snc/notifier-api | 90b385ec49efbaee7e607516fc7d9f08991af813 | [
"MIT"
] | 9 | 2020-01-24T19:56:43.000Z | 2022-01-27T21:36:53.000Z | from flask import current_app
from notifications_utils.s3 import S3ObjectNotFound
from notifications_utils.s3 import s3download as utils_s3download
from sqlalchemy.orm.exc import NoResultFound
from app import create_random_identifier
from app.dao.notifications_dao import _update_notification_status
from app.dao.service_email_reply_to_dao import dao_get_reply_to_by_id
from app.dao.service_sms_sender_dao import dao_get_service_sms_senders_by_id
from app.dao.services_dao import dao_fetch_service_by_id
from app.dao.templates_dao import (
dao_get_template_by_id_and_service_id,
get_precompiled_letter_template,
)
from app.dao.users_dao import get_user_by_id
from app.letters.utils import (
get_letter_pdf_filename,
get_page_count,
move_uploaded_pdf_to_letters_bucket,
)
from app.models import (
EMAIL_TYPE,
KEY_TYPE_NORMAL,
LETTER_TYPE,
NOTIFICATION_DELIVERED,
SMS_TYPE,
UPLOAD_LETTERS,
)
from app.notifications.process_notifications import (
persist_notification,
send_notification_to_queue,
)
from app.notifications.validators import (
check_service_has_permission,
check_service_over_daily_message_limit,
validate_and_format_recipient,
validate_template,
)
from app.v2.errors import BadRequestError
def validate_created_by(service, created_by_id):
    """Raise BadRequestError unless the given user is a member of the service."""
    user = get_user_by_id(created_by_id)
    if service in user.services:
        return
    raise BadRequestError(
        message='Can’t create notification - {} is not part of the "{}" service'.format(user.name, service.name)
    )
def create_one_off_reference(template_type):
    """Return a freshly generated reference for letters; None for other channels."""
    return create_random_identifier() if template_type == LETTER_TYPE else None
def send_one_off_notification(service_id, post_data):
    """Create and dispatch a single notification on behalf of a platform user.

    ``post_data`` must carry ``template_id``, ``to`` and ``created_by``;
    ``personalisation`` and ``sender_id`` are optional.

    :return: ``{"id": <notification id as a string>}``.
    :raises BadRequestError: when template/recipient/creator validation fails.
    """
    service = dao_fetch_service_by_id(service_id)
    template = dao_get_template_by_id_and_service_id(template_id=post_data["template_id"], service_id=service_id)
    personalisation = post_data.get("personalisation", None)
    # Each check below raises on failure, aborting before anything persists.
    validate_template(template.id, personalisation, service, template.template_type)
    check_service_over_daily_message_limit(KEY_TYPE_NORMAL, service)
    validate_and_format_recipient(
        send_to=post_data["to"],
        key_type=KEY_TYPE_NORMAL,
        service=service,
        notification_type=template.template_type,
        # NOTE(review): safelist bypass is disabled for one-off sends —
        # confirm this matches trial-mode expectations.
        allow_safelisted_recipients=False,
    )
    validate_created_by(service, post_data["created_by"])
    sender_id = post_data.get("sender_id", None)
    reply_to = get_reply_to_text(
        notification_type=template.template_type,
        sender_id=sender_id,
        service=service,
        template=template,
    )
    notification = persist_notification(
        template_id=template.id,
        template_version=template.version,
        template_postage=template.postage,
        recipient=post_data["to"],
        service=service,
        personalisation=personalisation,
        notification_type=template.template_type,
        api_key_id=None,
        key_type=KEY_TYPE_NORMAL,
        created_by_id=post_data["created_by"],
        reply_to_text=reply_to,
        reference=create_one_off_reference(template.template_type),
    )
    if template.template_type == LETTER_TYPE and service.research_mode:
        # Research-mode letters are never sent: mark delivered immediately
        # instead of queueing.
        _update_notification_status(
            notification,
            NOTIFICATION_DELIVERED,
        )
    else:
        send_notification_to_queue(
            notification=notification,
            research_mode=service.research_mode,
            queue=template.queue_to_use(),
        )
    return {"id": str(notification.id)}
def get_reply_to_text(notification_type, sender_id, service, template):
    """Resolve the reply-to text for a notification.

    If ``sender_id`` is given, look up the service's stored email reply-to
    (for email) or SMS sender (for SMS); otherwise fall back to the
    template's default reply-to text.

    :raises BadRequestError: when ``sender_id`` does not match a stored sender.
    """
    reply_to = None
    if sender_id:
        try:
            # ``message`` is assigned *before* each dao call so the except
            # block below can report which lookup failed.
            if notification_type == EMAIL_TYPE:
                message = "Reply to email address not found"
                reply_to = dao_get_reply_to_by_id(service.id, sender_id).email_address
            elif notification_type == SMS_TYPE:
                message = "SMS sender not found"
                reply_to = dao_get_service_sms_senders_by_id(service.id, sender_id).get_reply_to_text()
        except NoResultFound:
            raise BadRequestError(message=message)
    else:
        reply_to = template.get_reply_to_text()
    return reply_to
def send_pdf_letter_notification(service_id, post_data):
    """Create a notification for a PDF letter that a user has uploaded.

    ``post_data`` must contain ``file_id`` (the transient upload key),
    ``filename`` and ``created_by``. The PDF is read from the transient
    uploads bucket, a notification row is persisted, and the file is moved
    to the letters bucket.

    :return: ``{"id": <notification id as a string>}``.
    :raises S3ObjectNotFound: if the uploaded PDF is no longer in the
        transient bucket.
    :raises BadRequestError: if the service lacks permissions or the
        creating user is not part of the service.
    """
    service = dao_fetch_service_by_id(service_id)
    check_service_has_permission(LETTER_TYPE, service.permissions)
    check_service_has_permission(UPLOAD_LETTERS, service.permissions)
    check_service_over_daily_message_limit(KEY_TYPE_NORMAL, service)
    validate_created_by(service, post_data["created_by"])
    template = get_precompiled_letter_template(service.id)
    file_location = "service-{}/{}.pdf".format(service.id, post_data["file_id"])
    try:
        letter = utils_s3download(current_app.config["TRANSIENT_UPLOADED_LETTERS"], file_location)
    except S3ObjectNotFound as e:
        # Log with stack trace, then propagate so the caller can respond.
        current_app.logger.exception(
            "Letter {}.pdf not in transient {} bucket".format(
                post_data["file_id"], current_app.config["TRANSIENT_UPLOADED_LETTERS"]
            )
        )
        raise e
    # Getting the page count won't raise an error since admin has already checked the PDF is valid
    billable_units = get_page_count(letter.read())
    personalisation = {"address_line_1": post_data["filename"]}
    # TODO: stop hard-coding postage as 'second' once we get postage from the admin
    notification = persist_notification(
        notification_id=post_data["file_id"],
        template_id=template.id,
        template_version=template.version,
        template_postage=template.postage,
        recipient=post_data["filename"],
        service=service,
        personalisation=personalisation,
        notification_type=LETTER_TYPE,
        api_key_id=None,
        key_type=KEY_TYPE_NORMAL,
        reference=create_one_off_reference(LETTER_TYPE),
        client_reference=post_data["filename"],
        created_by_id=post_data["created_by"],
        billable_units=billable_units,
        postage="second",
    )
    upload_filename = get_letter_pdf_filename(
        notification.reference,
        notification.service.crown,
        is_scan_letter=False,
        postage=notification.postage,
    )
    move_uploaded_pdf_to_letters_bucket(file_location, upload_filename)
    return {"id": str(notification.id)}
| 34.722826 | 114 | 0.728283 | from flask import current_app
from notifications_utils.s3 import S3ObjectNotFound
from notifications_utils.s3 import s3download as utils_s3download
from sqlalchemy.orm.exc import NoResultFound
from app import create_random_identifier
from app.dao.notifications_dao import _update_notification_status
from app.dao.service_email_reply_to_dao import dao_get_reply_to_by_id
from app.dao.service_sms_sender_dao import dao_get_service_sms_senders_by_id
from app.dao.services_dao import dao_fetch_service_by_id
from app.dao.templates_dao import (
dao_get_template_by_id_and_service_id,
get_precompiled_letter_template,
)
from app.dao.users_dao import get_user_by_id
from app.letters.utils import (
get_letter_pdf_filename,
get_page_count,
move_uploaded_pdf_to_letters_bucket,
)
from app.models import (
EMAIL_TYPE,
KEY_TYPE_NORMAL,
LETTER_TYPE,
NOTIFICATION_DELIVERED,
SMS_TYPE,
UPLOAD_LETTERS,
)
from app.notifications.process_notifications import (
persist_notification,
send_notification_to_queue,
)
from app.notifications.validators import (
check_service_has_permission,
check_service_over_daily_message_limit,
validate_and_format_recipient,
validate_template,
)
from app.v2.errors import BadRequestError
def validate_created_by(service, created_by_id):
user = get_user_by_id(created_by_id)
if service not in user.services:
message = 'Can’t create notification - {} is not part of the "{}" service'.format(user.name, service.name)
raise BadRequestError(message=message)
def create_one_off_reference(template_type):
if template_type == LETTER_TYPE:
return create_random_identifier()
return None
def send_one_off_notification(service_id, post_data):
service = dao_fetch_service_by_id(service_id)
template = dao_get_template_by_id_and_service_id(template_id=post_data["template_id"], service_id=service_id)
personalisation = post_data.get("personalisation", None)
validate_template(template.id, personalisation, service, template.template_type)
check_service_over_daily_message_limit(KEY_TYPE_NORMAL, service)
validate_and_format_recipient(
send_to=post_data["to"],
key_type=KEY_TYPE_NORMAL,
service=service,
notification_type=template.template_type,
allow_safelisted_recipients=False,
)
validate_created_by(service, post_data["created_by"])
sender_id = post_data.get("sender_id", None)
reply_to = get_reply_to_text(
notification_type=template.template_type,
sender_id=sender_id,
service=service,
template=template,
)
notification = persist_notification(
template_id=template.id,
template_version=template.version,
template_postage=template.postage,
recipient=post_data["to"],
service=service,
personalisation=personalisation,
notification_type=template.template_type,
api_key_id=None,
key_type=KEY_TYPE_NORMAL,
created_by_id=post_data["created_by"],
reply_to_text=reply_to,
reference=create_one_off_reference(template.template_type),
)
if template.template_type == LETTER_TYPE and service.research_mode:
_update_notification_status(
notification,
NOTIFICATION_DELIVERED,
)
else:
send_notification_to_queue(
notification=notification,
research_mode=service.research_mode,
queue=template.queue_to_use(),
)
return {"id": str(notification.id)}
def get_reply_to_text(notification_type, sender_id, service, template):
reply_to = None
if sender_id:
try:
if notification_type == EMAIL_TYPE:
message = "Reply to email address not found"
reply_to = dao_get_reply_to_by_id(service.id, sender_id).email_address
elif notification_type == SMS_TYPE:
message = "SMS sender not found"
reply_to = dao_get_service_sms_senders_by_id(service.id, sender_id).get_reply_to_text()
except NoResultFound:
raise BadRequestError(message=message)
else:
reply_to = template.get_reply_to_text()
return reply_to
def send_pdf_letter_notification(service_id, post_data):
service = dao_fetch_service_by_id(service_id)
check_service_has_permission(LETTER_TYPE, service.permissions)
check_service_has_permission(UPLOAD_LETTERS, service.permissions)
check_service_over_daily_message_limit(KEY_TYPE_NORMAL, service)
validate_created_by(service, post_data["created_by"])
template = get_precompiled_letter_template(service.id)
file_location = "service-{}/{}.pdf".format(service.id, post_data["file_id"])
try:
letter = utils_s3download(current_app.config["TRANSIENT_UPLOADED_LETTERS"], file_location)
except S3ObjectNotFound as e:
current_app.logger.exception(
"Letter {}.pdf not in transient {} bucket".format(
post_data["file_id"], current_app.config["TRANSIENT_UPLOADED_LETTERS"]
)
)
raise e
billable_units = get_page_count(letter.read())
personalisation = {"address_line_1": post_data["filename"]}
# TODO: stop hard-coding postage as 'second' once we get postage from the admin
notification = persist_notification(
notification_id=post_data["file_id"],
template_id=template.id,
template_version=template.version,
template_postage=template.postage,
recipient=post_data["filename"],
service=service,
personalisation=personalisation,
notification_type=LETTER_TYPE,
api_key_id=None,
key_type=KEY_TYPE_NORMAL,
reference=create_one_off_reference(LETTER_TYPE),
client_reference=post_data["filename"],
created_by_id=post_data["created_by"],
billable_units=billable_units,
postage="second",
)
upload_filename = get_letter_pdf_filename(
notification.reference,
notification.service.crown,
is_scan_letter=False,
postage=notification.postage,
)
move_uploaded_pdf_to_letters_bucket(file_location, upload_filename)
return {"id": str(notification.id)}
| true | true |
f71eb58503fb1ccaf1b57d0371c42c1a0e1947b6 | 2,952 | py | Python | artifact_sdk/model/msgsender/send_message_with_appendix_request_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | 5 | 2019-07-31T04:11:05.000Z | 2021-01-07T03:23:20.000Z | artifact_sdk/model/msgsender/send_message_with_appendix_request_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | artifact_sdk/model/msgsender/send_message_with_appendix_request_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: send_message_with_appendix_request.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from artifact_sdk.model.msgsender import send_message_request_data_pb2 as artifact__sdk_dot_model_dot_msgsender_dot_send__message__request__data__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='send_message_with_appendix_request.proto',
package='msgsender',
syntax='proto3',
serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/msgsender'),
serialized_pb=_b('\n(send_message_with_appendix_request.proto\x12\tmsgsender\x1a<artifact_sdk/model/msgsender/send_message_request_data.proto\"Q\n\x1eSendMessageWithAppendixRequest\x12/\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32!.msgsender.SendMessageRequestDataBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/msgsenderb\x06proto3')
,
dependencies=[artifact__sdk_dot_model_dot_msgsender_dot_send__message__request__data__pb2.DESCRIPTOR,])
_SENDMESSAGEWITHAPPENDIXREQUEST = _descriptor.Descriptor(
name='SendMessageWithAppendixRequest',
full_name='msgsender.SendMessageWithAppendixRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='data', full_name='msgsender.SendMessageWithAppendixRequest.data', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=117,
serialized_end=198,
)
_SENDMESSAGEWITHAPPENDIXREQUEST.fields_by_name['data'].message_type = artifact__sdk_dot_model_dot_msgsender_dot_send__message__request__data__pb2._SENDMESSAGEREQUESTDATA
DESCRIPTOR.message_types_by_name['SendMessageWithAppendixRequest'] = _SENDMESSAGEWITHAPPENDIXREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SendMessageWithAppendixRequest = _reflection.GeneratedProtocolMessageType('SendMessageWithAppendixRequest', (_message.Message,), {
'DESCRIPTOR' : _SENDMESSAGEWITHAPPENDIXREQUEST,
'__module__' : 'send_message_with_appendix_request_pb2'
# @@protoc_insertion_point(class_scope:msgsender.SendMessageWithAppendixRequest)
})
_sym_db.RegisterMessage(SendMessageWithAppendixRequest)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 39.36 | 343 | 0.817751 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from artifact_sdk.model.msgsender import send_message_request_data_pb2 as artifact__sdk_dot_model_dot_msgsender_dot_send__message__request__data__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='send_message_with_appendix_request.proto',
package='msgsender',
syntax='proto3',
serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/msgsender'),
serialized_pb=_b('\n(send_message_with_appendix_request.proto\x12\tmsgsender\x1a<artifact_sdk/model/msgsender/send_message_request_data.proto\"Q\n\x1eSendMessageWithAppendixRequest\x12/\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32!.msgsender.SendMessageRequestDataBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/msgsenderb\x06proto3')
,
dependencies=[artifact__sdk_dot_model_dot_msgsender_dot_send__message__request__data__pb2.DESCRIPTOR,])
_SENDMESSAGEWITHAPPENDIXREQUEST = _descriptor.Descriptor(
name='SendMessageWithAppendixRequest',
full_name='msgsender.SendMessageWithAppendixRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='data', full_name='msgsender.SendMessageWithAppendixRequest.data', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=117,
serialized_end=198,
)
_SENDMESSAGEWITHAPPENDIXREQUEST.fields_by_name['data'].message_type = artifact__sdk_dot_model_dot_msgsender_dot_send__message__request__data__pb2._SENDMESSAGEREQUESTDATA
DESCRIPTOR.message_types_by_name['SendMessageWithAppendixRequest'] = _SENDMESSAGEWITHAPPENDIXREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SendMessageWithAppendixRequest = _reflection.GeneratedProtocolMessageType('SendMessageWithAppendixRequest', (_message.Message,), {
'DESCRIPTOR' : _SENDMESSAGEWITHAPPENDIXREQUEST,
'__module__' : 'send_message_with_appendix_request_pb2'
# @@protoc_insertion_point(class_scope:msgsender.SendMessageWithAppendixRequest)
})
_sym_db.RegisterMessage(SendMessageWithAppendixRequest)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| true | true |
f71eb5ed63903bcf15e5dee58dfa773837782770 | 1,333 | py | Python | lib/galaxy/datatypes/display_applications/util.py | emily101-gif/immport-galaxy | 8f353d1f9b4e0d044e1a9d0b1f928b440df78b8c | [
"CC-BY-3.0"
] | 4 | 2018-10-29T18:34:38.000Z | 2021-09-29T23:30:42.000Z | lib/galaxy/datatypes/display_applications/util.py | emily101-gif/immport-galaxy | 8f353d1f9b4e0d044e1a9d0b1f928b440df78b8c | [
"CC-BY-3.0"
] | 30 | 2016-10-20T15:35:12.000Z | 2018-10-02T15:59:54.000Z | lib/galaxy/datatypes/display_applications/util.py | emily101-gif/immport-galaxy | 8f353d1f9b4e0d044e1a9d0b1f928b440df78b8c | [
"CC-BY-3.0"
] | 7 | 2016-11-03T19:11:01.000Z | 2020-05-11T14:23:52.000Z | from Crypto.Cipher import Blowfish
def encode_dataset_user(trans, dataset, user):
# encode dataset id as usual
# encode user id using the dataset create time as the key
dataset_hash = trans.security.encode_id(dataset.id)
if user is None:
user_hash = 'None'
else:
user_hash = str(user.id)
# Pad to a multiple of 8 with leading "!"
user_hash = ("!" * (8 - len(user_hash) % 8)) + user_hash
cipher = Blowfish.new(str(dataset.create_time))
user_hash = cipher.encrypt(user_hash).encode('hex')
return dataset_hash, user_hash
def decode_dataset_user(trans, dataset_hash, user_hash):
    """Decode the hash pair produced by ``encode_dataset_user``.

    Returns the (dataset, user) ORM objects; ``user`` is None when the
    encoded user hash was None or the literal string 'None'. Asserts that
    both decoded ids resolve to existing rows.
    """
    # decode dataset id as usual
    # decode user id using the dataset create time as the key
    dataset_id = trans.security.decode_id(dataset_hash)
    dataset = trans.sa_session.query(trans.app.model.HistoryDatasetAssociation).get(dataset_id)
    assert dataset, "Bad Dataset id provided to decode_dataset_user"
    if user_hash in [None, 'None']:
        user = None
    else:
        # NOTE(review): .decode('hex') is Python 2 only; a Python 3 port
        # would need bytes.fromhex plus an explicit Blowfish mode.
        cipher = Blowfish.new(str(dataset.create_time))
        # lstrip("!") removes the block-size padding added on encode.
        user_id = cipher.decrypt(user_hash.decode('hex')).lstrip("!")
        user = trans.sa_session.query(trans.app.model.User).get(int(user_id))
        assert user, "A Bad user id was passed to decode_dataset_user"
    return dataset, user
| 40.393939 | 95 | 0.687922 | from Crypto.Cipher import Blowfish
def encode_dataset_user(trans, dataset, user):
    """Return an opaque (dataset_hash, user_hash) pair for display-app links.

    The dataset id goes through the standard id encoder; the user id is
    Blowfish-encrypted with str(dataset.create_time) as the key.  A None
    user is represented by the literal string 'None'.
    """
    dataset_hash = trans.security.encode_id(dataset.id)
    if user is None:
        return dataset_hash, 'None'
    # Blowfish works on 8-byte blocks: left-pad with "!" up to a multiple
    # of 8 (a full extra block when the length already divides evenly).
    raw = str(user.id)
    padded = ("!" * (8 - len(raw) % 8)) + raw
    cipher = Blowfish.new(str(dataset.create_time))
    return dataset_hash, cipher.encrypt(padded).encode('hex')
def decode_dataset_user(trans, dataset_hash, user_hash):
    """Inverse of encode_dataset_user: resolve (dataset, user) from hashes.

    The dataset must be decoded first, since its create_time is the
    decryption key for the user hash.  None or 'None' user hashes yield a
    None user.  AssertionError is raised for unresolvable ids.
    """
    model = trans.app.model
    dataset_id = trans.security.decode_id(dataset_hash)
    dataset = trans.sa_session.query(model.HistoryDatasetAssociation).get(dataset_id)
    assert dataset, "Bad Dataset id provided to decode_dataset_user"
    user = None
    if user_hash not in (None, 'None'):
        cipher = Blowfish.new(str(dataset.create_time))
        # Strip the leading "!" padding added by the encode side.
        decrypted = cipher.decrypt(user_hash.decode('hex')).lstrip("!")
        user = trans.sa_session.query(model.User).get(int(decrypted))
        assert user, "A Bad user id was passed to decode_dataset_user"
    return dataset, user
| true | true |
f71eb6e5d084bd30a401f2798f8959e21dc37208 | 2,157 | py | Python | examples/phosim/phosim_pipeline.py | jchiang87/workflow_engine | 983e0b1a07e4ca02719a22928028daea2a5fbff4 | [
"BSD-3-Clause"
] | null | null | null | examples/phosim/phosim_pipeline.py | jchiang87/workflow_engine | 983e0b1a07e4ca02719a22928028daea2a5fbff4 | [
"BSD-3-Clause"
] | null | null | null | examples/phosim/phosim_pipeline.py | jchiang87/workflow_engine | 983e0b1a07e4ca02719a22928028daea2a5fbff4 | [
"BSD-3-Clause"
] | null | null | null | """
Script to generate xml for running phosim jobs with the SLAC workflow engine.
"""
from __future__ import absolute_import, print_function
import os
import desc.workflow_engine.workflow_engine as engine
# Build the top-level pipeline object and its main task.
pipeline = engine.Pipeline('JC_phoSim_pipeline', '0.1')
main_task = pipeline.main_task
main_task.notation = 'PhoSim Execution Pipeline'
main_task.set_variables()
# Reset output and script directories at SLAC and NERSC.
slac_root_dir = '/nfs/farm/g/lsst/u/jchiang/workflow_engine_tests/phosim_pipeline'
slac_path = lambda x: os.path.join(slac_root_dir, x)
nersc_root_dir = '/global/cscratch1/sd/jchiang8/workflow_engine_tests/phosim_pipeline'
nersc_path = lambda x: os.path.join(nersc_root_dir, x)
main_task.set_variable('SLAC_OUTPUT_DATA_DIR', slac_path('output'))
main_task.set_variable('NERSC_OUTPUT_DATA_DIR', nersc_path('output'))
main_task.set_variable('SLAC_SCRIPT_LOCATION', slac_path('scripts'))
main_task.set_variable('NERSC_SCRIPT_LOCATION', nersc_path('scripts'))
main_task.set_variable('SCRIPT_NAME', 'phosim_pipeline_workflow.py')
# Main-task processes: setupPhosim runs only after setupVisits completes.
setupVisits = main_task.create_process('setupVisits')
setupPhosim = main_task.create_process('setupPhosim', job_type='script',
                                       requirements=[setupVisits])
# Per-visit subtask: smokeTest -> runPhoSim -> phoSimReg -> phoSimFinalize,
# each process gated on the previous one via requirements.
singleVisitTask = engine.Task('singleVisitTask')
smokeTest = singleVisitTask.create_process('smokeTest')
runPhoSim = singleVisitTask.create_process('runPhoSim',
                                           requirements=[smokeTest])
phoSimReg = singleVisitTask.create_process('phoSimReg',
                                           requirements=[runPhoSim])
phoSimFinalize = singleVisitTask.create_process('phoSimFinalize',
                                                job_type='script',
                                                requirements=[phoSimReg])
# Attach the per-visit chain under setupPhosim, then a final wrap-up step.
setupPhosim.add_subtask(singleVisitTask)
wrapUp = main_task.create_process('wrapUp', job_type='script',
                                  requirements=[phoSimFinalize])
# Emit the workflow XML plus the generated Python module and process scripts.
with open('phosim_pipeline.xml', 'w') as output:
    output.write(pipeline.toxml() + '\n')
pipeline.write_python_module(clobber=True)
pipeline.write_process_scripts()
| 38.517857 | 86 | 0.713491 | from __future__ import absolute_import, print_function
import os
import desc.workflow_engine.workflow_engine as engine
# Build the top-level pipeline object and its main task.
pipeline = engine.Pipeline('JC_phoSim_pipeline', '0.1')
main_task = pipeline.main_task
main_task.notation = 'PhoSim Execution Pipeline'
main_task.set_variables()

# Output and script directories at SLAC and NERSC.
slac_root_dir = '/nfs/farm/g/lsst/u/jchiang/workflow_engine_tests/phosim_pipeline'
nersc_root_dir = '/global/cscratch1/sd/jchiang8/workflow_engine_tests/phosim_pipeline'


# PEP 8 (E731): use named functions rather than lambdas bound to names.
def slac_path(x):
    """Return *x* resolved under the SLAC pipeline root directory."""
    return os.path.join(slac_root_dir, x)


def nersc_path(x):
    """Return *x* resolved under the NERSC pipeline root directory."""
    return os.path.join(nersc_root_dir, x)


main_task.set_variable('SLAC_OUTPUT_DATA_DIR', slac_path('output'))
main_task.set_variable('NERSC_OUTPUT_DATA_DIR', nersc_path('output'))
main_task.set_variable('SLAC_SCRIPT_LOCATION', slac_path('scripts'))
main_task.set_variable('NERSC_SCRIPT_LOCATION', nersc_path('scripts'))
main_task.set_variable('SCRIPT_NAME', 'phosim_pipeline_workflow.py')

# Main-task processes: setupPhosim runs only after setupVisits completes.
setupVisits = main_task.create_process('setupVisits')
setupPhosim = main_task.create_process('setupPhosim', job_type='script',
                                       requirements=[setupVisits])

# Per-visit subtask: smokeTest -> runPhoSim -> phoSimReg -> phoSimFinalize,
# each process gated on the previous one via requirements.
singleVisitTask = engine.Task('singleVisitTask')
smokeTest = singleVisitTask.create_process('smokeTest')
runPhoSim = singleVisitTask.create_process('runPhoSim',
                                           requirements=[smokeTest])
phoSimReg = singleVisitTask.create_process('phoSimReg',
                                           requirements=[runPhoSim])
phoSimFinalize = singleVisitTask.create_process('phoSimFinalize',
                                                job_type='script',
                                                requirements=[phoSimReg])

# Attach the per-visit chain under setupPhosim, then a final wrap-up step.
setupPhosim.add_subtask(singleVisitTask)
wrapUp = main_task.create_process('wrapUp', job_type='script',
                                  requirements=[phoSimFinalize])

# Emit the workflow XML plus the generated Python module and process scripts.
with open('phosim_pipeline.xml', 'w') as output:
    output.write(pipeline.toxml() + '\n')
pipeline.write_python_module(clobber=True)
pipeline.write_process_scripts()
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.