hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f73c5c50de77efcb1f62245e3a82db72ff57d470 | 1,045 | py | Python | test.py | solitone/tic-tac-toe | a6795c42700a333e829649116f41ef2cfbf43c3a | [
"Apache-2.0"
] | null | null | null | test.py | solitone/tic-tac-toe | a6795c42700a333e829649116f41ef2cfbf43c3a | [
"Apache-2.0"
] | null | null | null | test.py | solitone/tic-tac-toe | a6795c42700a333e829649116f41ef2cfbf43c3a | [
"Apache-2.0"
] | null | null | null | from tic_tac_toe.Board import Board, GameResult
from tic_tac_toe.RandomPlayer import RandomPlayer
from tic_tac_toe.MinMaxAgent import MinMaxAgent
from tic_tac_toe.RndMinMaxAgent import RndMinMaxAgent
from tic_tac_toe.HumanPlayer import HumanPlayer
from tic_tac_toe.TQPlayer import TQPlayer
from tic_tac_toe.VFPlayer import VFPlayer
from util import *
# battle(RandomPlayer("RandomPlayer1"), RandomPlayer("RandomPlayer2"), num_games=10000)
# battle(MinMaxAgent(), RandomPlayer(), num_games=10000)
# battle(RandomPlayer(), MinMaxAgent(), num_games=10000)
# battle(MinMaxAgent(), RndMinMaxAgent(), num_games=10000)
#play_game(Board(), RndMinMaxAgent(), HumanPlayer(), silent=False)
#play_game(Board(), VFPlayer(), MinMaxAgent(), silent=False)
player1 = VFPlayer("VFPlayer1", learning_rate=0.1, exploration_rate=0.01, v_init=0.6)
#player1 = TQPlayer()
eval_players(player1, RndMinMaxAgent(), 50)
player1.set_exloration_rate(0.0)
eval_players(player1, RndMinMaxAgent(), 50)
while True:
play_game(Board(), player1, HumanPlayer(), silent=False)
| 41.8 | 87 | 0.799043 | from tic_tac_toe.Board import Board, GameResult
from tic_tac_toe.RandomPlayer import RandomPlayer
from tic_tac_toe.MinMaxAgent import MinMaxAgent
from tic_tac_toe.RndMinMaxAgent import RndMinMaxAgent
from tic_tac_toe.HumanPlayer import HumanPlayer
from tic_tac_toe.TQPlayer import TQPlayer
from tic_tac_toe.VFPlayer import VFPlayer
from util import *
player1 = VFPlayer("VFPlayer1", learning_rate=0.1, exploration_rate=0.01, v_init=0.6)
eval_players(player1, RndMinMaxAgent(), 50)
player1.set_exloration_rate(0.0)
eval_players(player1, RndMinMaxAgent(), 50)
while True:
play_game(Board(), player1, HumanPlayer(), silent=False)
| true | true |
f73c5e41ad8d71f32449834a7ee2cf7ebfbe5b0c | 58,482 | py | Python | tools/building.py | radekdoulik/emscripten | 9bb622f6438c31e85890e2571499e0a3093e7f48 | [
"MIT"
] | null | null | null | tools/building.py | radekdoulik/emscripten | 9bb622f6438c31e85890e2571499e0a3093e7f48 | [
"MIT"
] | null | null | null | tools/building.py | radekdoulik/emscripten | 9bb622f6438c31e85890e2571499e0a3093e7f48 | [
"MIT"
] | null | null | null | # Copyright 2020 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import json
import logging
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
from subprocess import PIPE
from . import diagnostics
from . import response_file
from . import shared
from . import webassembly
from . import config
from .toolchain_profiler import ToolchainProfiler
from .shared import CLANG_CC, CLANG_CXX, PYTHON
from .shared import LLVM_NM, EMCC, EMAR, EMXX, EMRANLIB, WASM_LD, LLVM_AR
from .shared import LLVM_LINK, LLVM_OBJCOPY
from .shared import try_delete, run_process, check_call, exit_with_error
from .shared import configuration, path_from_root
from .shared import asmjs_mangle, DEBUG
from .shared import TEMP_DIR
from .shared import CANONICAL_TEMP_DIR, LLVM_DWARFDUMP, demangle_c_symbol_name
from .shared import get_emscripten_temp_dir, exe_suffix, is_c_symbol
from .utils import WINDOWS
from .settings import settings
logger = logging.getLogger('building')

# Building
# NOTE(review): set False here; presumably flipped once the binaryen version is
# validated elsewhere in this file — not visible in this chunk.
binaryen_checked = False
EXPECTED_BINARYEN_VERSION = 101

# cache results of nm - it can be slow to run
nm_cache = {}
# Stores the object files contained in different archive files passed as input
ar_contents = {}
# presumably memoizes is_ar() results per path — populated elsewhere in this file; TODO confirm
_is_ar_cache = {}

# the exports the user requested
user_requested_exports = []
class ObjectFileInfo:
  """Symbol information for a single object file, as reported by llvm-nm.

  Attributes:
    returncode: llvm-nm exit status for this file (0 == parsed successfully).
    output: raw llvm-nm output, or None when constructed from parsed data.
    defs: set of symbols defined by the object.
    undefs: set of symbols referenced but not defined.
    commons: set of common (uninitialized data) symbols.
  """

  def __init__(self, returncode, output, defs=None, undefs=None, commons=None):
    # Use None sentinels instead of mutable `set()` defaults: a literal set()
    # default is a single object shared by every instance constructed with the
    # defaults, so mutating one instance's sets would corrupt all the others.
    self.returncode = returncode
    self.output = output
    self.defs = set() if defs is None else defs
    self.undefs = set() if undefs is None else undefs
    self.commons = set() if commons is None else commons

  def is_valid_for_nm(self):
    """Return True if llvm-nm was able to parse this file."""
    return self.returncode == 0
# llvm-ar appears to just use basenames inside archives. as a result, files
# with the same basename will trample each other when we extract them. to help
# warn of such situations, we warn if there are duplicate entries in the
# archive
def warn_if_duplicate_entries(archive_contents, archive_filename):
  """Warn (via diagnostics) when *archive_contents* has duplicate member names.

  The warning lists each duplicated name once, in first-occurrence order.
  Does nothing when all names are unique.
  """
  if len(archive_contents) == len(set(archive_contents)):
    return
  msg = '%s: archive file contains duplicate entries. This is not supported by emscripten. Only the last member with a given name will be linked in which can result in undefined symbols. You should either rename your source files, or use `emar` to create you archives which works around this issue.' % archive_filename
  # Count occurrences in a single O(n) pass; dict insertion order preserves
  # first-occurrence order, matching what the old O(n^2) scan reported.
  counts = {}
  for entry in archive_contents:
    counts[entry] = counts.get(entry, 0) + 1
  for entry, count in counts.items():
    if count > 1:
      msg += '\n duplicate: %s' % entry
  diagnostics.warning('emcc', msg)
# Extracts the given list of archive files and outputs their contents
def extract_archive_contents(archive_files):
  """List and extract every archive in *archive_files* into one temp dir.

  Returns a list of dicts of the form
  ``{'archive_name': <input path>, 'o_files': [<extracted member paths>]}``.
  The temp dir is deleted at interpreter exit.  Exits with an error if any
  listed member fails to extract.
  """
  archive_results = shared.run_multiple_processes([[LLVM_AR, 't', a] for a in archive_files], pipe_stdout=True)

  # All archives are extracted into one directory, so members with the same
  # basename in different archives will collide (see warn_if_duplicate_entries).
  unpack_temp_dir = tempfile.mkdtemp('_archive_contents', 'emscripten_temp_')

  def clean_at_exit():
    try_delete(unpack_temp_dir)
  shared.atexit.register(clean_at_exit)

  archive_contents = []

  for archive_file, listing in zip(archive_files, archive_results):
    contents = [l for l in listing.splitlines() if len(l)]
    if len(contents) == 0:
      # Bug fix: previously this logged the (empty) listing output rather than
      # the archive's filename.
      logger.debug('Archive %s appears to be empty (recommendation: link an .so instead of .a)' % archive_file)

    # `ar` files can only contains filenames. Just to be sure, verify that each
    # file has only as filename component and is not absolute
    for f in contents:
      assert not os.path.dirname(f)
      assert not os.path.isabs(f)

    warn_if_duplicate_entries(contents, archive_file)

    archive_contents.append({
      'archive_name': archive_file,
      'o_files': [os.path.join(unpack_temp_dir, c) for c in contents]
    })

  shared.run_multiple_processes([[LLVM_AR, 'xo', a] for a in archive_files], cwd=unpack_temp_dir)

  # check that all files were created
  for a in archive_contents:
    missing_contents = [x for x in a['o_files'] if not os.path.exists(x)]
    if missing_contents:
      # Bug fix: the message previously interpolated the stale loop variable `f`
      # (a member filename from the listing loop) instead of the archive's name.
      exit_with_error('llvm-ar failed to extract file(s) ' + str(missing_contents) + ' from archive file ' + a['archive_name'] + '!')

  return archive_contents
def unique_ordered(values):
  """return a list of unique values in an input list, without changing order
  (list(set(.)) would change order randomly).
  """
  # dicts preserve insertion order (guaranteed since Python 3.7), so this is a
  # one-line O(n) stdlib replacement for the manual seen-set bookkeeping.
  return list(dict.fromkeys(values))
# clear caches. this is not normally needed, except if the clang/LLVM
# used changes inside this invocation of Building, which can happen in the benchmarker
# when it compares different builds.
def clear():
  """Empty every module-level cache (nm results, archive contents, is_ar)."""
  for cache in (nm_cache, ar_contents, _is_ar_cache):
    cache.clear()
# .. but for Popen, we cannot have doublequotes, so provide functionality to
# remove them when needed.
def remove_quotes(arg):
  """Strip one level of matching surrounding quotes (and their escapes) from
  *arg*; lists are processed recursively, unquoted strings pass through."""
  if isinstance(arg, list):
    return [remove_quotes(item) for item in arg]
  for quote in ('"', "'"):
    if arg.startswith(quote) and arg.endswith(quote):
      return arg[1:-1].replace('\\' + quote, quote)
  return arg
def get_building_env(cflags=None):
  """Return a copy of os.environ configured for building third-party projects
  with the emscripten toolchain (CC/CXX/AR/etc point at the em* tools).

  Args:
    cflags: optional list of extra compiler flags; when given they are joined
        into CFLAGS/EMMAKEN_CFLAGS.  (Previously defaulted to a mutable `[]`,
        which is a shared-object hazard; None is equivalent and safe.)
  """
  env = os.environ.copy()
  # point CC etc. to the em* tools.
  env['CC'] = EMCC
  env['CXX'] = EMXX
  env['AR'] = EMAR
  env['LD'] = EMCC
  env['NM'] = LLVM_NM
  env['LDSHARED'] = EMCC
  env['RANLIB'] = EMRANLIB
  env['EMSCRIPTEN_TOOLS'] = path_from_root('tools')
  if cflags:
    env['CFLAGS'] = env['EMMAKEN_CFLAGS'] = ' '.join(cflags)
  env['HOST_CC'] = CLANG_CC
  env['HOST_CXX'] = CLANG_CXX
  env['HOST_CFLAGS'] = "-W" # if set to nothing, CFLAGS is used, which we don't want
  env['HOST_CXXFLAGS'] = "-W" # if set to nothing, CXXFLAGS is used, which we don't want
  env['PKG_CONFIG_LIBDIR'] = path_from_root('system', 'local', 'lib', 'pkgconfig') + os.path.pathsep + path_from_root('system', 'lib', 'pkgconfig')
  env['PKG_CONFIG_PATH'] = os.environ.get('EM_PKG_CONFIG_PATH', '')
  env['EMSCRIPTEN'] = path_from_root()
  env['PATH'] = path_from_root('system', 'bin') + os.pathsep + env['PATH']
  env['CROSS_COMPILE'] = path_from_root('em') # produces /path/to/emscripten/em , which then can have 'cc', 'ar', etc appended to it
  return env
# Returns a clone of the given environment with all directories that contain
# sh.exe removed from the PATH. Used to work around CMake limitation with
# MinGW Makefiles, where sh.exe is not allowed to be present.
def remove_sh_exe_from_path(env):
  """Return a copy of *env*; on Windows, drop PATH entries containing sh.exe."""
  result = env.copy()
  if not WINDOWS:
    return result
  kept = [d for d in result['PATH'].split(';')
          if not os.path.exists(os.path.join(d, 'sh.exe'))]
  result['PATH'] = ';'.join(kept)
  return result
def make_paths_absolute(f):
  """Return *f* as an absolute path; flags (arguments starting with '-') are
  returned unchanged."""
  return f if f.startswith('-') else os.path.abspath(f)
# Runs llvm-nm for the given list of files.
# The results are populated in nm_cache
def llvm_nm_multiple(files):
  """Run llvm-nm over *files*, batching where possible, and return a list of
  ObjectFileInfo in the same order as *files*.

  Results are memoized in the module-level nm_cache; files that fail to parse
  map to ObjectFileInfo(1, '') in the returned list (but are not cached).
  """
  with ToolchainProfiler.profile_block('llvm_nm_multiple'):
    if len(files) == 0:
      return []
    # Run llvm-nm on files that we haven't cached yet
    llvm_nm_files = [f for f in files if f not in nm_cache]

    # We can issue multiple files in a single llvm-nm calls, but only if those
    # files are all .o or .bc files. Because of llvm-nm output format, we cannot
    # llvm-nm multiple .a files in one call, but those must be individually checked.
    o_files = [f for f in llvm_nm_files if os.path.splitext(f)[1].lower() in ['.o', '.obj', '.bc']]
    a_files = [f for f in llvm_nm_files if f not in o_files]

    # Issue parallel calls for .a files
    if len(a_files) > 0:
      results = shared.run_multiple_processes([[LLVM_NM, a] for a in a_files], pipe_stdout=True, check=False)
      for i in range(len(results)):
        nm_cache[a_files[i]] = parse_symbols(results[i])

    # Issue a single batch call for multiple .o files
    if len(o_files) > 0:
      cmd = [LLVM_NM] + o_files
      # The batch may exceed command-line length limits, so allow a response file.
      cmd = get_command_with_possible_response_file(cmd)
      results = run_process(cmd, stdout=PIPE, stderr=PIPE, check=False)

      # If one or more of the input files cannot be processed, llvm-nm will return a non-zero error code, but it will still process and print
      # out all the other files in order. So even if process return code is non zero, we should always look at what we got to stdout.
      if results.returncode != 0:
        logger.debug('Subcommand ' + ' '.join(cmd) + ' failed with return code ' + str(results.returncode) + '! (An input file was corrupt?)')

      results = results.stdout

      # llvm-nm produces a single listing of form
      # file1.o:
      # 00000001 T __original_main
      #          U __stack_pointer
      #
      # file2.o:
      # 0000005d T main
      #          U printf
      #
      # ...
      # so loop over the report to extract the results
      # for each individual file.

      filename = o_files[0]

      # When we dispatched more than one file, we must manually parse
      # the file result delimiters (like shown structured above)
      if len(o_files) > 1:
        file_start = 0
        i = 0

        while True:
          nl = results.find('\n', i)
          if nl < 0:
            break
          # A line ending in ':' with nothing after it marks the start of the
          # next file's section; everything before it belongs to `filename`.
          colon = results.rfind(':', i, nl)
          if colon >= 0 and results[colon + 1] == '\n': # New file start?
            nm_cache[filename] = parse_symbols(results[file_start:i - 1])
            filename = results[i:colon].strip()
            file_start = colon + 2
          i = nl + 1

        # Flush the final file's section (no trailing delimiter follows it).
        nm_cache[filename] = parse_symbols(results[file_start:])
      else:
        # We only dispatched a single file, so can parse all of the result directly
        # to that file.
        nm_cache[filename] = parse_symbols(results)

    return [nm_cache[f] if f in nm_cache else ObjectFileInfo(1, '') for f in files]
def llvm_nm(file):
  """Run llvm-nm on a single file (convenience wrapper over llvm_nm_multiple)."""
  results = llvm_nm_multiple([file])
  return results[0]
def read_link_inputs(files):
  """Pre-scan all link inputs, populating the module-level ar_contents and
  nm_cache so later symbol queries during the link are cache hits.

  Returns nothing; the work product is the populated caches.
  """
  with ToolchainProfiler.profile_block('read_link_inputs'):
    # Before performing the link, we need to look at each input file to determine which symbols
    # each of them provides. Do this in multiple parallel processes.
    archive_names = [] # .a files passed in to the command line to the link
    object_names = [] # .o/.bc files passed in to the command line to the link
    for f in files:
      absolute_path_f = make_paths_absolute(f)

      # Only schedule files not already cached.
      if absolute_path_f not in ar_contents and is_ar(absolute_path_f):
        archive_names.append(absolute_path_f)
      elif absolute_path_f not in nm_cache and is_bitcode(absolute_path_f):
        object_names.append(absolute_path_f)

    # Archives contain objects, so process all archives first in parallel to obtain the object files in them.
    archive_contents = extract_archive_contents(archive_names)

    for a in archive_contents:
      ar_contents[os.path.abspath(a['archive_name'])] = a['o_files']
      for o in a['o_files']:
        if o not in nm_cache:
          object_names.append(o)

    # Next, extract symbols from all object files (either standalone or inside archives we just extracted)
    # The results are not used here directly, but populated to llvm-nm cache structure.
    llvm_nm_multiple(object_names)
def llvm_backend_args():
  """Build the list of flags passed to the LLVM backend (via -mllvm)."""
  # disable slow and relatively unimportant optimization passes
  args = ['-combiner-global-alias-analysis=false']

  # asm.js-style exception handling
  if not settings.DISABLE_EXCEPTION_CATCHING:
    args.append('-enable-emscripten-cxx-exceptions')
  if settings.EXCEPTION_CATCHING_ALLOWED:
    # When 'main' has a non-standard signature, LLVM outlines its content out to
    # '__original_main'. So we add it to the allowed list as well.
    if 'main' in settings.EXCEPTION_CATCHING_ALLOWED:
      settings.EXCEPTION_CATCHING_ALLOWED += ['__original_main']
    args.append('-emscripten-cxx-exceptions-allowed=' + ','.join(settings.EXCEPTION_CATCHING_ALLOWED))

  # asm.js-style setjmp/longjmp handling
  if settings.SUPPORT_LONGJMP:
    args.append('-enable-emscripten-sjlj')

  # better (smaller, sometimes faster) codegen, see binaryen#1054
  # and https://bugs.llvm.org/show_bug.cgi?id=39488
  args.append('-disable-lsr')
  return args
def link_to_object(args, target):
  """Produce a relocatable object at *target* from the given link args."""
  # link using lld unless LTO is requested (lld can't output LTO/bitcode object files).
  if settings.LTO:
    link_bitcode(args, target)
  else:
    link_lld(args + ['--relocatable'], target)
def link_llvm(linker_inputs, target):
  """Link bitcode *linker_inputs* into *target* using llvm-link."""
  full_cmd = [LLVM_LINK] + linker_inputs + ['-o', target]
  # Command lines can exceed OS limits; fall back to a response file if so.
  check_call(get_command_with_possible_response_file(full_cmd))
def lld_flags_for_executable(external_symbol_list):
  """Compute the wasm-ld flags used when linking a final executable (as
  opposed to a relocatable object).

  Args:
    external_symbol_list: optional list of symbols that may remain undefined
        (resolved later by JS); when given it is written to a temp file for
        --allow-undefined-file, otherwise --allow-undefined is used.
  Returns:
    The list of command-line flag strings.
  """
  cmd = []
  if external_symbol_list:
    undefs = configuration.get_temp_files().get('.undefined').name
    with open(undefs, 'w') as f:
      f.write('\n'.join(external_symbol_list))
    cmd.append('--allow-undefined-file=%s' % undefs)
  else:
    cmd.append('--allow-undefined')

  if settings.IMPORTED_MEMORY:
    cmd.append('--import-memory')

  if settings.USE_PTHREADS:
    cmd.append('--shared-memory')

  if settings.MEMORY64:
    cmd.append('-mwasm64')

  # wasm-ld can strip debug info for us. this strips both the Names
  # section and DWARF, so we can only use it when we don't need any of
  # those things.
  if settings.DEBUG_LEVEL < 2 and (not settings.EMIT_SYMBOL_MAP and
                                   not settings.PROFILING_FUNCS and
                                   not settings.ASYNCIFY):
    cmd.append('--strip-debug')

  if settings.LINKABLE:
    cmd.append('--export-all')
    cmd.append('--no-gc-sections')
  else:
    c_exports = [e for e in settings.EXPORTED_FUNCTIONS if is_c_symbol(e)]
    # Strip the leading underscores
    c_exports = [demangle_c_symbol_name(e) for e in c_exports]
    if external_symbol_list:
      # Filter out symbols external/JS symbols
      c_exports = [e for e in c_exports if e not in external_symbol_list]
    for export in c_exports:
      cmd += ['--export', export]

  if settings.RELOCATABLE:
    cmd.append('--experimental-pic')
    if settings.SIDE_MODULE:
      cmd.append('-shared')
    else:
      cmd.append('-pie')
    if not settings.LINKABLE:
      cmd.append('--no-export-dynamic')
  else:
    cmd.append('--export-table')
    if settings.ALLOW_TABLE_GROWTH:
      cmd.append('--growable-table')

  if not settings.SIDE_MODULE:
    # Export these two section start symbols so that we can extact the string
    # data that they contain.
    cmd += [
      '--export', '__start_em_asm',
      '--export', '__stop_em_asm',
      '-z', 'stack-size=%s' % settings.TOTAL_STACK,
      '--initial-memory=%d' % settings.INITIAL_MEMORY,
    ]

    if settings.STANDALONE_WASM:
      # when settings.EXPECT_MAIN is set we fall back to wasm-ld default of _start
      if not settings.EXPECT_MAIN:
        cmd += ['--entry=_initialize']
    else:
      if settings.EXPECT_MAIN and not settings.IGNORE_MISSING_MAIN:
        cmd += ['--entry=main']
      else:
        cmd += ['--no-entry']

    if not settings.ALLOW_MEMORY_GROWTH:
      # Fixed-size memory: cap it at the initial size.
      cmd.append('--max-memory=%d' % settings.INITIAL_MEMORY)
    elif settings.MAXIMUM_MEMORY != -1:
      cmd.append('--max-memory=%d' % settings.MAXIMUM_MEMORY)

    if not settings.RELOCATABLE:
      cmd.append('--global-base=%s' % settings.GLOBAL_BASE)

  return cmd
def link_lld(args, target, external_symbol_list=None):
  """Link with wasm-ld, writing the result to *target*.

  Args:
    args: linker inputs and flags (group flags are stripped; see below).
    target: output file path.
    external_symbol_list: forwarded to lld_flags_for_executable when producing
        an executable (ignored for relocatable output).
  """
  if not os.path.exists(WASM_LD):
    exit_with_error('linker binary not found in LLVM directory: %s', WASM_LD)
  # runs lld to link things.
  # lld doesn't currently support --start-group/--end-group since the
  # semantics are more like the windows linker where there is no need for
  # grouping.
  args = [a for a in args if a not in ('--start-group', '--end-group')]

  # Emscripten currently expects linkable output (SIDE_MODULE/MAIN_MODULE) to
  # include all archive contents.
  if settings.LINKABLE:
    args.insert(0, '--whole-archive')
    args.append('--no-whole-archive')

  if settings.STRICT:
    args.append('--fatal-warnings')

  cmd = [WASM_LD, '-o', target] + args
  for a in llvm_backend_args():
    cmd += ['-mllvm', a]

  # For relocatable output (generating an object file) we don't pass any of the
  # normal linker flags that are used when building and exectuable
  if '--relocatable' not in args and '-r' not in args:
    cmd += lld_flags_for_executable(external_symbol_list)

  cmd = get_command_with_possible_response_file(cmd)
  check_call(cmd)
def link_bitcode(args, target, force_archive_contents=False):
  """Link bitcode inputs to *target* with llvm-link, resolving archive members
  with GNU-ld-like semantics (pull a member in only if it satisfies a
  currently-undefined symbol, honoring --whole-archive and group flags).

  Args:
    args: mix of input file paths and the supported subset of linker flags.
    target: output bitcode file path.
    force_archive_contents: when True, include every archive member
        regardless of whether it resolves an undefined symbol.
  """
  # "Full-featured" linking: looks into archives (duplicates lld functionality)
  input_files = [a for a in args if not a.startswith('-')]
  files_to_link = []
  # Tracking unresolveds is necessary for .a linking, see below.
  # Specify all possible entry points to seed the linking process.
  # For a simple application, this would just be "main".
  unresolved_symbols = set([func[1:] for func in settings.EXPORTED_FUNCTIONS])
  resolved_symbols = set()
  # Paths of already included object files from archives.
  added_contents = set()
  has_ar = any(is_ar(make_paths_absolute(f)) for f in input_files)

  # If we have only one archive or the force_archive_contents flag is set,
  # then we will add every object file we see, regardless of whether it
  # resolves any undefined symbols.
  force_add_all = len(input_files) == 1 or force_archive_contents

  # Considers an object file for inclusion in the link. The object is included
  # if force_add=True or if the object provides a currently undefined symbol.
  # If the object is included, the symbol tables are updated and the function
  # returns True.
  def consider_object(f, force_add=False):
    new_symbols = llvm_nm(f)
    # Check if the object was valid according to llvm-nm. It also accepts
    # native object files.
    if not new_symbols.is_valid_for_nm():
      diagnostics.warning('emcc', 'object %s is not valid according to llvm-nm, cannot link', f)
      return False
    # Check the object is valid for us, and not a native object file.
    if not is_bitcode(f):
      exit_with_error('unknown file type: %s', f)
    provided = new_symbols.defs.union(new_symbols.commons)
    do_add = force_add or not unresolved_symbols.isdisjoint(provided)
    if do_add:
      logger.debug('adding object %s to link (forced: %d)' % (f, force_add))
      # Update resolved_symbols table with newly resolved symbols
      resolved_symbols.update(provided)
      # Update unresolved_symbols table by adding newly unresolved symbols and
      # removing newly resolved symbols.
      unresolved_symbols.update(new_symbols.undefs.difference(resolved_symbols))
      unresolved_symbols.difference_update(provided)
      files_to_link.append(f)
    return do_add

  # Traverse a single archive. The object files are repeatedly scanned for
  # newly satisfied symbols until no new symbols are found. Returns true if
  # any object files were added to the link.
  def consider_archive(f, force_add):
    added_any_objects = False
    loop_again = True
    logger.debug('considering archive %s' % (f))
    contents = ar_contents[f]
    while loop_again: # repeatedly traverse until we have everything we need
      loop_again = False
      for content in contents:
        if content in added_contents:
          continue
        # Link in the .o if it provides symbols, *or* this is a singleton archive (which is
        # apparently an exception in gcc ld)
        if consider_object(content, force_add=force_add):
          added_contents.add(content)
          loop_again = True
          added_any_objects = True
    logger.debug('done running loop of archive %s' % (f))
    return added_any_objects

  read_link_inputs(input_files)

  # Rescan a group of archives until we don't find any more objects to link.
  def scan_archive_group(group):
    loop_again = True
    logger.debug('starting archive group loop')
    while loop_again:
      loop_again = False
      for archive in group:
        if consider_archive(archive, force_add=False):
          loop_again = True
    logger.debug('done with archive group loop')

  current_archive_group = None
  in_whole_archive = False
  for a in args:
    if a.startswith('-'):
      if a in ['--start-group', '-(']:
        assert current_archive_group is None, 'Nested --start-group, missing --end-group?'
        current_archive_group = []
      elif a in ['--end-group', '-)']:
        assert current_archive_group is not None, '--end-group without --start-group'
        scan_archive_group(current_archive_group)
        current_archive_group = None
      elif a in ['--whole-archive', '-whole-archive']:
        in_whole_archive = True
      elif a in ['--no-whole-archive', '-no-whole-archive']:
        in_whole_archive = False
      else:
        # Command line flags should already be vetted by the time this method
        # is called, so this is an internal error
        exit_with_error('unsupported link flag: %s', a)
    else:
      lib_path = make_paths_absolute(a)
      if is_ar(lib_path):
        # Extract object files from ar archives, and link according to gnu ld semantics
        # (link in an entire .o from the archive if it supplies symbols still unresolved)
        consider_archive(lib_path, in_whole_archive or force_add_all)
        # If we're inside a --start-group/--end-group section, add to the list
        # so we can loop back around later.
        if current_archive_group is not None:
          current_archive_group.append(lib_path)
      elif is_bitcode(lib_path):
        if has_ar:
          consider_object(a, force_add=True)
        else:
          # If there are no archives then we can simply link all valid object
          # files and skip the symbol table stuff.
          files_to_link.append(a)
      else:
        exit_with_error('unknown file type: %s', a)

  # We have to consider the possibility that --start-group was used without a matching
  # --end-group; GNU ld permits this behavior and implicitly treats the end of the
  # command line as having an --end-group.
  if current_archive_group:
    logger.debug('--start-group without matching --end-group, rescanning')
    scan_archive_group(current_archive_group)
    current_archive_group = None

  try_delete(target)

  # Finish link
  # tolerate people trying to link a.so a.so etc.
  files_to_link = unique_ordered(files_to_link)

  logger.debug('emcc: linking: %s to %s', files_to_link, target)
  link_llvm(files_to_link, target)
def get_command_with_possible_response_file(cmd, max_len=8192):
  """Return *cmd* unchanged, or an equivalent ``[program, @response-file]``
  pair when the joined command line would exceed *max_len* characters.

  Args:
    cmd: full command as a list of strings (program first).
    max_len: threshold in characters before switching to a response file.
        8k is a bit of an arbitrary default, but a reasonable one for max
        command line size.  (Parameterized so callers on platforms with
        different limits can tune it; the default preserves old behavior.)
  """
  if len(' '.join(cmd)) <= max_len:
    return cmd
  logger.debug('using response file for %s' % cmd[0])
  filename = response_file.create_response_file(cmd[1:], TEMP_DIR)
  return [cmd[0], '@' + filename]
def parse_symbols(output):
  """Parse llvm-nm textual output into an ObjectFileInfo (returncode 0).

  Symbols are bucketed into defined, undefined ('U') and common ('C') sets;
  filename header lines and comments are skipped.
  """
  defs = set()
  undefs = set()
  commons = set()
  for line in output.split('\n'):
    if not line or line[0] == '#':
      continue
    # e.g. filename.o: , saying which file it's from
    if ':' in line:
      continue
    parts = [seg for seg in line.split(' ') if len(seg)]
    # pnacl-nm will print zero offsets for bitcode, and newer llvm-nm will print present symbols
    # as -------- T name ; drop that leading address column either way
    if (len(parts) == 3 and parts[0] == "--------") or re.match(r'^[\da-f]{8}$', parts[0]):
      parts.pop(0)
    if len(parts) == 2:
      # ignore lines with absolute offsets, these are not bitcode anyhow
      # e.g. |00000630 t d_source_name|
      status, symbol = parts
      if status == 'U':
        undefs.add(symbol)
      elif status == 'C':
        commons.add(symbol)
      elif status == status.upper():
        # FIXME: using WTD in the previous line fails due to llvm-nm behavior on macOS,
        # so for now we assume all uppercase are normally defined external symbols
        defs.add(symbol)
  return ObjectFileInfo(0, None, defs, undefs, commons)
def emar(action, output_filename, filenames, stdout=None, stderr=None, env=None):
  """Run emar *action* on *output_filename*, passing *filenames* through a
  response file (member lists can exceed command-line length limits)."""
  try_delete(output_filename)
  response_filename = response_file.create_response_file(filenames, TEMP_DIR)
  try:
    run_process([EMAR, action, output_filename, '@' + response_filename],
                stdout=stdout, stderr=stderr, env=env)
  finally:
    # always clean up the response file, even if emar failed
    try_delete(response_filename)

  if 'c' in action:
    assert os.path.exists(output_filename), 'emar could not create output file: ' + output_filename
def opt_level_to_str(opt_level, shrink_level=0):
  """Convert an (opt_level, shrink_level) pair to a single -O flag string.

  opt_level 0 always wins (-O0); otherwise shrink_level selects -Os/-Oz,
  falling back to -O1..-O3 (capped at 3).
  """
  if opt_level == 0:
    return '-O0'
  if shrink_level >= 2:
    return '-Oz'
  if shrink_level == 1:
    return '-Os'
  return '-O%d' % min(opt_level, 3)
def js_optimizer(filename, passes):
  """Run the JS optimizer on *filename* with the given *passes*, turning a
  subprocess failure into a fatal emcc error."""
  from . import js_optimizer
  try:
    return js_optimizer.run(filename, passes)
  except subprocess.CalledProcessError as err:
    exit_with_error("'%s' failed (%d)", ' '.join(err.cmd), err.returncode)
# run JS optimizer on some JS, ignoring asm.js contents if any - just run on it all
def acorn_optimizer(filename, passes, extra_info=None, return_output=False):
  """Run tools/acorn-optimizer.js over *filename* with the given *passes*.

  Args:
    filename: input JS file.
    passes: list of pass names forwarded to the optimizer script.
    extra_info: optional string appended to a temp copy of the input as a
        '// EXTRA_INFO:' comment for the optimizer to consume.
    return_output: when True, return the optimizer's stdout instead of
        writing a '<filename>.jso.js' file and returning its path.
  """
  optimizer = path_from_root('tools', 'acorn-optimizer.js')
  original_filename = filename
  if extra_info is not None:
    temp_files = configuration.get_temp_files()
    temp = temp_files.get('.js').name
    shutil.copyfile(filename, temp)
    with open(temp, 'a') as f:
      f.write('// EXTRA_INFO: ' + extra_info)
    filename = temp
  cmd = config.NODE_JS + [optimizer, filename] + passes
  # Keep JS code comments intact through the acorn optimization pass so that JSDoc comments
  # will be carried over to a later Closure run.
  if settings.USE_CLOSURE_COMPILER:
    cmd += ['--closureFriendly']
  if settings.VERBOSE:
    # NOTE(review): passed without leading dashes, unlike --closureFriendly —
    # confirm acorn-optimizer.js really expects a bare 'verbose' argument.
    cmd += ['verbose']
  if not return_output:
    # renamed from `next` (shadowed the builtin)
    output_file = original_filename + '.jso.js'
    configuration.get_temp_files().note(output_file)
    # use a context manager so the output handle is closed promptly instead of
    # being leaked until garbage collection
    with open(output_file, 'w') as out:
      check_call(cmd, stdout=out)
    save_intermediate(output_file, '%s.js' % passes[0])
    return output_file
  return check_call(cmd, stdout=PIPE).stdout
# evals ctors. if binaryen_bin is provided, it is the dir of the binaryen tool
# for this, and we are in wasm mode
def eval_ctors(js_file, binary_file, debug_info=False): # noqa
  """Stub: ctor evaluation is currently disabled (see issue link below); logs
  and returns without touching its inputs."""
  logger.debug('Ctor evalling in the wasm backend is disabled due to https://github.com/emscripten-core/emscripten/issues/9527')
  return
  # TODO re-enable
  # cmd = [PYTHON, path_from_root('tools', 'ctor_evaller.py'), js_file, binary_file, str(settings.INITIAL_MEMORY), str(settings.TOTAL_STACK), str(settings.GLOBAL_BASE), binaryen_bin, str(int(debug_info))]
  # if binaryen_bin:
  #   cmd += get_binaryen_feature_flags()
  # check_call(cmd)
def get_closure_compiler():
  """Return the command (list of strings) used to invoke the Closure compiler."""
  # First check if the user configured a specific CLOSURE_COMPILER in their settings
  if config.CLOSURE_COMPILER:
    # NOTE(review): the truth test reads config.CLOSURE_COMPILER but the return
    # reads shared.CLOSURE_COMPILER — confirm these refer to the same value.
    return shared.CLOSURE_COMPILER

  # Otherwise use the one installed via npm
  cmd = shared.get_npm_cmd('google-closure-compiler')
  if not WINDOWS:
    # Work around an issue that Closure compiler can take up a lot of memory and crash in an error
    # "FATAL ERROR: Ineffective mark-compacts near heap limit Allocation failed - JavaScript heap
    # out of memory"
    cmd.insert(-1, '--max_old_space_size=8192')
  return cmd
def check_closure_compiler(cmd, args, env, allowed_to_fail):
  """Verify that the Closure compiler at *cmd* runs and reports a version.

  Args:
    cmd: base command list for the compiler.
    args: extra user arguments to pass alongside --version.
    env: environment for the subprocess.
    allowed_to_fail: when True, return False on failure instead of exiting.
  Returns:
    True when the compiler executed and printed a recognizable version.
  """
  try:
    output = run_process(cmd + args + ['--version'], stdout=PIPE, env=env).stdout
  except Exception as e:
    if allowed_to_fail:
      return False
    # Logger.warn is a deprecated alias for Logger.warning; use the real name.
    logger.warning(str(e))
    exit_with_error('closure compiler ("%s --version") did not execute properly!' % str(cmd))
  if 'Version:' not in output:
    if allowed_to_fail:
      return False
    exit_with_error('unrecognized closure compiler --version output (%s):\n%s' % (str(cmd), output))
  return True
def closure_compiler(filename, pretty, advanced=True, extra_closure_args=None):
  """Run the Closure compiler over the JS file `filename`.

  Returns the path of a temp file holding the minified output.  `pretty`
  keeps the output human-readable; `advanced` selects ADVANCED_OPTIMIZATIONS;
  `extra_closure_args` (plus the EMCC_CLOSURE_ARGS environment variable) are
  appended as user-level closure arguments.  On errors (and, depending on
  settings.CLOSURE_WARNINGS, on warnings) this exits via exit_with_error.
  """
  with ToolchainProfiler.profile_block('closure_compiler'):
    env = shared.env_with_node_in_path()
    user_args = []
    env_args = os.environ.get('EMCC_CLOSURE_ARGS')
    if env_args:
      user_args += shlex.split(env_args)
    if extra_closure_args:
      user_args += extra_closure_args
    # Closure compiler expects JAVA_HOME to be set *and* java.exe to be in the PATH in order
    # to enable use the java backend. Without this it will only try the native and JavaScript
    # versions of the compiler.
    java_bin = os.path.dirname(config.JAVA)
    if java_bin:
      def add_to_path(dirname):
        env['PATH'] = env['PATH'] + os.pathsep + dirname
      add_to_path(java_bin)
      java_home = os.path.dirname(java_bin)
      env.setdefault('JAVA_HOME', java_home)
    closure_cmd = get_closure_compiler()
    native_closure_compiler_works = check_closure_compiler(closure_cmd, user_args, env, allowed_to_fail=True)
    if not native_closure_compiler_works and not any(a.startswith('--platform') for a in user_args):
      # Run with Java Closure compiler as a fallback if the native version does not work
      user_args.append('--platform=java')
      check_closure_compiler(closure_cmd, user_args, env, allowed_to_fail=False)
    # Closure externs file contains known symbols to be extern to the minification, Closure
    # should not minify these symbol names.
    CLOSURE_EXTERNS = [path_from_root('src', 'closure-externs', 'closure-externs.js')]
    # Closure compiler needs to know about all exports that come from the wasm module, because to optimize for small code size,
    # the exported symbols are added to global scope via a foreach loop in a way that evades Closure's static analysis. With an explicit
    # externs file for the exports, Closure is able to reason about the exports.
    if settings.WASM_FUNCTION_EXPORTS and not settings.DECLARE_ASM_MODULE_EXPORTS:
      # Generate an exports file that records all the exported symbols from the wasm module.
      module_exports_suppressions = '\n'.join(['/**\n * @suppress {duplicate, undefinedVars}\n */\nvar %s;\n' % asmjs_mangle(i) for i in settings.WASM_FUNCTION_EXPORTS])
      exports_file = configuration.get_temp_files().get('_module_exports.js')
      exports_file.write(module_exports_suppressions.encode())
      exports_file.close()
      CLOSURE_EXTERNS += [exports_file.name]
    # Node.js specific externs
    if shared.target_environment_may_be('node'):
      NODE_EXTERNS_BASE = path_from_root('third_party', 'closure-compiler', 'node-externs')
      NODE_EXTERNS = os.listdir(NODE_EXTERNS_BASE)
      NODE_EXTERNS = [os.path.join(NODE_EXTERNS_BASE, name) for name in NODE_EXTERNS
                      if name.endswith('.js')]
      CLOSURE_EXTERNS += [path_from_root('src', 'closure-externs', 'node-externs.js')] + NODE_EXTERNS
    # V8/SpiderMonkey shell specific externs
    if shared.target_environment_may_be('shell'):
      V8_EXTERNS = [path_from_root('src', 'closure-externs', 'v8-externs.js')]
      SPIDERMONKEY_EXTERNS = [path_from_root('src', 'closure-externs', 'spidermonkey-externs.js')]
      CLOSURE_EXTERNS += V8_EXTERNS + SPIDERMONKEY_EXTERNS
    # Web environment specific externs
    if shared.target_environment_may_be('web') or shared.target_environment_may_be('worker'):
      BROWSER_EXTERNS_BASE = path_from_root('src', 'closure-externs', 'browser-externs')
      if os.path.isdir(BROWSER_EXTERNS_BASE):
        BROWSER_EXTERNS = os.listdir(BROWSER_EXTERNS_BASE)
        BROWSER_EXTERNS = [os.path.join(BROWSER_EXTERNS_BASE, name) for name in BROWSER_EXTERNS
                           if name.endswith('.js')]
        CLOSURE_EXTERNS += BROWSER_EXTERNS
    if settings.MINIMAL_RUNTIME and settings.USE_PTHREADS and not settings.MODULARIZE:
      CLOSURE_EXTERNS += [path_from_root('src', 'minimal_runtime_worker_externs.js')]
    args = ['--compilation_level', 'ADVANCED_OPTIMIZATIONS' if advanced else 'SIMPLE_OPTIMIZATIONS']
    # Keep in sync with ecmaVersion in tools/acorn-optimizer.js
    args += ['--language_in', 'ECMASCRIPT_2020']
    # Tell closure not to do any transpiling or inject any polyfills.
    # At some point we may want to look into using this as way to convert to ES5 but
    # babel is perhaps a better tool for that.
    args += ['--language_out', 'NO_TRANSPILE']
    # Tell closure never to inject the 'use strict' directive.
    args += ['--emit_use_strict=false']
    # Closure compiler is unable to deal with path names that are not 7-bit ASCII:
    # https://github.com/google/closure-compiler/issues/3784
    tempfiles = configuration.get_temp_files()
    outfile = tempfiles.get('.cc.js').name  # Safe 7-bit filename
    def move_to_safe_7bit_ascii_filename(filename):
      # Copy the file to a fresh temp name and return it relative to the temp
      # dir (the eventual cwd of the closure invocation).
      safe_filename = tempfiles.get('.js').name  # Safe 7-bit filename
      shutil.copyfile(filename, safe_filename)
      return os.path.relpath(safe_filename, tempfiles.tmpdir)
    for e in CLOSURE_EXTERNS:
      args += ['--externs', move_to_safe_7bit_ascii_filename(e)]
    for i in range(len(user_args)):
      if user_args[i] == '--externs':
        user_args[i + 1] = move_to_safe_7bit_ascii_filename(user_args[i + 1])
    # Specify output file relative to the temp directory to avoid specifying non-7-bit-ASCII path names.
    args += ['--js_output_file', os.path.relpath(outfile, tempfiles.tmpdir)]
    if settings.IGNORE_CLOSURE_COMPILER_ERRORS:
      args.append('--jscomp_off=*')
    if pretty:
      args += ['--formatting', 'PRETTY_PRINT']
    # Specify input file relative to the temp directory to avoid specifying non-7-bit-ASCII path names.
    args += ['--js', move_to_safe_7bit_ascii_filename(filename)]
    cmd = closure_cmd + args + user_args
    logger.debug('closure compiler: ' + ' '.join(cmd))
    # Closure compiler does not work if any of the input files contain characters outside the
    # 7-bit ASCII range. Therefore make sure the command line we pass does not contain any such
    # input files by passing all input filenames relative to the cwd. (user temp directory might
    # be in user's home directory, and user's profile name might contain unicode characters)
    proc = run_process(cmd, stderr=PIPE, check=False, env=env, cwd=tempfiles.tmpdir)
    # XXX Closure bug: if Closure is invoked with --create_source_map, Closure should create a
    # outfile.map source map file (https://github.com/google/closure-compiler/wiki/Source-Maps)
    # But it looks like it creates such files on Linux(?) even without setting that command line
    # flag (and currently we don't), so delete the produced source map file to not leak files in
    # temp directory.
    try_delete(outfile + '.map')
    # Print Closure diagnostics result up front.
    if proc.returncode != 0:
      logger.error('Closure compiler run failed:\n')
    elif len(proc.stderr.strip()) > 0:
      if settings.CLOSURE_WARNINGS == 'error':
        logger.error('Closure compiler completed with warnings and -s CLOSURE_WARNINGS=error enabled, aborting!\n')
      elif settings.CLOSURE_WARNINGS == 'warn':
        logger.warn('Closure compiler completed with warnings:\n')
    # Print input file (long wall of text!)
    if DEBUG == 2 and (proc.returncode != 0 or (len(proc.stderr.strip()) > 0 and settings.CLOSURE_WARNINGS != 'quiet')):
      input_file = open(filename, 'r').read().splitlines()
      for i in range(len(input_file)):
        sys.stderr.write(str(i + 1) + ': ' + input_file[i] + '\n')
    if proc.returncode != 0:
      logger.error(proc.stderr)  # print list of errors (possibly long wall of text if input was minified)
      # Exit and print final hint to get clearer output
      msg = 'closure compiler failed (rc: %d): %s' % (proc.returncode, shared.shlex_join(cmd))
      if not pretty:
        msg += ' the error message may be clearer with -g1 and EMCC_DEBUG=2 set'
      exit_with_error(msg)
    if len(proc.stderr.strip()) > 0 and settings.CLOSURE_WARNINGS != 'quiet':
      # print list of warnings (possibly long wall of text if input was minified)
      if settings.CLOSURE_WARNINGS == 'error':
        logger.error(proc.stderr)
      else:
        logger.warn(proc.stderr)
      # Exit and/or print final hint to get clearer output
      if not pretty:
        logger.warn('(rerun with -g1 linker flag for an unminified output)')
      elif DEBUG != 2:
        logger.warn('(rerun with EMCC_DEBUG=2 enabled to dump Closure input file)')
      if settings.CLOSURE_WARNINGS == 'error':
        exit_with_error('closure compiler produced warnings and -s CLOSURE_WARNINGS=error enabled')
    return outfile
# minify the final wasm+JS combination. this is done after all the JS
# and wasm optimizations; here we do the very final optimizations on them
def minify_wasm_js(js_file, wasm_file, expensive_optimizations, minify_whitespace, debug_info):
  """Run the final cross-module (JS + wasm) minification passes.

  Returns the (possibly new) path of the optimized JS file; `wasm_file` is
  optimized in place by metadce/import-export minification when enabled.
  """
  # start with JSDCE, to clean up obvious JS garbage. When optimizing for size,
  # use AJSDCE (aggressive JS DCE, performs multiple iterations). Clean up
  # whitespace if necessary too.
  passes = []
  if not settings.LINKABLE:
    passes.append('JSDCE' if not expensive_optimizations else 'AJSDCE')
  if minify_whitespace:
    passes.append('minifyWhitespace')
  if passes:
    logger.debug('running cleanup on shell code: ' + ' '.join(passes))
    js_file = acorn_optimizer(js_file, passes)
  # if we can optimize this js+wasm combination under the assumption no one else
  # will see the internals, do so
  if not settings.LINKABLE:
    # if we are optimizing for size, shrink the combined wasm+JS
    # TODO: support this when a symbol map is used
    if expensive_optimizations:
      js_file = metadce(js_file, wasm_file, minify_whitespace=minify_whitespace, debug_info=debug_info)
      # now that we removed unneeded communication between js and wasm, we can clean up
      # the js some more.
      passes = ['AJSDCE']
      if minify_whitespace:
        passes.append('minifyWhitespace')
      logger.debug('running post-meta-DCE cleanup on shell code: ' + ' '.join(passes))
      js_file = acorn_optimizer(js_file, passes)
      if settings.MINIFY_WASM_IMPORTS_AND_EXPORTS:
        js_file = minify_wasm_imports_and_exports(js_file, wasm_file, minify_whitespace=minify_whitespace, minify_exports=settings.MINIFY_ASMJS_EXPORT_NAMES, debug_info=debug_info)
  return js_file
# run binaryen's wasm-metadce to dce both js and wasm
def metadce(js_file, wasm_file, minify_whitespace, debug_info):
  """Dead-code-eliminate across the JS/wasm boundary with wasm-metadce.

  Builds a reachability graph from the JS side (via the acorn emitDCEGraph
  pass), feeds it to wasm-metadce (which rewrites `wasm_file` in place), and
  then removes the JS counterparts of anything the tool reported unused.
  Returns the path of the cleaned-up JS file.
  """
  logger.debug('running meta-DCE')
  temp_files = configuration.get_temp_files()
  # first, get the JS part of the graph
  extra_info = '{ "exports": [' + ','.join(f'["{asmjs_mangle(x)}", "{x}"]' for x in settings.WASM_FUNCTION_EXPORTS) + ']}'
  txt = acorn_optimizer(js_file, ['emitDCEGraph', 'noPrint'], return_output=True, extra_info=extra_info)
  graph = json.loads(txt)
  # ensure that functions expected to be exported to the outside are roots
  # NOTE(review): user_requested_exports is a module-level list populated
  # elsewhere in this file — confirm it is filled in before metadce runs.
  for item in graph:
    if 'export' in item:
      export = item['export']
      # wasm backend's exports are prefixed differently inside the wasm
      export = asmjs_mangle(export)
      if export in user_requested_exports or settings.EXPORT_ALL:
        item['root'] = True
  # in standalone wasm, always export the memory
  if not settings.IMPORTED_MEMORY:
    graph.append({
      'export': 'memory',
      'name': 'emcc$export$memory',
      'reaches': [],
      'root': True
    })
  if not settings.RELOCATABLE:
    graph.append({
      'export': '__indirect_function_table',
      'name': 'emcc$export$__indirect_function_table',
      'reaches': [],
      'root': True
    })
  # fix wasi imports TODO: support wasm stable with an option?
  WASI_IMPORTS = set([
    'environ_get',
    'environ_sizes_get',
    'args_get',
    'args_sizes_get',
    'fd_write',
    'fd_close',
    'fd_read',
    'fd_seek',
    'fd_fdstat_get',
    'fd_sync',
    'fd_pread',
    'fd_pwrite',
    'proc_exit',
    'clock_res_get',
    'clock_time_get',
  ])
  for item in graph:
    if 'import' in item and item['import'][1][1:] in WASI_IMPORTS:
      item['import'][0] = settings.WASI_MODULE_NAME
  # fixup wasm backend prefixing
  for item in graph:
    if 'import' in item:
      if item['import'][1][0] == '_':
        item['import'][1] = item['import'][1][1:]
  # map import names from wasm to JS, using the actual name the wasm uses for the import
  import_name_map = {}
  for item in graph:
    if 'import' in item:
      import_name_map[item['name']] = 'emcc$import$' + item['import'][1]
  temp = temp_files.get('.txt').name
  txt = json.dumps(graph)
  with open(temp, 'w') as f:
    f.write(txt)
  # run wasm-metadce
  out = run_binaryen_command('wasm-metadce',
                             wasm_file,
                             wasm_file,
                             ['--graph-file=' + temp],
                             debug=debug_info,
                             stdout=PIPE)
  # find the unused things in js
  unused = []
  PREFIX = 'unused: '
  for line in out.splitlines():
    if line.startswith(PREFIX):
      name = line.replace(PREFIX, '').strip()
      if name in import_name_map:
        name = import_name_map[name]
      unused.append(name)
  # remove them
  passes = ['applyDCEGraphRemovals']
  if minify_whitespace:
    passes.append('minifyWhitespace')
  extra_info = {'unused': unused}
  return acorn_optimizer(js_file, passes, extra_info=json.dumps(extra_info))
def asyncify_lazy_load_code(wasm_target, debug):
  """Split an asyncify build into an eager wasm and a lazy-loaded variant.

  Writes `wasm_target + '.lazy.wasm'` (the rewind-only half) and re-optimizes
  `wasm_target` in place under the assumption it only ever unwinds.
  """
  # create the lazy-loaded wasm. remove the memory segments from it, as memory
  # segments have already been applied by the initial wasm, and apply the knowledge
  # that it will only rewind, after which optimizations can remove some code
  args = ['--remove-memory', '--mod-asyncify-never-unwind']
  if settings.OPT_LEVEL > 0:
    args.append(opt_level_to_str(settings.OPT_LEVEL, settings.SHRINK_LEVEL))
  run_wasm_opt(wasm_target,
               wasm_target + '.lazy.wasm',
               args=args,
               debug=debug)
  # re-optimize the original, by applying the knowledge that imports will
  # definitely unwind, and we never rewind, after which optimizations can remove
  # a lot of code
  # TODO: support other asyncify stuff, imports that don't always unwind?
  # TODO: source maps etc.
  args = ['--mod-asyncify-always-and-only-unwind']
  if settings.OPT_LEVEL > 0:
    args.append(opt_level_to_str(settings.OPT_LEVEL, settings.SHRINK_LEVEL))
  run_wasm_opt(infile=wasm_target,
               outfile=wasm_target,
               args=args,
               debug=debug)
def minify_wasm_imports_and_exports(js_file, wasm_file, minify_whitespace, minify_exports, debug_info):
  """Shorten the import/export names shared between the wasm and its JS glue.

  wasm-opt renames the symbols inside `wasm_file` (in place) and reports each
  rename on stdout; the same renames are then applied to the JS side via the
  acorn optimizer.  Returns the path of the updated JS file.
  """
  logger.debug('minifying wasm imports and exports')
  # Select which wasm-opt minification pass to run.  Import module names are
  # only minified when not emitting a wasi-style import module.
  if not minify_exports:
    pass_name = '--minify-imports'
  elif settings.MINIFY_WASM_IMPORTED_MODULES:
    pass_name = '--minify-imports-and-exports-and-modules'
  else:
    pass_name = '--minify-imports-and-exports'
  out = run_wasm_opt(wasm_file, wasm_file,
                     [pass_name],
                     debug=debug_info,
                     stdout=PIPE)
  # Note: this is the last binaryen tool invoked after normal opts and
  # metadce; further Stack IR / -O runs here were measured at <0.5% benefit.
  # Each rename is reported as a line of the form "old => new".
  SEP = ' => '
  mapping = {}
  for line in out.split('\n'):
    if SEP not in line:
      continue
    old, new = line.strip().split(SEP)
    assert old not in mapping, 'imports must be unique'
    mapping[old] = new
  # Apply the same renames on the JS side.
  passes = ['applyImportAndExportNameChanges']
  if minify_whitespace:
    passes.append('minifyWhitespace')
  return acorn_optimizer(js_file, passes, extra_info=json.dumps({'mapping': mapping}))
def wasm2js(js_file, wasm_file, opt_level, minify_whitespace, use_closure_compiler, debug_info, symbols_file=None, symbols_file_js=None):
  """Compile `wasm_file` to JS with binaryen's wasm2js and splice the result
  into `js_file` at the __wasm2jsInstantiate__ placeholder.

  Returns the path of the new combined JS file (`js_file + '.wasm2js.js'`).
  """
  logger.debug('wasm2js')
  args = ['--emscripten']
  if opt_level > 0:
    args += ['-O']
  if symbols_file:
    args += ['--symbols-file=%s' % symbols_file]
  wasm2js_js = run_binaryen_command('wasm2js', wasm_file,
                                    args=args,
                                    debug=debug_info,
                                    stdout=PIPE)
  if DEBUG:
    with open(os.path.join(get_emscripten_temp_dir(), 'wasm2js-output.js'), 'w') as f:
      f.write(wasm2js_js)
  # JS optimizations
  if opt_level >= 2:
    passes = []
    if not debug_info and not settings.USE_PTHREADS:
      passes += ['minifyNames']
      if symbols_file_js:
        passes += ['symbolMap=%s' % symbols_file_js]
    if minify_whitespace:
      passes += ['minifyWhitespace']
    passes += ['last']
    if passes:
      # hackish fixups to work around wasm2js style and the js optimizer FIXME
      wasm2js_js = '// EMSCRIPTEN_START_ASM\n' + wasm2js_js + '// EMSCRIPTEN_END_ASM\n'
      wasm2js_js = wasm2js_js.replace('// EMSCRIPTEN_START_FUNCS;\n', '// EMSCRIPTEN_START_FUNCS\n')
      wasm2js_js = wasm2js_js.replace('// EMSCRIPTEN_END_FUNCS;\n', '// EMSCRIPTEN_END_FUNCS\n')
      wasm2js_js = wasm2js_js.replace('\n function $', '\nfunction $')
      wasm2js_js = wasm2js_js.replace('\n }', '\n}')
      wasm2js_js += '\n// EMSCRIPTEN_GENERATED_FUNCTIONS\n'
      temp = configuration.get_temp_files().get('.js').name
      with open(temp, 'w') as f:
        f.write(wasm2js_js)
      temp = js_optimizer(temp, passes)
      with open(temp) as f:
        wasm2js_js = f.read()
  # Closure compiler: in mode 1, we just minify the shell. In mode 2, we
  # minify the wasm2js output as well, which is ok since it isn't
  # validating asm.js.
  # TODO: in the non-closure case, we could run a lightweight general-
  # purpose JS minifier here.
  if use_closure_compiler == 2:
    temp = configuration.get_temp_files().get('.js').name
    with open(temp, 'a') as f:
      f.write(wasm2js_js)
    temp = closure_compiler(temp, pretty=not minify_whitespace, advanced=False)
    with open(temp) as f:
      wasm2js_js = f.read()
    # closure may leave a trailing `;`, which would be invalid given where we place
    # this code (inside parens)
    wasm2js_js = wasm2js_js.strip()
    if wasm2js_js[-1] == ';':
      wasm2js_js = wasm2js_js[:-1]
  with open(js_file) as f:
    all_js = f.read()
  # quoted notation, something like Module['__wasm2jsInstantiate__']
  finds = re.findall(r'''[\w\d_$]+\[['"]__wasm2jsInstantiate__['"]\]''', all_js)
  if not finds:
    # post-closure notation, something like a.__wasm2jsInstantiate__
    finds = re.findall(r'''[\w\d_$]+\.__wasm2jsInstantiate__''', all_js)
  assert len(finds) == 1
  marker = finds[0]
  all_js = all_js.replace(marker, '(\n' + wasm2js_js + '\n)')
  # replace the placeholder with the actual code
  js_file = js_file + '.wasm2js.js'
  with open(js_file, 'w') as f:
    f.write(all_js)
  return js_file
def strip(infile, outfile, debug=False, producers=False):
  """Copy `infile` to `outfile` with llvm-objcopy, optionally dropping the
  DWARF debug sections (`debug`) and/or the `producers` section."""
  section_flags = []
  if debug:
    section_flags.append('--remove-section=.debug*')
  if producers:
    section_flags.append('--remove-section=producers')
  check_call([LLVM_OBJCOPY, infile, outfile] + section_flags)
# extract the DWARF info from the main file, and leave the wasm with
# debug into as a file on the side
# TODO: emit only debug sections in the side file, and not the entire
# wasm as well
def emit_debug_on_side(wasm_file, wasm_file_with_dwarf):
  """Move the DWARF info out of `wasm_file` into a side `.debug.wasm` file
  and embed an external_debug_info section pointing at it."""
  # NOTE(review): the wasm_file_with_dwarf parameter is immediately
  # overwritten below, so the caller's value is ignored — confirm whether the
  # parameter can be dropped.
  # if the dwarf filename wasn't provided, use the default target + a suffix
  wasm_file_with_dwarf = settings.SEPARATE_DWARF
  if wasm_file_with_dwarf is True:
    wasm_file_with_dwarf = wasm_file + '.debug.wasm'
  embedded_path = settings.SEPARATE_DWARF_URL
  if not embedded_path:
    # no URL was provided - embed a path relative to the wasm instead
    # (the previous comment here said "a path was provided", which was inverted)
    embedded_path = os.path.relpath(wasm_file_with_dwarf,
                                    os.path.dirname(wasm_file))
    # normalize the path to use URL-style separators, per the spec
    embedded_path = embedded_path.replace('\\', '/').replace('//', '/')
  shutil.move(wasm_file, wasm_file_with_dwarf)
  strip(wasm_file_with_dwarf, wasm_file, debug=True)
  # embed a section in the main wasm to point to the file with external DWARF,
  # see https://yurydelendik.github.io/webassembly-dwarf/#external-DWARF
  section_name = b'\x13external_debug_info'  # section name, including prefixed size
  filename_bytes = embedded_path.encode('utf-8')
  contents = webassembly.toLEB(len(filename_bytes)) + filename_bytes
  section_size = len(section_name) + len(contents)
  with open(wasm_file, 'ab') as f:
    f.write(b'\0')  # user section is code 0
    f.write(webassembly.toLEB(section_size))
    f.write(section_name)
    f.write(contents)
def little_endian_heap(js_file):
  """Rewrite the JS via the acorn 'littleEndianHeap' pass and return the new file."""
  logger.debug('enforcing little endian heap byte order')
  passes = ['littleEndianHeap']
  return acorn_optimizer(js_file, passes)
def apply_wasm_memory_growth(js_file):
  """Rewrite heap accesses for memory growth with pthreads.

  Runs the acorn 'growableHeap' pass, then prepends the growableHeap.js
  support code.  Returns the path of the combined output file.
  """
  logger.debug('supporting wasm memory growth with pthreads')
  rewritten = acorn_optimizer(js_file, ['growableHeap'])
  out_path = js_file + '.pgrow.js'
  with open(path_from_root('src', 'growableHeap.js')) as support_f:
    support_code = support_f.read()
  with open(rewritten, 'r') as rewritten_f:
    combined = support_code + '\n' + rewritten_f.read()
  with open(out_path, 'w') as out_f:
    out_f.write(combined)
  return out_path
def use_unsigned_pointers_in_js(js_file):
  """Run the acorn 'unsignPointers' pass over `js_file`; returns the new file."""
  logger.debug('using unsigned pointers in JS')
  return acorn_optimizer(js_file, ['unsignPointers'])
def instrument_js_for_asan(js_file):
  """Run the acorn 'asanify' pass over `js_file`; returns the new file."""
  logger.debug('instrumenting JS memory accesses for ASan')
  return acorn_optimizer(js_file, ['asanify'])
def instrument_js_for_safe_heap(js_file):
  """Run the acorn 'safeHeap' pass over `js_file`; returns the new file."""
  logger.debug('instrumenting JS memory accesses for SAFE_HEAP')
  return acorn_optimizer(js_file, ['safeHeap'])
def handle_final_wasm_symbols(wasm_file, symbols_file, debug_info):
  """Emit the wasm's function symbol map and/or strip its debug info.

  When `symbols_file` is given, wasm-opt's --print-function-map output is
  written there.  When `debug_info` is False, the wasm is rewritten in place
  without -g, dropping debug info.
  """
  logger.debug('handle_final_wasm_symbols')
  args = []
  if symbols_file:
    args += ['--print-function-map']
  if not debug_info:
    # to remove debug info, we just write to that same file, and without -g
    args += ['-o', wasm_file]
  else:
    # suppress the wasm-opt warning regarding "no output file specified"
    args += ['--quiet']
  # ignore stderr because if wasm-opt is run without a -o it will warn
  output = run_wasm_opt(wasm_file, args=args, stdout=PIPE)
  if symbols_file:
    with open(symbols_file, 'w') as f:
      f.write(output)
def is_ar(filename):
  """Return True if `filename` starts with the GNU archive magic ('!<arch>\\n').

  Results (including negative ones) are memoized in _is_ar_cache; any error
  while probing the file is logged and treated as "not an archive".
  """
  try:
    # Check for any cached result.  The previous `.get(filename)` truthiness
    # test re-read the file every time a cached result was False.
    if filename in _is_ar_cache:
      return _is_ar_cache[filename]
    # Close the handle promptly instead of leaking it until GC.
    with open(filename, 'rb') as f:
      header = f.read(8)
    sigcheck = header == b'!<arch>\n'
    _is_ar_cache[filename] = sigcheck
    return sigcheck
  except Exception as e:
    logger.debug('is_ar failed to test whether file \'%s\' is a llvm archive file! Failed on exception: %s' % (filename, e))
    return False
def is_bitcode(filename):
  """Return True if `filename` looks like an LLVM bitcode file.

  Recognizes both the raw 'BC' magic and the macOS wrapper header, which
  begins with a little-endian encoding of 0x0B17C0DE and carries the real
  'BC' magic at offset 20.
  """
  try:
    # Read enough for either signature in one pass; the previous code opened
    # the file twice and never closed either handle.
    with open(filename, 'rb') as f:
      b = f.read(22)
    # look for magic signature
    if b[:2] == b'BC':
      return True
    # on macOS, there is a 20-byte prefix which starts with little endian
    # encoding of 0x0B17C0DE
    elif b[:4] == b'\xDE\xC0\x17\x0B':
      return b[20:22] == b'BC'
  except IndexError:
    # not enough characters in the input
    # note that logging will be done on the caller function
    pass
  return False
def is_wasm(filename):
  """Return True if `filename` starts with the wasm magic number ('\\0asm')."""
  # Use a context manager so the handle is closed promptly (the previous
  # code leaked it until GC).
  with open(filename, 'rb') as f:
    return f.read(4) == b'\0asm'
def map_to_js_libs(library_name):
  """Map an Emscripten-implemented system library name to its JS library files.

  Returns a (possibly empty) list of filenames under emscripten/src/ when the
  name is known, or None when it is not a JS-implemented library.
  """
  # Some native libraries are implemented in Emscripten as system side JS libraries
  library_map = {
    'c': [],
    'dl': [],
    'EGL': ['library_egl.js'],
    'GL': ['library_webgl.js', 'library_html5_webgl.js'],
    'webgl.js': ['library_webgl.js', 'library_html5_webgl.js'],
    'GLESv2': ['library_webgl.js'],
    # N.b. there is no GLESv3 to link to (note [f] in https://www.khronos.org/registry/implementers_guide.html)
    'GLEW': ['library_glew.js'],
    'glfw': ['library_glfw.js'],
    'glfw3': ['library_glfw.js'],
    'GLU': [],
    'glut': ['library_glut.js'],
    'm': [],
    'openal': ['library_openal.js'],
    'rt': [],
    'pthread': [],
    'X11': ['library_xlib.js'],
    'SDL': ['library_sdl.js'],
    'stdc++': [],
    'uuid': ['library_uuid.js'],
    'websocket': ['library_websocket.js']
  }
  libs = library_map.get(library_name)
  if libs is not None:
    logger.debug('Mapping library `%s` to JS libraries: %s' % (library_name, libs))
    return libs
  # A -lfoo.js request maps directly to src/library_foo.js when that exists.
  if library_name.endswith('.js') and os.path.isfile(path_from_root('src', 'library_' + library_name)):
    return ['library_' + library_name]
  return None
def map_and_apply_to_settings(library_name):
  """Translate a -l<name> linker flag into settings changes (e.g. -lSDL2_mixer
  applies USE_SDL_MIXER=2).  Returns True when a mapping was applied.

  Most libraries need no mapping because the -l name matches the built
  library; this handles libraries built in several codec/feature variants.
  """
  settings_map = {
    # SDL2_mixer's built library name contains the specific codecs built in.
    'SDL2_mixer': [('USE_SDL_MIXER', 2)],
  }
  if library_name not in settings_map:
    return False
  for key, value in settings_map[library_name]:
    logger.debug('Mapping library `%s` to settings changes: %s = %s' % (library_name, key, value))
    setattr(settings, key, value)
  return True
def emit_wasm_source_map(wasm_file, map_file, final_wasm):
  """Generate a source map for `wasm_file` into `map_file`.

  Source file paths in the map must be relative to where the map lives,
  which is alongside `final_wasm`.
  """
  base_path = os.path.dirname(os.path.abspath(final_wasm))
  check_call([
      PYTHON, path_from_root('tools', 'wasm-sourcemap.py'),
      wasm_file,
      '--dwarfdump=' + LLVM_DWARFDUMP,
      '-o', map_file,
      '--basepath=' + base_path,
  ])
def get_binaryen_feature_flags():
  """Return the wasm feature flags to pass to binaryen tools.

  settings.BINARYEN_FEATURES is empty until wasm-emscripten-finalize has
  extracted the feature set; until then ask the tool to detect features
  from the module itself.
  """
  return settings.BINARYEN_FEATURES or ['--detect-features']
def check_binaryen(bindir):
  """Sanity-check the binaryen installation in `bindir`.

  Verifies wasm-opt exists and runs, and that its reported version matches
  EXPECTED_BINARYEN_VERSION (or the next one); otherwise warns or exits.
  """
  opt = os.path.join(bindir, exe_suffix('wasm-opt'))
  if not os.path.exists(opt):
    exit_with_error('binaryen executable not found (%s). Please check your binaryen installation' % opt)
  try:
    output = run_process([opt, '--version'], stdout=PIPE).stdout
  except subprocess.CalledProcessError:
    exit_with_error('error running binaryen executable (%s). Please check your binaryen installation' % opt)
  if output:
    output = output.splitlines()[0]
  try:
    # the version number is expected as the third whitespace-separated field
    version = output.split()[2]
    version = int(version)
  except (IndexError, ValueError):
    exit_with_error('error parsing binaryen version (%s). Please check your binaryen installation (%s)' % (output, opt))
  # Allow the expected version or the following one in order avoid needing to update both
  # emscripten and binaryen in lock step in emscripten-releases.
  if version not in (EXPECTED_BINARYEN_VERSION, EXPECTED_BINARYEN_VERSION + 1):
    diagnostics.warning('version-check', 'unexpected binaryen version: %s (expected %s)', version, EXPECTED_BINARYEN_VERSION)
def get_binaryen_bin():
  """Return binaryen's bin directory, validating the installation once."""
  global binaryen_checked
  rtn = os.path.join(config.BINARYEN_ROOT, 'bin')
  if not binaryen_checked:
    check_binaryen(rtn)
    binaryen_checked = True
  return rtn
def run_binaryen_command(tool, infile, outfile=None, args=None, debug=False, stdout=None):
  """Run a binaryen tool (e.g. wasm-opt, wasm-metadce) on `infile`.

  Args:
    tool: executable name inside binaryen's bin directory.
    infile: input wasm file (may be falsy for tools that take no input).
    outfile: optional output file; triggers -o, source-map updates, and a
      debug-copy save.
    args: extra command-line arguments for the tool.
    debug: when True, pass -g to preserve debug info.
    stdout: forwarded to subprocess (e.g. PIPE to capture output).

  Returns the captured stdout (None unless stdout=PIPE).  Exits with an
  error when writing output is disallowed by ERROR_ON_WASM_CHANGES_AFTER_LINK.
  """
  # None default instead of the previous mutable `args=[]` default (a
  # shared-list pitfall, even though this function never mutated it).
  args = args or []
  cmd = [os.path.join(get_binaryen_bin(), tool)]
  if outfile and tool == 'wasm-opt' and \
      (settings.DEBUG_LEVEL < 3 or settings.GENERATE_SOURCE_MAP):
    # remove any dwarf debug info sections, if the debug level is <3, as
    # we don't need them; also remove them if we use source maps (which are
    # implemented separately from dwarf).
    # note that we add this pass first, so that it doesn't interfere with
    # the final set of passes (which may generate stack IR, and nothing
    # should be run after that)
    # TODO: if lld can strip dwarf then we don't need this. atm though it can
    #       only strip all debug info or none, which includes the name section
    #       which we may need
    # TODO: once fastcomp is gone, either remove source maps entirely, or
    #       support them by emitting a source map at the end from the dwarf,
    #       and use llvm-objcopy to remove that final dwarf
    cmd += ['--strip-dwarf']
  cmd += args
  if infile:
    cmd += [infile]
  if outfile:
    cmd += ['-o', outfile]
    if settings.ERROR_ON_WASM_CHANGES_AFTER_LINK:
      # emit some extra helpful text for common issues
      extra = ''
      # a plain -O0 build *almost* doesn't need post-link changes, except for
      # legalization. show a clear error for those (as the flags the user passed
      # in are not enough to see what went wrong)
      if settings.LEGALIZE_JS_FFI:
        extra += '\nnote: to disable int64 legalization (which requires changes after link) use -s WASM_BIGINT'
      if settings.OPT_LEVEL > 0:
        extra += '\nnote: -O2+ optimizations always require changes, build with -O0 or -O1 instead'
      exit_with_error('changes to the wasm are required after link, but disallowed by ERROR_ON_WASM_CHANGES_AFTER_LINK: ' + str(cmd) + extra)
  if debug:
    cmd += ['-g']  # preserve the debug info
  # if the features are not already handled, handle them
  cmd += get_binaryen_feature_flags()
  # if we are emitting a source map, every time we load and save the wasm
  # we must tell binaryen to update it
  if settings.GENERATE_SOURCE_MAP and outfile:
    cmd += ['--input-source-map=' + infile + '.map']
    cmd += ['--output-source-map=' + outfile + '.map']
  ret = check_call(cmd, stdout=stdout).stdout
  if outfile:
    save_intermediate(outfile, '%s.wasm' % tool)
  return ret
def run_wasm_opt(*args, **kwargs):
  """Convenience wrapper: run_binaryen_command with the 'wasm-opt' tool."""
  return run_binaryen_command('wasm-opt', *args, **kwargs)
# Counter giving each saved intermediate file a unique, ordered name.
save_intermediate_counter = 0
def save_intermediate(src, dst):
  """Copy `src` into CANONICAL_TEMP_DIR as a numbered debug artifact.

  Only active when DEBUG is set; `dst` is a short descriptive suffix that
  becomes part of the generated 'emcc-<n>-<dst>' filename.
  """
  if DEBUG:
    global save_intermediate_counter
    dst = 'emcc-%d-%s' % (save_intermediate_counter, dst)
    save_intermediate_counter += 1
    dst = os.path.join(CANONICAL_TEMP_DIR, dst)
    logger.debug('saving debug copy %s' % dst)
    shutil.copyfile(src, dst)
| 39.675712 | 320 | 0.690845 |
import json
import logging
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
from subprocess import PIPE
from . import diagnostics
from . import response_file
from . import shared
from . import webassembly
from . import config
from .toolchain_profiler import ToolchainProfiler
from .shared import CLANG_CC, CLANG_CXX, PYTHON
from .shared import LLVM_NM, EMCC, EMAR, EMXX, EMRANLIB, WASM_LD, LLVM_AR
from .shared import LLVM_LINK, LLVM_OBJCOPY
from .shared import try_delete, run_process, check_call, exit_with_error
from .shared import configuration, path_from_root
from .shared import asmjs_mangle, DEBUG
from .shared import TEMP_DIR
from .shared import CANONICAL_TEMP_DIR, LLVM_DWARFDUMP, demangle_c_symbol_name
from .shared import get_emscripten_temp_dir, exe_suffix, is_c_symbol
from .utils import WINDOWS
from .settings import settings
logger = logging.getLogger('building')
# Set to True once check_binaryen() has validated the binaryen installation.
binaryen_checked = False
# Binaryen version this release was built against; check_binaryen() also
# tolerates the following version.
EXPECTED_BINARYEN_VERSION = 101
# Cache of llvm-nm results, keyed by filename (see llvm_nm_multiple()).
nm_cache = {}
# Cache of archive contents — presumably keyed by archive path; populated
# outside this chunk (cleared by clear()).
ar_contents = {}
# Memoized results of is_ar(), keyed by filename.
_is_ar_cache = {}
# Exports explicitly requested by the user; consulted by metadce() when
# deciding DCE roots.  Populated elsewhere — not visible in this chunk.
user_requested_exports = []
class ObjectFileInfo:
  """Symbol information for one object file, as reported by llvm-nm.

  Attributes:
    returncode: llvm-nm's exit code; non-zero means the file could not be parsed.
    output: raw llvm-nm output text.
    defs / undefs / commons: sets of defined / undefined / common symbols.
  """

  def __init__(self, returncode, output, defs=None, undefs=None, commons=None):
    self.returncode = returncode
    self.output = output
    # Use None sentinels instead of the previous mutable `set()` defaults:
    # with `defs=set()` every instance constructed without an argument shared
    # the same set object, so mutating one instance's symbols corrupted all
    # of them.
    self.defs = set() if defs is None else defs
    self.undefs = set() if undefs is None else undefs
    self.commons = set() if commons is None else commons

  def is_valid_for_nm(self):
    """True when llvm-nm succeeded and the symbol sets are meaningful."""
    return self.returncode == 0
def warn_if_duplicate_entries(archive_contents, archive_filename):
  """Warn (via diagnostics) when an archive lists duplicate member names.

  Only the last member with a given name gets linked, which can cause
  undefined symbols; each duplicated name is reported once, at its first
  occurrence, preserving the original report order.
  """
  if len(archive_contents) == len(set(archive_contents)):
    return
  msg = '%s: archive file contains duplicate entries. This is not supported by emscripten. Only the last member with a given name will be linked in which can result in undefined symbols. You should either rename your source files, or use `emar` to create you archives which works around this issue.' % archive_filename
  # Count each name once up front instead of re-scanning the list tail for
  # every entry (the previous implementation was O(n^2)).
  counts = {}
  for name in archive_contents:
    counts[name] = counts.get(name, 0) + 1
  warned = set()
  for curr in archive_contents:
    if counts[curr] > 1 and curr not in warned:
      msg += '\n   duplicate: %s' % curr
      warned.add(curr)
  diagnostics.warning('emcc', msg)
def extract_archive_contents(archive_files):
  """Extract every member of the given .a archives into a shared temp dir.

  Returns a list of dicts with keys 'archive_name' (the archive path) and
  'o_files' (absolute paths of the extracted members).  The temp directory
  is removed at interpreter exit.
  """
  archive_results = shared.run_multiple_processes([[LLVM_AR, 't', a] for a in archive_files], pipe_stdout=True)
  unpack_temp_dir = tempfile.mkdtemp('_archive_contents', 'emscripten_temp_')
  def clean_at_exit():
    try_delete(unpack_temp_dir)
  shared.atexit.register(clean_at_exit)
  archive_contents = []
  for i in range(len(archive_results)):
    # archive_results[i] is the `llvm-ar t` listing for archive_files[i].
    # Bug fix: the previous code used the listing text itself where the
    # archive's *filename* belonged (debug message and duplicate warning).
    listing = archive_results[i]
    contents = [l for l in listing.splitlines() if len(l)]
    if len(contents) == 0:
      logger.debug('Archive %s appears to be empty (recommendation: link an .so instead of .a)' % archive_files[i])
    # Members must be bare filenames; extraction below relies on that.
    for f in contents:
      assert not os.path.dirname(f)
      assert not os.path.isabs(f)
    warn_if_duplicate_entries(contents, archive_files[i])
    archive_contents += [{
      'archive_name': archive_files[i],
      'o_files': [os.path.join(unpack_temp_dir, c) for c in contents]
    }]
  shared.run_multiple_processes([[LLVM_AR, 'xo', a] for a in archive_files], cwd=unpack_temp_dir)
  # Verify all expected members were actually extracted.
  for a in archive_contents:
    missing_contents = [x for x in a['o_files'] if not os.path.exists(x)]
    if missing_contents:
      # Bug fix: the error previously interpolated `f`, a stale loop variable
      # from the listing loop above, instead of the archive's name.
      exit_with_error('llvm-ar failed to extract file(s) ' + str(missing_contents) + ' from archive file ' + a['archive_name'] + '!')
  return archive_contents
def unique_ordered(values):
  """Return a list of the unique values, keeping first-occurrence order.

  Relies on dict preserving insertion order (guaranteed since Python 3.7).
  Values must be hashable, as in the original set-based implementation.
  """
  return list(dict.fromkeys(values))
def clear():
  """Reset the module-level caches (llvm-nm results, archive contents, is_ar results)."""
  nm_cache.clear()
  ar_contents.clear()
  _is_ar_cache.clear()
def remove_quotes(arg):
  """Strip one layer of surrounding quotes (single or double) from `arg`,
  un-escaping embedded quotes of the same kind.  Lists are processed
  element-wise; unquoted strings are returned unchanged."""
  if isinstance(arg, list):
    return [remove_quotes(a) for a in arg]
  for quote in ('"', "'"):
    if arg.startswith(quote) and arg.endswith(quote):
      return arg[1:-1].replace('\\' + quote, quote)
  return arg
def get_building_env(cflags=[]):
  """Return a copy of os.environ configured so autoconf/make-style builds
  pick up the Emscripten cross-compilation toolchain.

  `cflags`, when given, is joined into CFLAGS/EMMAKEN_CFLAGS.
  """
  env = os.environ.copy()
  env.update({
      'CC': EMCC,
      'CXX': EMXX,
      'AR': EMAR,
      'LD': EMCC,
      'NM': LLVM_NM,
      'LDSHARED': EMCC,
      'RANLIB': EMRANLIB,
      'EMSCRIPTEN_TOOLS': path_from_root('tools'),
  })
  if cflags:
    env['CFLAGS'] = env['EMMAKEN_CFLAGS'] = ' '.join(cflags)
  env['HOST_CC'] = CLANG_CC
  env['HOST_CXX'] = CLANG_CXX
  env['HOST_CFLAGS'] = '-W'
  # must be set explicitly: if left empty, CXXFLAGS is used, which we don't want
  env['HOST_CXXFLAGS'] = '-W'
  env['PKG_CONFIG_LIBDIR'] = path_from_root('system', 'local', 'lib', 'pkgconfig') + os.path.pathsep + path_from_root('system', 'lib', 'pkgconfig')
  env['PKG_CONFIG_PATH'] = os.environ.get('EM_PKG_CONFIG_PATH', '')
  env['EMSCRIPTEN'] = path_from_root()
  env['PATH'] = path_from_root('system', 'bin') + os.pathsep + env['PATH']
  env['CROSS_COMPILE'] = path_from_root('em')
  return env
def remove_sh_exe_from_path(env):
  """Return a copy of `env` whose PATH excludes directories containing
  sh.exe.  A no-op copy on non-Windows hosts."""
  env = env.copy()
  if not WINDOWS:
    return env
  def contains_sh(directory):
    return os.path.exists(os.path.join(directory, 'sh.exe'))
  kept = [d for d in env['PATH'].split(';') if not contains_sh(d)]
  env['PATH'] = ';'.join(kept)
  return env
def make_paths_absolute(f):
  """Return `f` untouched when it is a command-line flag (starts with '-'),
  otherwise its absolute path."""
  return f if f.startswith('-') else os.path.abspath(f)
def llvm_nm_multiple(files):
  """Run llvm-nm over *files* and return one ObjectFileInfo per input, in order.

  Results are memoized in the module-level nm_cache, so only files not
  already cached are actually passed to llvm-nm.  Files whose results never
  make it into the cache (e.g. could not be processed) yield a fallback
  ObjectFileInfo with a non-zero return code.
  """
  with ToolchainProfiler.profile_block('llvm_nm_multiple'):
    if len(files) == 0:
      return []
    # Only query llvm-nm for files that were not already cached.
    llvm_nm_files = [f for f in files if f not in nm_cache]
    # We can issue multiple files in a single llvm-nm calls, but only if those
    # files are all .o or .bc files. Because of llvm-nm output format, we cannot
    # llvm-nm multiple .a files in one call, but those must be individually checked.
    o_files = [f for f in llvm_nm_files if os.path.splitext(f)[1].lower() in ['.o', '.obj', '.bc']]
    a_files = [f for f in llvm_nm_files if f not in o_files]
    # Issue parallel calls for .a files
    if len(a_files) > 0:
      results = shared.run_multiple_processes([[LLVM_NM, a] for a in a_files], pipe_stdout=True, check=False)
      for i in range(len(results)):
        nm_cache[a_files[i]] = parse_symbols(results[i])
    # Issue a single batch call for multiple .o files
    if len(o_files) > 0:
      cmd = [LLVM_NM] + o_files
      cmd = get_command_with_possible_response_file(cmd)
      results = run_process(cmd, stdout=PIPE, stderr=PIPE, check=False)
      # If one or more of the input files cannot be processed, llvm-nm will return a non-zero error code, but it will still process and print
      # out all the other files in order. So even if process return code is non zero, we should always look at what we got to stdout.
      if results.returncode != 0:
        logger.debug('Subcommand ' + ' '.join(cmd) + ' failed with return code ' + str(results.returncode) + '! (An input file was corrupt?)')
      results = results.stdout
      # llvm-nm produces a single listing of form
      #   file1.o:
      #   00000001 T __original_main
      #            U __stack_pointer
      #
      #   file2.o:
      #   0000005d T main
      #            U printf
      #
      #   ...
      # so loop over the report to extract the results
      # for each individual file.
      filename = o_files[0]
      # When we dispatched more than one file, we must manually parse
      # the file result delimiters (like shown structured above)
      if len(o_files) > 1:
        file_start = 0
        i = 0
        while True:
          nl = results.find('\n', i)
          if nl < 0:
            break
          # A line ending in ':' (with nothing after it) starts a new file's listing.
          colon = results.rfind(':', i, nl)
          if colon >= 0 and results[colon + 1] == '\n': # New file start?
            nm_cache[filename] = parse_symbols(results[file_start:i - 1])
            filename = results[i:colon].strip()
            file_start = colon + 2
          i = nl + 1
        # Flush the listing of the final file.
        nm_cache[filename] = parse_symbols(results[file_start:])
      else:
        # We only dispatched a single file, so can parse all of the result directly
        # to that file.
        nm_cache[filename] = parse_symbols(results)
    # Fall back to an "invalid" ObjectFileInfo for any file that never got cached.
    return [nm_cache[f] if f in nm_cache else ObjectFileInfo(1, '') for f in files]
def llvm_nm(file):
  """Run llvm-nm on a single *file* and return its (cached) ObjectFileInfo."""
  results = llvm_nm_multiple([file])
  return results[0]
def read_link_inputs(files):
  """Populate the ar_contents and nm_cache caches for all link inputs in *files*.

  Archives are unpacked first (in parallel) to discover their object files,
  then llvm-nm is run over every not-yet-cached object.  This function only
  fills the module-level caches; it returns nothing.
  """
  with ToolchainProfiler.profile_block('read_link_inputs'):
    # Before performing the link, we need to look at each input file to determine which symbols
    # each of them provides. Do this in multiple parallel processes.
    archive_names = [] # .a files passed in to the command line to the link
    object_names = [] # .o/.bc files passed in to the command line to the link
    for f in files:
      absolute_path_f = make_paths_absolute(f)
      if absolute_path_f not in ar_contents and is_ar(absolute_path_f):
        archive_names.append(absolute_path_f)
      elif absolute_path_f not in nm_cache and is_bitcode(absolute_path_f):
        object_names.append(absolute_path_f)
    # Archives contain objects, so process all archives first in parallel to obtain the object files in them.
    archive_contents = extract_archive_contents(archive_names)
    for a in archive_contents:
      ar_contents[os.path.abspath(a['archive_name'])] = a['o_files']
      for o in a['o_files']:
        if o not in nm_cache:
          object_names.append(o)
    # Next, extract symbols from all object files (either standalone or inside archives we just extracted)
    # The results are not used here directly, but populated to llvm-nm cache structure.
    llvm_nm_multiple(object_names)
def llvm_backend_args():
  """Return the list of flags to pass to the LLVM backend (via -mllvm)."""
  # disable slow and relatively unimportant optimization passes
  args = ['-combiner-global-alias-analysis=false']
  # asm.js-style exception handling
  if not settings.DISABLE_EXCEPTION_CATCHING:
    args.append('-enable-emscripten-cxx-exceptions')
    if settings.EXCEPTION_CATCHING_ALLOWED:
      # When 'main' has a non-standard signature, LLVM outlines its content out to
      # '__original_main'. So we add it to the allowed list as well.
      if 'main' in settings.EXCEPTION_CATCHING_ALLOWED:
        settings.EXCEPTION_CATCHING_ALLOWED += ['__original_main']
      args.append('-emscripten-cxx-exceptions-allowed=' +
                  ','.join(settings.EXCEPTION_CATCHING_ALLOWED))
  if settings.SUPPORT_LONGJMP:
    # asm.js-style setjmp/longjmp handling
    args.append('-enable-emscripten-sjlj')
  # better (smaller, sometimes faster) codegen, see binaryen#1054
  # and https://bugs.llvm.org/show_bug.cgi?id=39488
  args.append('-disable-lsr')
  return args
def link_to_object(args, target):
  """Link the inputs in *args* into a single object file at *target*.

  wasm-ld (relocatable mode) is used normally; with LTO enabled the inputs
  are linked as bitcode instead, since lld can't output LTO/bitcode objects.
  """
  if settings.LTO:
    link_bitcode(args, target)
  else:
    link_lld(args + ['--relocatable'], target)
def link_llvm(linker_inputs, target):
  """Link *linker_inputs* into *target* with llvm-link (using a response file if needed)."""
  cmd = get_command_with_possible_response_file(
      [LLVM_LINK] + linker_inputs + ['-o', target])
  check_call(cmd)
def lld_flags_for_executable(external_symbol_list):
  """Compute the extra wasm-ld flags used when linking a final executable.

  Args:
    external_symbol_list: optional list of symbols that are provided
      externally (e.g. by JS); when given they are written to a temp file and
      passed via --allow-undefined-file, otherwise all undefined symbols are
      allowed.

  Returns:
    List of command-line flag strings.
  """
  cmd = []
  if external_symbol_list:
    undefs = configuration.get_temp_files().get('.undefined').name
    with open(undefs, 'w') as f:
      f.write('\n'.join(external_symbol_list))
    cmd.append('--allow-undefined-file=%s' % undefs)
  else:
    cmd.append('--allow-undefined')
  if settings.IMPORTED_MEMORY:
    cmd.append('--import-memory')
  if settings.USE_PTHREADS:
    cmd.append('--shared-memory')
  if settings.MEMORY64:
    cmd.append('-mwasm64')
  # Strip debug info when not building with full debug info, unless a symbol
  # map, function profiling, or asyncify still needs those things.
  if settings.DEBUG_LEVEL < 2 and (not settings.EMIT_SYMBOL_MAP and
                                   not settings.PROFILING_FUNCS and
                                   not settings.ASYNCIFY):
    cmd.append('--strip-debug')
  if settings.LINKABLE:
    cmd.append('--export-all')
    cmd.append('--no-gc-sections')
  else:
    c_exports = [e for e in settings.EXPORTED_FUNCTIONS if is_c_symbol(e)]
    # Strip the leading underscores
    c_exports = [demangle_c_symbol_name(e) for e in c_exports]
    if external_symbol_list:
      # Filter out symbols external/JS symbols
      c_exports = [e for e in c_exports if e not in external_symbol_list]
    for export in c_exports:
      cmd += ['--export', export]
  if settings.RELOCATABLE:
    cmd.append('--experimental-pic')
    if settings.SIDE_MODULE:
      cmd.append('-shared')
    else:
      cmd.append('-pie')
    if not settings.LINKABLE:
      cmd.append('--no-export-dynamic')
  else:
    cmd.append('--export-table')
    if settings.ALLOW_TABLE_GROWTH:
      cmd.append('--growable-table')
  if not settings.SIDE_MODULE:
    # Export these two section start symbols so that we can extract the string
    # data that they contain.
    cmd += [
      '--export', '__start_em_asm',
      '--export', '__stop_em_asm',
      '-z', 'stack-size=%s' % settings.TOTAL_STACK,
      '--initial-memory=%d' % settings.INITIAL_MEMORY,
    ]
    if settings.STANDALONE_WASM:
      # when settings.EXPECT_MAIN is set we fall back to wasm-ld default of _start
      if not settings.EXPECT_MAIN:
        cmd += ['--entry=_initialize']
    else:
      if settings.EXPECT_MAIN and not settings.IGNORE_MISSING_MAIN:
        cmd += ['--entry=main']
      else:
        cmd += ['--no-entry']
    if not settings.ALLOW_MEMORY_GROWTH:
      cmd.append('--max-memory=%d' % settings.INITIAL_MEMORY)
    elif settings.MAXIMUM_MEMORY != -1:
      cmd.append('--max-memory=%d' % settings.MAXIMUM_MEMORY)
    if not settings.RELOCATABLE:
      cmd.append('--global-base=%s' % settings.GLOBAL_BASE)
  return cmd
def link_lld(args, target, external_symbol_list=None):
  """Run wasm-ld over *args*, producing *target*.

  Executable-only flags (from lld_flags_for_executable) are appended unless
  the link is relocatable (-r/--relocatable).
  """
  if not os.path.exists(WASM_LD):
    exit_with_error('linker binary not found in LLVM directory: %s', WASM_LD)
  # runs lld to link things.
  # lld doesn't currently support --start-group/--end-group, so those flags
  # are simply filtered out of the argument list here.
  args = [a for a in args if a not in ('--start-group', '--end-group')]
  if settings.LINKABLE:
    args.insert(0, '--whole-archive')
    args.append('--no-whole-archive')
  if settings.STRICT:
    args.append('--fatal-warnings')
  cmd = [WASM_LD, '-o', target] + args
  for a in llvm_backend_args():
    cmd += ['-mllvm', a]
  # normal linker flags that are used when building an executable
  if '--relocatable' not in args and '-r' not in args:
    cmd += lld_flags_for_executable(external_symbol_list)
  cmd = get_command_with_possible_response_file(cmd)
  check_call(cmd)
def link_bitcode(args, target, force_archive_contents=False):
  """Link bitcode inputs into *target*, emulating GNU-ld archive semantics.

  "Full-featured" linking: looks into archives (duplicating lld
  functionality).  Objects inside archives are pulled in only when they
  resolve a currently-undefined symbol, honouring --whole-archive and
  --start-group/--end-group flags, then everything is handed to llvm-link.

  Args:
    args: linker command-line arguments (input paths mixed with link flags).
    target: output path for the linked bitcode.
    force_archive_contents: when True, include every object in every archive
      regardless of whether it resolves an undefined symbol.
  """
  input_files = [a for a in args if not a.startswith('-')]
  files_to_link = []
  # Tracking unresolveds is necessary for .a linking, see below.
  # Specify all possible entry points to seed the linking process.
  # For a simple application, this would just be "main".
  unresolved_symbols = set([func[1:] for func in settings.EXPORTED_FUNCTIONS])
  resolved_symbols = set()
  # Paths of already included object files from archives.
  added_contents = set()
  has_ar = any(is_ar(make_paths_absolute(f)) for f in input_files)
  # If we have only one archive or the force_archive_contents flag is set,
  # then we will add every object file we see, regardless of whether it
  # resolves any undefined symbols.
  force_add_all = len(input_files) == 1 or force_archive_contents
  # Considers an object file for inclusion in the link. The object is included
  # if force_add=True or if the object provides a currently undefined symbol.
  # If the object is included, the symbol tables are updated and the function
  # returns True.
  def consider_object(f, force_add=False):
    new_symbols = llvm_nm(f)
    # Check if the object was valid according to llvm-nm. It also accepts
    # native object files.
    if not new_symbols.is_valid_for_nm():
      diagnostics.warning('emcc', 'object %s is not valid according to llvm-nm, cannot link', f)
      return False
    # Check the object is valid for us, and not a native object file.
    if not is_bitcode(f):
      exit_with_error('unknown file type: %s', f)
    provided = new_symbols.defs.union(new_symbols.commons)
    do_add = force_add or not unresolved_symbols.isdisjoint(provided)
    if do_add:
      logger.debug('adding object %s to link (forced: %d)' % (f, force_add))
      # Update resolved_symbols table with newly resolved symbols
      resolved_symbols.update(provided)
      # Update unresolved_symbols table by adding newly unresolved symbols and
      # removing newly resolved symbols.
      unresolved_symbols.update(new_symbols.undefs.difference(resolved_symbols))
      unresolved_symbols.difference_update(provided)
      files_to_link.append(f)
    return do_add
  # Traverse a single archive. The object files are repeatedly scanned for
  # newly satisfied symbols until no new symbols are found. Returns true if
  # any object files were added to the link.
  def consider_archive(f, force_add):
    added_any_objects = False
    loop_again = True
    logger.debug('considering archive %s' % (f))
    contents = ar_contents[f]
    while loop_again: # repeatedly traverse until we have everything we need
      loop_again = False
      for content in contents:
        if content in added_contents:
          continue
        # Link in the .o if it provides symbols, *or* this is a singleton archive (which is
        # apparently an exception in gcc ld)
        if consider_object(content, force_add=force_add):
          added_contents.add(content)
          loop_again = True
          added_any_objects = True
    logger.debug('done running loop of archive %s' % (f))
    return added_any_objects
  read_link_inputs(input_files)
  # Rescan a group of archives until we don't find any more objects to link.
  def scan_archive_group(group):
    loop_again = True
    logger.debug('starting archive group loop')
    while loop_again:
      loop_again = False
      for archive in group:
        if consider_archive(archive, force_add=False):
          loop_again = True
    logger.debug('done with archive group loop')
  current_archive_group = None
  in_whole_archive = False
  for a in args:
    if a.startswith('-'):
      if a in ['--start-group', '-(']:
        assert current_archive_group is None, 'Nested --start-group, missing --end-group?'
        current_archive_group = []
      elif a in ['--end-group', '-)']:
        assert current_archive_group is not None, '--end-group without --start-group'
        scan_archive_group(current_archive_group)
        current_archive_group = None
      elif a in ['--whole-archive', '-whole-archive']:
        in_whole_archive = True
      elif a in ['--no-whole-archive', '-no-whole-archive']:
        in_whole_archive = False
      else:
        exit_with_error('unsupported link flag: %s', a)
    else:
      lib_path = make_paths_absolute(a)
      if is_ar(lib_path):
        consider_archive(lib_path, in_whole_archive or force_add_all)
        # If we are inside a --start-group section, remember this archive
        # so we can loop back around later.
        if current_archive_group is not None:
          current_archive_group.append(lib_path)
      elif is_bitcode(lib_path):
        if has_ar:
          consider_object(a, force_add=True)
        else:
          # If there are no archives then we can simply link all valid object
          # files and skip the symbol table stuff.
          files_to_link.append(a)
      else:
        exit_with_error('unknown file type: %s', a)
  # We have to consider the possibility that --start-group was used without a matching
  # --end-group; GNU ld permits this behavior and implicitly treats the end of the
  # command line as having an --end-group.
  if current_archive_group:
    logger.debug('--start-group without matching --end-group, rescanning')
    scan_archive_group(current_archive_group)
    current_archive_group = None
  try_delete(target)
  # Finish link
  # tolerate people trying to link a.so a.so etc.
  files_to_link = unique_ordered(files_to_link)
  logger.debug('emcc: linking: %s to %s', files_to_link, target)
  link_llvm(files_to_link, target)
def get_command_with_possible_response_file(cmd):
  """Return *cmd*, rewritten to use an @response-file when the command line is long.

  8k is a bit of an arbitrary limit, but a reasonable one for max command
  line size before we switch to a response file.
  """
  if len(' '.join(cmd)) > 8192:
    logger.debug('using response file for %s' % cmd[0])
    filename = response_file.create_response_file(cmd[1:], TEMP_DIR)
    return [cmd[0], "@" + filename]
  return cmd
def parse_symbols(output):
  """Parse llvm-nm textual *output* into an ObjectFileInfo.

  Each symbol line looks like "<addr-or-dashes> <STATUS> <name>" or
  "<STATUS> <name>".  Status 'U' marks an undefined symbol, 'C' a common,
  and any other upper-case status letter counts as a definition.

  Returns:
    ObjectFileInfo(0, None, defs, undefs, commons) with the three symbol sets.
  """
  defs = []
  undefs = []
  commons = []
  for line in output.split('\n'):
    # Skip blank lines and '#'-prefixed comment lines.  (This condition had
    # been left with an unterminated string literal; restored to "'#'".)
    if not line or line[0] == '#':
      continue
    # e.g. filename.o: , saying which file it's from
    if ':' in line:
      continue
    parts = [seg for seg in line.split(' ') if len(seg)]
    # Drop a leading address column: either dashes or 8 hex digits.
    if len(parts) == 3 and parts[0] == "--------" or re.match(r'^[\da-f]{8}$', parts[0]):
      parts.pop(0)
    if len(parts) == 2:
      status, symbol = parts
      if status == 'U':
        undefs.append(symbol)
      elif status == 'C':
        commons.append(symbol)
      elif status == status.upper():
        defs.append(symbol)
  return ObjectFileInfo(0, None, set(defs), set(undefs), set(commons))
def emar(action, output_filename, filenames, stdout=None, stderr=None, env=None):
  """Run emar with *action* (e.g. 'cr') over *filenames*, producing *output_filename*.

  The input list is passed through a response file to avoid command-line
  length limits; the response file is always deleted, even on failure.  Any
  pre-existing output is removed first.  When creating ('c' in action) the
  existence of the output file is asserted afterwards.
  """
  try_delete(output_filename)
  response_filename = response_file.create_response_file(filenames, TEMP_DIR)
  cmd = [EMAR, action, output_filename] + ['@' + response_filename]
  try:
    run_process(cmd, stdout=stdout, stderr=stderr, env=env)
  finally:
    try_delete(response_filename)
  if 'c' in action:
    assert os.path.exists(output_filename), 'emar could not create output file: ' + output_filename
def opt_level_to_str(opt_level, shrink_level=0):
  """Translate numeric optimization/shrink levels into a single -O flag string.

  An opt_level of 0 always yields '-O0'.  Otherwise the shrink level wins:
  1 gives '-Os', 2 or more gives '-Oz'; only when there is no shrinking does
  the plain opt level (capped at 3) apply.
  """
  if opt_level == 0:
    return '-O0'
  if shrink_level >= 2:
    return '-Oz'
  if shrink_level == 1:
    return '-Os'
  return '-O' + str(min(opt_level, 3))
def js_optimizer(filename, passes):
  """Run the js_optimizer tool over *filename* with the given pass list.

  Returns the optimizer's result; aborts via exit_with_error when the
  underlying subprocess fails.
  """
  # NOTE: this local import deliberately shadows the function's own name
  # inside the body; js_optimizer.run below refers to the module.
  from . import js_optimizer
  try:
    return js_optimizer.run(filename, passes)
  except subprocess.CalledProcessError as e:
    exit_with_error("'%s' failed (%d)", ' '.join(e.cmd), e.returncode)
def acorn_optimizer(filename, passes, extra_info=None, return_output=False):
  """Run tools/acorn-optimizer.js over *filename* with the given pass names.

  Args:
    filename: JS file to transform.
    passes: list of pass names understood by acorn-optimizer.js; the first
      pass name is also used to label the saved intermediate file.
    extra_info: optional string appended to a temp copy of the input as a
      '// EXTRA_INFO:' comment, which the optimizer reads as side-channel data.
    return_output: when True, return the optimizer's stdout; otherwise write
      to '<original>.jso.js' and return that path.
  """
  optimizer = path_from_root('tools', 'acorn-optimizer.js')
  original_filename = filename
  if extra_info is not None:
    temp_files = configuration.get_temp_files()
    temp = temp_files.get('.js').name
    shutil.copyfile(filename, temp)
    with open(temp, 'a') as f:
      f.write('// EXTRA_INFO: ' + extra_info)
    filename = temp
  cmd = config.NODE_JS + [optimizer, filename] + passes
  if settings.USE_CLOSURE_COMPILER:
    cmd += ['--closureFriendly']
  if settings.VERBOSE:
    cmd += ['verbose']
  if not return_output:
    # Renamed from `next` to avoid shadowing the builtin; a `with` block also
    # ensures the output handle is closed (the original leaked it).
    next_filename = original_filename + '.jso.js'
    configuration.get_temp_files().note(next_filename)
    with open(next_filename, 'w') as outfile:
      check_call(cmd, stdout=outfile)
    save_intermediate(next_filename, '%s.js' % passes[0])
    return next_filename
  output = check_call(cmd, stdout=PIPE).stdout
  return output
def eval_ctors(js_file, binary_file, debug_info=False):
  """No-op placeholder: ctor evalling is disabled for the wasm backend.

  See https://github.com/emscripten-core/emscripten/issues/9527 for details.
  """
  logger.debug('Ctor evalling in the wasm backend is disabled due to https://github.com/emscripten-core/emscripten/issues/9527')
def get_closure_compiler():
  """Return the argv list used to invoke the closure compiler."""
  if config.CLOSURE_COMPILER:
    # An explicit compiler command was configured; use it as-is.
    return shared.CLOSURE_COMPILER
  cmd = shared.get_npm_cmd('google-closure-compiler')
  if not WINDOWS:
    # Give node a larger heap so big outputs don't fail with "out of memory".
    cmd.insert(-1, '--max_old_space_size=8192')
  return cmd
def check_closure_compiler(cmd, args, env, allowed_to_fail):
  """Sanity-check that the closure compiler command actually runs.

  Executes `cmd args --version` and verifies a 'Version:' line is printed.
  Returns True on success.  On failure, returns False when allowed_to_fail
  is set; otherwise aborts via exit_with_error.
  """
  try:
    output = run_process(cmd + args + ['--version'], stdout=PIPE, env=env).stdout
  except Exception as e:
    if not allowed_to_fail:
      logger.warn(str(e))
      exit_with_error('closure compiler ("%s --version") did not execute properly!' % str(cmd))
    return False
  if 'Version:' not in output:
    if not allowed_to_fail:
      exit_with_error('unrecognized closure compiler --version output (%s):\n%s' % (str(cmd), output))
    return False
  return True
def closure_compiler(filename, pretty, advanced=True, extra_closure_args=None):
  """Minify/optimize the JS in *filename* with the Google closure compiler.

  Collects the appropriate externs for the target environments, wires in
  user args from EMCC_CLOSURE_ARGS and *extra_closure_args*, and keeps every
  path passed to closure 7-bit ASCII.  Returns the output file path.

  Args:
    filename: JS file to compile.
    pretty: when True, ask closure for PRETTY_PRINT formatting.
    advanced: use ADVANCED_OPTIMIZATIONS instead of SIMPLE_OPTIMIZATIONS.
    extra_closure_args: optional extra command-line args for closure.
  """
  with ToolchainProfiler.profile_block('closure_compiler'):
    env = shared.env_with_node_in_path()
    user_args = []
    env_args = os.environ.get('EMCC_CLOSURE_ARGS')
    if env_args:
      user_args += shlex.split(env_args)
    if extra_closure_args:
      user_args += extra_closure_args
    # Make java discoverable for the java-platform fallback below.
    java_bin = os.path.dirname(config.JAVA)
    if java_bin:
      def add_to_path(dirname):
        env['PATH'] = env['PATH'] + os.pathsep + dirname
      add_to_path(java_bin)
      java_home = os.path.dirname(java_bin)
      env.setdefault('JAVA_HOME', java_home)
    closure_cmd = get_closure_compiler()
    # Probe the native closure build first; fall back to --platform=java if broken.
    native_closure_compiler_works = check_closure_compiler(closure_cmd, user_args, env, allowed_to_fail=True)
    if not native_closure_compiler_works and not any(a.startswith('--platform') for a in user_args):
      user_args.append('--platform=java')
      check_closure_compiler(closure_cmd, user_args, env, allowed_to_fail=False)
    CLOSURE_EXTERNS = [path_from_root('src', 'closure-externs', 'closure-externs.js')]
    # With an externs file for the exports, Closure is able to reason about the exports.
    if settings.WASM_FUNCTION_EXPORTS and not settings.DECLARE_ASM_MODULE_EXPORTS:
      # Generate an exports file that records all the exported symbols from the wasm module.
      module_exports_suppressions = '\n'.join(['/**\n * @suppress {duplicate, undefinedVars}\n */\nvar %s;\n' % asmjs_mangle(i) for i in settings.WASM_FUNCTION_EXPORTS])
      exports_file = configuration.get_temp_files().get('_module_exports.js')
      exports_file.write(module_exports_suppressions.encode())
      exports_file.close()
      CLOSURE_EXTERNS += [exports_file.name]
    # Node.js specific externs
    if shared.target_environment_may_be('node'):
      NODE_EXTERNS_BASE = path_from_root('third_party', 'closure-compiler', 'node-externs')
      NODE_EXTERNS = os.listdir(NODE_EXTERNS_BASE)
      NODE_EXTERNS = [os.path.join(NODE_EXTERNS_BASE, name) for name in NODE_EXTERNS
                      if name.endswith('.js')]
      CLOSURE_EXTERNS += [path_from_root('src', 'closure-externs', 'node-externs.js')] + NODE_EXTERNS
    # V8/SpiderMonkey shell specific externs
    if shared.target_environment_may_be('shell'):
      V8_EXTERNS = [path_from_root('src', 'closure-externs', 'v8-externs.js')]
      SPIDERMONKEY_EXTERNS = [path_from_root('src', 'closure-externs', 'spidermonkey-externs.js')]
      CLOSURE_EXTERNS += V8_EXTERNS + SPIDERMONKEY_EXTERNS
    # Web environment specific externs
    if shared.target_environment_may_be('web') or shared.target_environment_may_be('worker'):
      BROWSER_EXTERNS_BASE = path_from_root('src', 'closure-externs', 'browser-externs')
      if os.path.isdir(BROWSER_EXTERNS_BASE):
        BROWSER_EXTERNS = os.listdir(BROWSER_EXTERNS_BASE)
        BROWSER_EXTERNS = [os.path.join(BROWSER_EXTERNS_BASE, name) for name in BROWSER_EXTERNS
                           if name.endswith('.js')]
        CLOSURE_EXTERNS += BROWSER_EXTERNS
    if settings.MINIMAL_RUNTIME and settings.USE_PTHREADS and not settings.MODULARIZE:
      CLOSURE_EXTERNS += [path_from_root('src', 'minimal_runtime_worker_externs.js')]
    args = ['--compilation_level', 'ADVANCED_OPTIMIZATIONS' if advanced else 'SIMPLE_OPTIMIZATIONS']
    # Keep in sync with ecmaVersion in tools/acorn-optimizer.js
    args += ['--language_in', 'ECMASCRIPT_2020']
    # Tell closure not to do any transpiling or inject any polyfills.
    # At some point we may want to look into using this as way to convert to ES5 but
    # babel is perhaps a better tool for that.
    args += ['--language_out', 'NO_TRANSPILE']
    # Tell closure never to inject the 'use strict' directive.
    args += ['--emit_use_strict=false']
    # Closure compiler is unable to deal with path names that are not 7-bit ASCII:
    # https://github.com/google/closure-compiler/issues/3784
    tempfiles = configuration.get_temp_files()
    outfile = tempfiles.get('.cc.js').name # Safe 7-bit filename
    def move_to_safe_7bit_ascii_filename(filename):
      safe_filename = tempfiles.get('.js').name # Safe 7-bit filename
      shutil.copyfile(filename, safe_filename)
      return os.path.relpath(safe_filename, tempfiles.tmpdir)
    for e in CLOSURE_EXTERNS:
      args += ['--externs', move_to_safe_7bit_ascii_filename(e)]
    # Also sanitize any externs supplied by the user.
    for i in range(len(user_args)):
      if user_args[i] == '--externs':
        user_args[i + 1] = move_to_safe_7bit_ascii_filename(user_args[i + 1])
    # Specify output file relative to the temp directory to avoid specifying non-7-bit-ASCII path names.
    args += ['--js_output_file', os.path.relpath(outfile, tempfiles.tmpdir)]
    if settings.IGNORE_CLOSURE_COMPILER_ERRORS:
      args.append('--jscomp_off=*')
    if pretty:
      args += ['--formatting', 'PRETTY_PRINT']
    # Specify input file relative to the temp directory to avoid specifying non-7-bit-ASCII path names.
    args += ['--js', move_to_safe_7bit_ascii_filename(filename)]
    cmd = closure_cmd + args + user_args
    logger.debug('closure compiler: ' + ' '.join(cmd))
    # Closure compiler does not work if any of the input files contain characters outside the
    # 7-bit ASCII range. Therefore make sure the command line we pass does not contain any such
    # input files by passing all input filenames relative to the cwd. (user temp directory might
    # be in user's home directory, and user's profile name might contain unicode characters)
    proc = run_process(cmd, stderr=PIPE, check=False, env=env, cwd=tempfiles.tmpdir)
    # XXX Closure bug: if Closure is invoked with --create_source_map, Closure should create a
    # outfile.map source map file (https://github.com/google/closure-compiler/wiki/Source-Maps)
    # But it looks like it creates such files on Linux(?) even without setting that command line
    # flag (and currently we don't), so delete the produced source map file to not leak files in
    # the temp directory.
    try_delete(outfile + '.map')
    if proc.returncode != 0:
      logger.error('Closure compiler run failed:\n')
    elif len(proc.stderr.strip()) > 0:
      if settings.CLOSURE_WARNINGS == 'error':
        logger.error('Closure compiler completed with warnings and -s CLOSURE_WARNINGS=error enabled, aborting!\n')
      elif settings.CLOSURE_WARNINGS == 'warn':
        logger.warn('Closure compiler completed with warnings:\n')
    # On failure (or warnings, with EMCC_DEBUG=2) dump the numbered input lines for diagnosis.
    if DEBUG == 2 and (proc.returncode != 0 or (len(proc.stderr.strip()) > 0 and settings.CLOSURE_WARNINGS != 'quiet')):
      input_file = open(filename, 'r').read().splitlines()
      for i in range(len(input_file)):
        sys.stderr.write(str(i + 1) + ': ' + input_file[i] + '\n')
    if proc.returncode != 0:
      logger.error(proc.stderr)
      msg = 'closure compiler failed (rc: %d): %s' % (proc.returncode, shared.shlex_join(cmd))
      if not pretty:
        msg += ' the error message may be clearer with -g1 and EMCC_DEBUG=2 set'
      exit_with_error(msg)
    if len(proc.stderr.strip()) > 0 and settings.CLOSURE_WARNINGS != 'quiet':
      if settings.CLOSURE_WARNINGS == 'error':
        logger.error(proc.stderr)
      else:
        logger.warn(proc.stderr)
      if not pretty:
        logger.warn('(rerun with -g1 linker flag for an unminified output)')
      elif DEBUG != 2:
        logger.warn('(rerun with EMCC_DEBUG=2 enabled to dump Closure input file)')
      if settings.CLOSURE_WARNINGS == 'error':
        exit_with_error('closure compiler produced warnings and -s CLOSURE_WARNINGS=error enabled')
    return outfile
def minify_wasm_js(js_file, wasm_file, expensive_optimizations, minify_whitespace, debug_info):
  """Run JS-side cleanup/minification passes accompanying a wasm build.

  Applies dead-code elimination (AJSDCE when expensive optimizations are
  requested, JSDCE otherwise), optional whitespace minification, meta-DCE
  across the JS/wasm boundary, and import/export name minification.
  Returns the (possibly new) path of the processed JS file.
  """
  passes = []
  if not settings.LINKABLE:
    passes.append('JSDCE' if not expensive_optimizations else 'AJSDCE')
  if minify_whitespace:
    passes.append('minifyWhitespace')
  if passes:
    logger.debug('running cleanup on shell code: ' + ' '.join(passes))
    js_file = acorn_optimizer(js_file, passes)
  if not settings.LINKABLE:
    if expensive_optimizations:
      js_file = metadce(js_file, wasm_file, minify_whitespace=minify_whitespace, debug_info=debug_info)
      # meta-DCE may have removed JS glue; clean up again afterwards.
      passes = ['AJSDCE']
      if minify_whitespace:
        passes.append('minifyWhitespace')
      logger.debug('running post-meta-DCE cleanup on shell code: ' + ' '.join(passes))
      js_file = acorn_optimizer(js_file, passes)
      if settings.MINIFY_WASM_IMPORTS_AND_EXPORTS:
        js_file = minify_wasm_imports_and_exports(js_file, wasm_file, minify_whitespace=minify_whitespace, minify_exports=settings.MINIFY_ASMJS_EXPORT_NAMES, debug_info=debug_info)
  return js_file
def metadce(js_file, wasm_file, minify_whitespace, debug_info):
  """Run meta-DCE: dead-code elimination across the JS/wasm boundary.

  Builds a dependency graph from the JS side, hands it to wasm-metadce to
  prune the wasm (rewritten in place), then removes the now-unused JS glue.
  Returns the path of the updated JS file.
  """
  logger.debug('running meta-DCE')
  temp_files = configuration.get_temp_files()
  # first, get the JS part of the graph
  extra_info = '{ "exports": [' + ','.join(f'["{asmjs_mangle(x)}", "{x}"]' for x in settings.WASM_FUNCTION_EXPORTS) + ']}'
  txt = acorn_optimizer(js_file, ['emitDCEGraph', 'noPrint'], return_output=True, extra_info=extra_info)
  graph = json.loads(txt)
  # ensure that functions expected to be exported to the outside are roots
  for item in graph:
    if 'export' in item:
      export = item['export']
      # wasm backend's exports are prefixed differently inside the wasm
      export = asmjs_mangle(export)
      if export in user_requested_exports or settings.EXPORT_ALL:
        item['root'] = True
  # Keep the memory export alive as a root when memory is not imported.
  if not settings.IMPORTED_MEMORY:
    graph.append({
      'export': 'memory',
      'name': 'emcc$export$memory',
      'reaches': [],
      'root': True
    })
  # Likewise keep the indirect function table alive in non-relocatable builds.
  if not settings.RELOCATABLE:
    graph.append({
      'export': '__indirect_function_table',
      'name': 'emcc$export$__indirect_function_table',
      'reaches': [],
      'root': True
    })
  # Imports with these names get their module rewritten to the WASI module name.
  WASI_IMPORTS = set([
    'environ_get',
    'environ_sizes_get',
    'args_get',
    'args_sizes_get',
    'fd_write',
    'fd_close',
    'fd_read',
    'fd_seek',
    'fd_fdstat_get',
    'fd_sync',
    'fd_pread',
    'fd_pwrite',
    'proc_exit',
    'clock_res_get',
    'clock_time_get',
  ])
  for item in graph:
    if 'import' in item and item['import'][1][1:] in WASI_IMPORTS:
      item['import'][0] = settings.WASI_MODULE_NAME
  # Strip a single leading underscore from import names.
  for item in graph:
    if 'import' in item:
      if item['import'][1][0] == '_':
        item['import'][1] = item['import'][1][1:]
  # Map each graph node name to the 'emcc$import$...' form used for imports.
  import_name_map = {}
  for item in graph:
    if 'import' in item:
      import_name_map[item['name']] = 'emcc$import$' + item['import'][1]
  temp = temp_files.get('.txt').name
  txt = json.dumps(graph)
  with open(temp, 'w') as f:
    f.write(txt)
  # Run wasm-metadce over the wasm, pruning anything unreachable from the roots.
  out = run_binaryen_command('wasm-metadce',
                             wasm_file,
                             wasm_file,
                             ['--graph-file=' + temp],
                             debug=debug_info,
                             stdout=PIPE)
  # Collect the 'unused: ...' names reported by wasm-metadce.
  unused = []
  PREFIX = 'unused: '
  for line in out.splitlines():
    if line.startswith(PREFIX):
      name = line.replace(PREFIX, '').strip()
      if name in import_name_map:
        name = import_name_map[name]
      unused.append(name)
  # Finally, remove the unused JS glue.
  passes = ['applyDCEGraphRemovals']
  if minify_whitespace:
    passes.append('minifyWhitespace')
  extra_info = {'unused': unused}
  return acorn_optimizer(js_file, passes, extra_info=json.dumps(extra_info))
def asyncify_lazy_load_code(wasm_target, debug):
  """Emit the asyncify lazy-load companion wasm and adjust the primary wasm.

  Writes '<wasm_target>.lazy.wasm' with the memory removed and asyncify set
  to never unwind, then rewrites *wasm_target* in place so that asyncify
  always and only unwinds.
  """
  # create the lazy-load wasm: drop the memory and never unwind there
  args = ['--remove-memory', '--mod-asyncify-never-unwind']
  if settings.OPT_LEVEL > 0:
    args.append(opt_level_to_str(settings.OPT_LEVEL, settings.SHRINK_LEVEL))
  run_wasm_opt(wasm_target,
               wasm_target + '.lazy.wasm',
               args=args,
               debug=debug)
  # TODO: source maps etc.
  args = ['--mod-asyncify-always-and-only-unwind']
  if settings.OPT_LEVEL > 0:
    args.append(opt_level_to_str(settings.OPT_LEVEL, settings.SHRINK_LEVEL))
  run_wasm_opt(infile=wasm_target,
               outfile=wasm_target,
               args=args,
               debug=debug)
def minify_wasm_imports_and_exports(js_file, wasm_file, minify_whitespace, minify_exports, debug_info):
  """Minify import/export names in the wasm and apply the same renames in the JS.

  The wasm file is rewritten in place; returns the path of the updated JS
  file with the name mapping applied.
  """
  logger.debug('minifying wasm imports and exports')
  # run the pass
  if minify_exports:
    # standalone wasm mode means we need to emit a wasi import module.
    # otherwise, minify even the imported module names.
    if settings.MINIFY_WASM_IMPORTED_MODULES:
      pass_name = '--minify-imports-and-exports-and-modules'
    else:
      pass_name = '--minify-imports-and-exports'
  else:
    pass_name = '--minify-imports'
  out = run_wasm_opt(wasm_file, wasm_file,
                     [pass_name],
                     debug=debug_info,
                     stdout=PIPE)
  # TODO this is the last tool we run, after normal opts and metadce. it
  # might make sense to run Stack IR optimizations here or even -O (as
  # metadce which runs before us might open up new general optimization
  # opportunities). however, the benefit is less than 0.5%.
  # get the mapping: wasm-opt prints one 'old => new' line per renamed name
  SEP = ' => '
  mapping = {}
  for line in out.split('\n'):
    if SEP in line:
      old, new = line.strip().split(SEP)
      assert old not in mapping, 'imports must be unique'
      mapping[old] = new
  # apply them
  passes = ['applyImportAndExportNameChanges']
  if minify_whitespace:
    passes.append('minifyWhitespace')
  extra_info = {'mapping': mapping}
  return acorn_optimizer(js_file, passes, extra_info=json.dumps(extra_info))
def wasm2js(js_file, wasm_file, opt_level, minify_whitespace, use_closure_compiler, debug_info, symbols_file=None, symbols_file_js=None):
  """Compile *wasm_file* to JS with wasm2js and splice the result into *js_file*.

  Returns the path of a new '<js_file>.wasm2js.js' file in which the
  generated code replaces the __wasm2jsInstantiate__ marker.
  """
  logger.debug('wasm2js')
  args = ['--emscripten']
  if opt_level > 0:
    args += ['-O']
  if symbols_file:
    args += ['--symbols-file=%s' % symbols_file]
  wasm2js_js = run_binaryen_command('wasm2js', wasm_file,
                                    args=args,
                                    debug=debug_info,
                                    stdout=PIPE)
  if DEBUG:
    with open(os.path.join(get_emscripten_temp_dir(), 'wasm2js-output.js'), 'w') as f:
      f.write(wasm2js_js)
  # JS optimizations
  if opt_level >= 2:
    passes = []
    if not debug_info and not settings.USE_PTHREADS:
      passes += ['minifyNames']
      if symbols_file_js:
        passes += ['symbolMap=%s' % symbols_file_js]
    if minify_whitespace:
      passes += ['minifyWhitespace']
    passes += ['last']
    if passes:
      # hackish fixups to work around wasm2js style and the js optimizer FIXME
      wasm2js_js = '// EMSCRIPTEN_START_ASM\n' + wasm2js_js + '// EMSCRIPTEN_END_ASM\n'
      wasm2js_js = wasm2js_js.replace('// EMSCRIPTEN_START_FUNCS;\n', '// EMSCRIPTEN_START_FUNCS\n')
      wasm2js_js = wasm2js_js.replace('// EMSCRIPTEN_END_FUNCS;\n', '// EMSCRIPTEN_END_FUNCS\n')
      wasm2js_js = wasm2js_js.replace('\n function $', '\nfunction $')
      wasm2js_js = wasm2js_js.replace('\n }', '\n}')
      wasm2js_js += '\n// EMSCRIPTEN_GENERATED_FUNCTIONS\n'
      temp = configuration.get_temp_files().get('.js').name
      with open(temp, 'w') as f:
        f.write(wasm2js_js)
      temp = js_optimizer(temp, passes)
      with open(temp) as f:
        wasm2js_js = f.read()
  # Closure compiler: in mode 1, we just minify the shell. In mode 2, we
  # minify the wasm2js output as well, which is ok since it isn't
  # meant to be read by humans in that mode.
  if use_closure_compiler == 2:
    temp = configuration.get_temp_files().get('.js').name
    with open(temp, 'a') as f:
      f.write(wasm2js_js)
    temp = closure_compiler(temp, pretty=not minify_whitespace, advanced=False)
    with open(temp) as f:
      wasm2js_js = f.read()
  # Strip a trailing ';' since the code is inserted inside parens below.
  wasm2js_js = wasm2js_js.strip()
  if wasm2js_js[-1] == ';':
    wasm2js_js = wasm2js_js[:-1]
  with open(js_file) as f:
    all_js = f.read()
  # Find the instantiation marker the JS glue left for us (bracket or dot form).
  finds = re.findall(r'''[\w\d_$]+\[['"]__wasm2jsInstantiate__['"]\]''', all_js)
  if not finds:
    finds = re.findall(r'''[\w\d_$]+\.__wasm2jsInstantiate__''', all_js)
  assert len(finds) == 1
  marker = finds[0]
  # Substitute the generated code, wrapped in parens, at the marker.
  all_js = all_js.replace(marker, '(\n' + wasm2js_js + '\n)')
  js_file = js_file + '.wasm2js.js'
  with open(js_file, 'w') as f:
    f.write(all_js)
  return js_file
def strip(infile, outfile, debug=False, producers=False):
  """Copy *infile* to *outfile* with llvm-objcopy, optionally removing sections.

  debug drops all .debug* sections; producers drops the 'producers' section.
  """
  cmd = [LLVM_OBJCOPY, infile, outfile]
  if debug:
    cmd.append('--remove-section=.debug*')
  if producers:
    cmd.append('--remove-section=producers')
  check_call(cmd)
def emit_debug_on_side(wasm_file, wasm_file_with_dwarf):
  """Move DWARF debug info out of *wasm_file* into a separate side file.

  The side file location comes from settings.SEPARATE_DWARF (note: the
  incoming wasm_file_with_dwarf argument is immediately overwritten from
  that setting).  The main wasm is stripped of debug sections and gains an
  'external_debug_info' custom section pointing at the side file.
  """
  wasm_file_with_dwarf = settings.SEPARATE_DWARF
  if wasm_file_with_dwarf is True:
    # SEPARATE_DWARF was enabled without an explicit path; use a default
    # alongside the wasm file.
    wasm_file_with_dwarf = wasm_file + '.debug.wasm'
  embedded_path = settings.SEPARATE_DWARF_URL
  if not embedded_path:
    # no URL was provided - embed a path relative to the wasm instead.
    embedded_path = os.path.relpath(wasm_file_with_dwarf,
                                    os.path.dirname(wasm_file))
    # normalize the path to use URL-style separators, per the spec
    embedded_path = embedded_path.replace('\\', '/').replace('//', '/')
  shutil.move(wasm_file, wasm_file_with_dwarf)
  strip(wasm_file_with_dwarf, wasm_file, debug=True)
  # embed a section in the main wasm to point to the file with external DWARF,
  # see https://yurydelendik.github.io/webassembly-dwarf/#external-DWARF
  section_name = b'\x13external_debug_info' # section name, including prefixed size
  filename_bytes = embedded_path.encode('utf-8')
  contents = webassembly.toLEB(len(filename_bytes)) + filename_bytes
  section_size = len(section_name) + len(contents)
  with open(wasm_file, 'ab') as f:
    f.write(b'\0') # user section is code 0
    f.write(webassembly.toLEB(section_size))
    f.write(section_name)
    f.write(contents)
def little_endian_heap(js_file):
  """Run the acorn 'littleEndianHeap' pass over `js_file`; returns the new file path."""
  logger.debug('enforcing little endian heap byte order')
  return acorn_optimizer(js_file, ['littleEndianHeap'])
def apply_wasm_memory_growth(js_file):
  """Make the JS support a growable wasm heap (needed with pthreads).

  Runs the acorn 'growableHeap' pass, then prefixes the growableHeap support
  code; returns the path of the combined output file.
  """
  logger.debug('supporting wasm memory growth with pthreads')
  transformed = acorn_optimizer(js_file, ['growableHeap'])
  out_file = js_file + '.pgrow.js'
  support_path = path_from_root('src', 'growableHeap.js')
  with open(support_path) as support_f, \
       open(transformed, 'r') as body_f, \
       open(out_file, 'w') as out_f:
    out_f.write(support_f.read() + '\n' + body_f.read())
  return out_file
def use_unsigned_pointers_in_js(js_file):
  """Run the acorn 'unsignPointers' pass over `js_file`; returns the new file path."""
  logger.debug('using unsigned pointers in JS')
  return acorn_optimizer(js_file, ['unsignPointers'])
def instrument_js_for_asan(js_file):
  """Run the acorn 'asanify' pass over `js_file`; returns the new file path."""
  logger.debug('instrumenting JS memory accesses for ASan')
  return acorn_optimizer(js_file, ['asanify'])
def instrument_js_for_safe_heap(js_file):
  """Run the acorn 'safeHeap' pass over `js_file`; returns the new file path."""
  logger.debug('instrumenting JS memory accesses for SAFE_HEAP')
  return acorn_optimizer(js_file, ['safeHeap'])
def handle_final_wasm_symbols(wasm_file, symbols_file, debug_info):
  """Emit a symbols file for the final wasm and/or strip its debug info.

  symbols_file: if set, write wasm-opt's function map there.
  debug_info: when False, rewrite `wasm_file` in place without -g, which
  drops the debug info.
  """
  logger.debug('handle_final_wasm_symbols')
  opt_args = []
  if symbols_file:
    opt_args.append('--print-function-map')
  if debug_info:
    # suppress the wasm-opt warning regarding "no output file specified"
    opt_args.append('--quiet')
  else:
    # to remove debug info, we just write to that same file, and without -g
    opt_args.extend(['-o', wasm_file])
  # ignore stderr because if wasm-opt is run without a -o it will warn
  output = run_wasm_opt(wasm_file, args=opt_args, stdout=PIPE)
  if symbols_file:
    with open(symbols_file, 'w') as f:
      f.write(output)
def is_ar(filename):
  """Return True if `filename` looks like an ar archive, judged by its magic header.

  Results (positive and negative) are memoized in _is_ar_cache. Any error
  (unreadable file, etc.) is logged at debug level and reported as False.
  """
  try:
    # Use a containment check so cached *negative* results are honored too;
    # the previous truthiness test re-read files already known not to be archives.
    if filename in _is_ar_cache:
      return _is_ar_cache[filename]
    # use a context manager so the file handle is closed deterministically
    with open(filename, 'rb') as f:
      header = f.read(8)
    sigcheck = header == b'!<arch>\n'
    _is_ar_cache[filename] = sigcheck
    return sigcheck
  except Exception as e:
    logger.debug('is_ar failed to test whether file \'%s\' is a llvm archive file! Failed on exception: %s' % (filename, e))
    return False
def is_bitcode(filename):
  """Return True if `filename` contains LLVM bitcode, raw or macOS-wrapped.

  Raw bitcode starts with b'BC'; the macOS wrapper has a 20-byte prefix
  beginning with the little-endian encoding of 0x0B17C0DE, followed by the
  raw magic.
  """
  try:
    # look for the magic signature; 22 bytes is enough for the wrapped case
    # below, and reading once (under `with`) avoids the previous code's two
    # leaked file handles and double read
    with open(filename, 'rb') as f:
      b = f.read(22)
    if b[:2] == b'BC':
      return True
    # on macOS, there is a 20-byte prefix which starts with little endian
    # encoding of 0x0B17C0DE
    elif b[:4] == b'\xDE\xC0\x17\x0B':
      return b[20:22] == b'BC'
  except IndexError:
    # not enough characters in the input
    # note that logging will be done on the caller function
    pass
  return False
def is_wasm(filename):
  """Return True if `filename` starts with the wasm magic number b'\\0asm'."""
  # use a context manager so the file handle is closed deterministically
  # (the previous code leaked the handle)
  with open(filename, 'rb') as f:
    return f.read(4) == b'\0asm'
# Given the name of a special Emscripten-implemented system library, returns an
# array of absolute paths to JS library files inside emscripten/src/ that
# corresponds to the library name.
def map_to_js_libs(library_name):
  """Map a -l<name> system library onto the JS library files implementing it.

  Returns a (possibly empty) list of JS library file names, or None when the
  name is not an Emscripten-implemented library.
  """
  # Some native libraries are implemented in Emscripten as system side JS libraries
  library_map = {
    'c': [],
    'dl': [],
    'EGL': ['library_egl.js'],
    'GL': ['library_webgl.js', 'library_html5_webgl.js'],
    'webgl.js': ['library_webgl.js', 'library_html5_webgl.js'],
    'GLESv2': ['library_webgl.js'],
    # N.b. there is no GLESv3 to link to (note [f] in https://www.khronos.org/registry/implementers_guide.html)
    'GLEW': ['library_glew.js'],
    'glfw': ['library_glfw.js'],
    'glfw3': ['library_glfw.js'],
    'GLU': [],
    'glut': ['library_glut.js'],
    'm': [],
    'openal': ['library_openal.js'],
    'rt': [],
    'pthread': [],
    'X11': ['library_xlib.js'],
    'SDL': ['library_sdl.js'],
    'stdc++': [],
    'uuid': ['library_uuid.js'],
    'websocket': ['library_websocket.js']
  }
  libs = library_map.get(library_name)
  if libs is not None:
    logger.debug('Mapping library `%s` to JS libraries: %s' % (library_name, libs))
    return libs
  # -lfoo.js links the user-facing JS library src/library_foo.js, if present
  if library_name.endswith('.js') and os.path.isfile(path_from_root('src', 'library_' + library_name)):
    return ['library_' + library_name]
  return None
# Map a linker flag to a settings. This lets a user write -lSDL2 and it will have the same effect as
# -s USE_SDL=2.
def map_and_apply_to_settings(library_name):
  """Apply the settings changes implied by -l<library_name>.

  Returns True if any settings were changed, False otherwise.
  """
  # most libraries just work, because the -l name matches the name of the
  # library we build. however, if a library has variations, which cause us to
  # build multiple versions with multiple names, then we need this mechanism.
  library_map = {
    # SDL2_mixer's built library name contains the specific codecs built in.
    'SDL2_mixer': [('USE_SDL_MIXER', 2)],
  }
  settings_changes = library_map.get(library_name)
  if settings_changes is None:
    return False
  for key, value in settings_changes:
    logger.debug('Mapping library `%s` to settings changes: %s = %s' % (library_name, key, value))
    setattr(settings, key, value)
  return True
def emit_wasm_source_map(wasm_file, map_file, final_wasm):
  """Generate a source map for `wasm_file` from its DWARF, writing it to `map_file`.

  Paths inside the map are made relative to the directory of `final_wasm`.
  """
  base_path = os.path.dirname(os.path.abspath(final_wasm))
  cmd = [PYTHON, path_from_root('tools', 'wasm-sourcemap.py'), wasm_file]
  cmd.append('--dwarfdump=' + LLVM_DWARFDUMP)
  cmd.extend(['-o', map_file])
  cmd.append('--basepath=' + base_path)
  check_call(cmd)
def get_binaryen_feature_flags():
  """Return the binaryen feature flags: explicit ones from settings, or auto-detection."""
  # an empty/unset BINARYEN_FEATURES falls through to feature auto-detection
  return settings.BINARYEN_FEATURES or ['--detect-features']
def check_binaryen(bindir):
  """Sanity-check the binaryen installation in `bindir`.

  Exits with an error if wasm-opt is missing, fails to run, or reports an
  unparseable version; warns if the version is unexpected.
  """
  opt = os.path.join(bindir, exe_suffix('wasm-opt'))
  if not os.path.exists(opt):
    exit_with_error('binaryen executable not found (%s). Please check your binaryen installation' % opt)
  try:
    output = run_process([opt, '--version'], stdout=PIPE).stdout
  except subprocess.CalledProcessError:
    exit_with_error('error running binaryen executable (%s). Please check your binaryen installation' % opt)
  if not output:
    return
  first_line = output.splitlines()[0]
  try:
    # the version number is the third word, e.g. "wasm-opt version 98"
    version = int(first_line.split()[2])
  except (IndexError, ValueError):
    exit_with_error('error parsing binaryen version (%s). Please check your binaryen installation (%s)' % (first_line, opt))
  # one-off versions are tolerated, since binaryen bumps frequently
  if version not in (EXPECTED_BINARYEN_VERSION, EXPECTED_BINARYEN_VERSION + 1):
    diagnostics.warning('version-check', 'unexpected binaryen version: %s (expected %s)', version, EXPECTED_BINARYEN_VERSION)
def get_binaryen_bin():
  """Return binaryen's bin directory, validating the installation on first use."""
  global binaryen_checked
  bindir = os.path.join(config.BINARYEN_ROOT, 'bin')
  if not binaryen_checked:
    check_binaryen(bindir)
    binaryen_checked = True
  return bindir
def run_binaryen_command(tool, infile, outfile=None, args=None, debug=False, stdout=None):
  """Run a binaryen tool (e.g. wasm-opt) on `infile`, optionally writing `outfile`.

  args: extra command line flags for the tool (None means none).
  debug: pass -g so debug info is preserved in the output.
  stdout: forwarded to check_call (e.g. PIPE to capture the tool's output).
  Returns the captured stdout of the tool (None when not captured).
  """
  if args is None:
    # avoid the mutable-default-argument pitfall of `args=[]`
    args = []
  cmd = [os.path.join(get_binaryen_bin(), tool)]
  if outfile and tool == 'wasm-opt' and \
      (settings.DEBUG_LEVEL < 3 or settings.GENERATE_SOURCE_MAP):
    # strip dwarf debug sections when we don't need them: dwarf is only kept
    # at debug level 3+, and source maps are implemented separately from dwarf.
    # note that we add this pass first, so that it doesn't interfere with
    # later flags; wasm-opt can only strip all debug info or none, which
    # includes the name section which we may need.
    # TODO: once fastcomp is gone, either remove source maps entirely, or
    # support them by emitting a source map at the end from the dwarf,
    # and use llvm-objcopy to remove that final dwarf
    cmd += ['--strip-dwarf']
  cmd += args
  if infile:
    cmd += [infile]
  if outfile:
    cmd += ['-o', outfile]
    if settings.ERROR_ON_WASM_CHANGES_AFTER_LINK:
      # emit some extra helpful text for common issues
      extra = ''
      # a plain -O0 build *almost* doesn't need post-link changes, except for
      if settings.LEGALIZE_JS_FFI:
        extra += '\nnote: to disable int64 legalization (which requires changes after link) use -s WASM_BIGINT'
      if settings.OPT_LEVEL > 0:
        extra += '\nnote: -O2+ optimizations always require changes, build with -O0 or -O1 instead'
      exit_with_error('changes to the wasm are required after link, but disallowed by ERROR_ON_WASM_CHANGES_AFTER_LINK: ' + str(cmd) + extra)
  if debug:
    cmd += ['-g']
  cmd += get_binaryen_feature_flags()
  if settings.GENERATE_SOURCE_MAP and outfile:
    cmd += ['--input-source-map=' + infile + '.map']
    cmd += ['--output-source-map=' + outfile + '.map']
  ret = check_call(cmd, stdout=stdout).stdout
  if outfile:
    save_intermediate(outfile, '%s.wasm' % tool)
  return ret
def run_wasm_opt(*args, **kwargs):
  """Run binaryen's wasm-opt with the given run_binaryen_command arguments."""
  return run_binaryen_command('wasm-opt', *args, **kwargs)
# Monotonic counter giving debug copies unique, ordered filenames (see save_intermediate)
save_intermediate_counter = 0
def save_intermediate(src, dst):
  """In DEBUG mode, copy `src` into CANONICAL_TEMP_DIR as a numbered debug artifact.

  The copy is named 'emcc-<counter>-<dst>' so artifacts sort in creation order.
  No-op when DEBUG is off.
  """
  global save_intermediate_counter
  if not DEBUG:
    return
  numbered_name = 'emcc-%d-%s' % (save_intermediate_counter, dst)
  save_intermediate_counter += 1
  target = os.path.join(CANONICAL_TEMP_DIR, numbered_name)
  logger.debug('saving debug copy %s' % target)
  shutil.copyfile(src, target)
| true | true |
f73c6047b94bbb337a4a4687e553ee24c17309d6 | 12,051 | py | Python | transcrypt/development/shipment/shipment_test.py | CarstenGrohmann/Transcrypt | a4c7e044b352d6200e5ea3ec6000248a243dfd91 | [
"Apache-2.0"
] | 1 | 2017-08-11T01:51:51.000Z | 2017-08-11T01:51:51.000Z | transcrypt/development/shipment/shipment_test.py | CarstenGrohmann/Transcrypt | a4c7e044b352d6200e5ea3ec6000248a243dfd91 | [
"Apache-2.0"
] | null | null | null | transcrypt/development/shipment/shipment_test.py | CarstenGrohmann/Transcrypt | a4c7e044b352d6200e5ea3ec6000248a243dfd91 | [
"Apache-2.0"
] | 1 | 2021-02-07T00:22:12.000Z | 2021-02-07T00:22:12.000Z | import os
import os.path
import sys
import datetime
import webbrowser
import argparse
import time
import traceback
import selenium
import selenium.webdriver.chrome.options
import pathlib
# ======== Command args singleton
class CommandArgs:
    """Parses the command line once and exposes each flag as an attribute."""

    def __init__ (self):
        self.argParser = argparse.ArgumentParser ()
        flag_specs = (
            ('-de', '--dextex', "show extended exception reports"),
            ('-f', '--fcall', 'test fast calls'),
            ('-i', '--inst', 'installed version rather than new one'),
            ('-b', '--blind', 'don\'t start browser'),
            ('-u', '--unattended', 'unattended mode'),
        )
        for short_flag, long_flag, help_text in flag_specs:
            self.argParser.add_argument (short_flag, long_flag, help = help_text, action = 'store_true')
        # expose every parsed option directly as an attribute of this object
        self.__dict__.update (self.argParser.parse_args () .__dict__)
commandArgs = CommandArgs ()
# ======== Browser controller singleton
class BrowserController:
    """Drives a (possibly headless) Chrome instance to open generated test pages.

    In autotest mode ('run'), polls the page's #message element until the
    back-to-back test reports success or failure.
    """

    def __init__ (self):
        self.options = selenium.webdriver.chrome.options.Options ()
        self.options.add_argument ('start-maximized')
        if commandArgs.unattended:
            self.options.add_argument ('--headless')    # Runs Chrome in headless mode.
            self.options.add_argument ('--no-sandbox')  # Bypass OS security model
            self.options.add_argument ('--disable-gpu') # Applicable to windows OS only
        self.options.add_argument ('disable-infobars')
        self.options.add_argument ('--disable-extensions')
        self.webDriver = selenium.webdriver.Chrome (chrome_options = self.options)
        self.nrOfTabs = 0

    def waitForNewTab (self):
        """Block until the browser reports more tabs than we last counted."""
        while len (self.webDriver.window_handles) <= self.nrOfTabs:
            time.sleep (0.5)
        self.nrOfTabs = len (self.webDriver.window_handles)

    def open (self, url, run):
        """Open `url`; with run=True await and report the autotest verdict.

        Returns True on success (or when no autotest is expected), else False.
        """
        print (f'Browser controller is opening URL: {url}')
        try:
            if self.nrOfTabs > 0:
                if commandArgs.unattended:
                    # ---- Show in existing tab
                    self.webDriver.execute_script (f'window.location.href = "{url}";')
                else:
                    # ---- Open new tab
                    self.webDriver.execute_script (f'window.open ("{url}","_blank");') # !!! Avoid redundant open command
                    self.waitForNewTab ()
                    self.webDriver.switch_to.window (self.webDriver.window_handles [-1])
            else:
                # ---- Open browser and default tab
                self.webDriver.get (url)
                self.waitForNewTab ()
        except Exception:
            # A blocking alert (e.g. raised by a test page) prevents navigation;
            # accept it so the run can continue. Previously a bare `except:`,
            # which would also have swallowed KeyboardInterrupt/SystemExit.
            self.webDriver.switch_to.alert.accept ()
        if run:
            # Poll until the page publishes its back-to-back test verdict
            while True:
                self.message = self.webDriver.find_element_by_id ('message')
                if 'failed' in self.message.text or 'succeeded' in self.message.text:
                    break
                time.sleep (0.5)
            print ()
            print ('=========================================================================')
            print (f'Back to back autotest, result: {self.message.text.upper ()}')
            print ('=========================================================================')
            print ()
            return 'succeeded' in self.message.text
        else:
            print ()
            print ('=========================================================================')
            print ('No back to back autotest')
            print ('=========================================================================')
            print ()
            return True
browserController = BrowserController ()
# ======== Preparations
# Repo-relative paths of tests that failed; non-empty at the end means overall failure
relSourcePrepathsOfErrors = []
# Three local servers: CPython's http.server, parcel's dev server and a node server
host = 'http://localhost:'
pythonServerPort = '8000'
parcelServerPort = '8001'
nodeServerPort = '8002'
pythonServerUrl = host + pythonServerPort
parcelServerUrl = host + parcelServerPort
nodeServerUrl = host + nodeServerPort
# 'transcrypt' tests the installed package, 'run_transcrypt' the checked-out sources
transpileCommand = 'transcrypt' if commandArgs.inst else 'run_transcrypt'
# Normalize to forward slashes so paths can be spliced into URLs and shell commands
shipDir = os.path.dirname (os.path.abspath (__file__)) .replace ('\\', '/')
# The repo root is two directories up from this script
appRootDir = '/'.join (shipDir.split ('/')[ : -2])
print (f'\nApplication root directory: {appRootDir}\n')
def getAbsPath (relPath):
    """Resolve a repo-relative path against the application root directory."""
    return f'{appRootDir}/{relPath}'
os.system ('cls' if os.name == 'nt' else 'clear')

# ---- Start an http server in the Transcryp/transcrypt directory
# (the attended and unattended branches ran the identical command, so the
# redundant inner if/else was collapsed)
if not commandArgs.blind:
    os.system (f'py37 -m http.server --directory {appRootDir} &')

# ---- Allow visual check of all command line options
os.system (f'{transpileCommand} -h')
# ======== Individual test function
def test (relSourcePrepath, run, extraSwitches, messagePrename = '', nodeJs = False, parcelJs = False, build = True, pause = 0, needsAttention = False):
    '''Transpile one test/demo and optionally verify it in the browser.

    relSourcePrepath: repo-relative path of the source, without extension.
    run: also run under CPython first, generating back-to-back reference data.
    extraSwitches: extra Transcrypt command line switches (string).
    messagePrename: if set, capture compiler console output to <prename>.out.
    nodeJs / parcelJs: serve via node resp. parcel rather than http.server.
    build: pass -b (full build) to Transcrypt.
    pause: seconds to wait before starting (lets previous pages settle).
    needsAttention: skip this test in unattended mode (needs user interaction).
    '''
    if commandArgs.unattended and needsAttention:
        return  # This test shouldn't be done, since it can't run unattended
    print (f'\n\n******** BEGIN TEST {relSourcePrepath} ********\n')
    time.sleep (pause)
    # ---- Compute some slugs
    sourcePrepath = getAbsPath (relSourcePrepath)
    sourcePrepathSplit = sourcePrepath.split ("/")
    sourceDir = '/'.join (sourcePrepathSplit [:-1])
    moduleName = sourcePrepathSplit [-1]
    targetDir = f'{sourceDir}/__target__'
    targetPrepath = f'{targetDir}/{moduleName}'
    messagePrepath = f'{targetDir}/{messagePrename}'
    # ---- If there are relevant console messages of the compilation process,
    # like with the static typechecking tests, write them into a file that can be served for a visual check
    if not os.path.exists (targetDir):
        os.makedirs (targetDir) # Transcrypt will make targetDir too late, so it has to happen here
    redirect = f' > {messagePrepath}.out' if messagePrename else ''
    # ---- Default switches
    # NOTE(review): '-de' is already in the default switches, so the dextex
    # branch below adds it a second time - presumably harmless; confirm
    defaultSwitches = '-da -sf -de -m -n '
    if commandArgs.dextex:
        defaultSwitches += '-de '
    if build:
        defaultSwitches += '-b '
    # ---- Run with CPython to generate HTML file with back to back reference info
    if run:
        os.system (f'{transpileCommand} -r {defaultSwitches}{extraSwitches}{sourcePrepath}')
    # ---- Compile with Transcrypt
    if parcelJs:
        origDir = os.getcwd ()
        os.chdir (sourceDir)
        os.system (f'start cmd /k node test {parcelServerPort}')
        os.chdir (origDir)
    else:
        os.system (f'{transpileCommand} {defaultSwitches}{extraSwitches}{sourcePrepath}{redirect}')
    # ---- If it has to run on node, apply rollup to obtain monolith, since node doesn't support named imports and exports
    if nodeJs:
        os.system (f'rollup {targetPrepath}.js --o {targetPrepath}.bundle.js --f cjs')
    # --- Compute appropriate URL and wait a while if needed
    if not commandArgs.blind:
        if parcelJs:
            # give parcel's dev server time to come up before opening the page
            time.sleep (20)
            url = parcelServerUrl
        elif nodeJs:
            os.system (f'start cmd /k node {targetPrepath}.bundle.js {nodeServerPort}')
            time.sleep (5)
            url = nodeServerUrl
        else:
            url = f'{pythonServerUrl}/{relSourcePrepath}.html'
        success = browserController.open (url, run)
        if commandArgs.unattended and not success:
            relSourcePrepathsOfErrors.append (relSourcePrepath)
    print (f'\n******** END TEST {relSourcePrepath} ********\n\n')
# ======== Perform individual tests
# The whole suite runs once with default switches; with --fcall it runs a
# second time with fast-call code generation ('-f ') enabled.
for switches in (('', '-f ') if commandArgs.fcall else ('',)):
    test ('development/automated_tests/hello/autotest', True, switches)
    test ('development/automated_tests/transcrypt/autotest', True, switches + '-c -xr -xg ')
    test ('development/automated_tests/time/autotest', True, switches, needsAttention = True)
    test ('development/automated_tests/re/autotest', True, switches)

    test ('development/manual_tests/async_await/test', False, switches)
    test ('development/manual_tests/import_export_aliases/test', False, switches + '-am ')
    test ('development/manual_tests/module_random/module_random', False, switches)
    test ('development/manual_tests/static_types/static_types', False, switches + '-ds -dc ', messagePrename = 'static_types')
    test ('development/manual_tests/transcrypt_and_python_results_differ/results', False, switches)
    test ('development/manual_tests/transcrypt_only/transcrypt_only', False, switches)

    test ('demos/nodejs_demo/nodejs_demo', False, switches, nodeJs = True)
    test ('demos/parcel_demo/test_shipment', False, switches, parcelJs = True)
    test ('demos/terminal_demo/terminal_demo', False, switches, needsAttention = True)
    test ('demos/hello/hello', False, switches, needsAttention = False)
    test ('demos/jquery_demo/jquery_demo', False, switches)
    test ('demos/d3js_demo/d3js_demo', False, switches)
    test ('demos/ios_app/ios_app', False, switches)
    test ('demos/react_demo/react_demo', False, switches)
    test ('demos/riot_demo/riot_demo', False, switches)
    test ('demos/plotly_demo/plotly_demo', False, switches)
    test ('demos/three_demo/three_demo', False, switches)
    test ('demos/pong/pong', False, switches)
    test ('demos/pysteroids_demo/pysteroids', False, switches)

    # The turtle demos draw gradually; pause so the pages get time to settle
    test ('demos/turtle_demos/star', False, switches, pause = 2)
    test ('demos/turtle_demos/snowflake', False, switches, pause = 2)
    test ('demos/turtle_demos/mondrian', False, switches, pause = 2)
    test ('demos/turtle_demos/mandala', False, switches, pause = 2)

    # test ('demos/cyclejs_demo/cyclejs_demo', False, switches)
    test ('demos/cyclejs_demo/cyclejs_http_demo', False, switches)
    test ('demos/cyclejs_demo/component_demos/isolated_bmi_slider/bmi', False, switches)
    test ('demos/cyclejs_demo/component_demos/labeled_slider/labeled_slider', False, switches)

    test ('tutorials/baseline/bl_010_hello_world/hello_world', False, switches)
    test ('tutorials/baseline/bl_020_assign/assign', False, switches)
    test ('tutorials/baseline/bl_030_if_else_prompt/if_else_prompt', False, switches, needsAttention = True)
    test ('tutorials/baseline/bl_035_if_else_event/if_else_event', False, switches, needsAttention = True)
    test ('tutorials/baseline/bl_040_for_simple/for_simple', False, switches)
    test ('tutorials/baseline/bl_042_for_nested/for_nested', False, switches)
    test ('tutorials/baseline/bl_045_while_simple/while_simple', False, switches, needsAttention = True)

    test ('tutorials/static_typing/static_typing', False, switches + '-c -ds ', messagePrename = 'static_typing')

# ---- Report the overall verdict; exit code 1 on any failed test
if relSourcePrepathsOfErrors:
    print ('\n\n!!!!!!!!!!!!!!!!!!!!\n')
    for relSourcePrepathOfError in relSourcePrepathsOfErrors:
        print (f'SHIPMENT TEST ERROR: {relSourcePrepathOfError}')
    print ('\n!!!!!!!!!!!!!!!!!!!!\n\n')
    print ('\nSHIPMENT TEST FAILED\n')
    sys.exit (1)
else:
    # ---- Make docs, the resulting files are untracked
    if not commandArgs.unattended:
        origDir = os.getcwd ()
        sphinxDir = '/'.join ([appRootDir, 'docs/sphinx'])
        os.chdir (sphinxDir)
        os.system ('touch *.rst')
        os.system ('make html')
        os.chdir (origDir)
    # ---- Terminate
    print ('\nSHIPMENT TEST SUCCEEDED\n')
    sys.exit (0)
| 40.439597 | 152 | 0.604846 | import os
import os.path
import sys
import datetime
import webbrowser
import argparse
import time
import traceback
import selenium
import selenium.webdriver.chrome.options
import pathlib
class CommandArgs:
def __init__ (self):
self.argParser = argparse.ArgumentParser ()
self.argParser.add_argument ('-de', '--dextex', help = "show extended exception reports", action = 'store_true')
self.argParser.add_argument ('-f', '--fcall', help = 'test fast calls', action = 'store_true')
self.argParser.add_argument ('-i', '--inst', help = 'installed version rather than new one', action = 'store_true')
self.argParser.add_argument ('-b', '--blind', help = 'don\'t start browser', action = 'store_true')
self.argParser.add_argument ('-u', '--unattended', help = 'unattended mode', action = 'store_true')
self.__dict__.update (self.argParser.parse_args () .__dict__)
commandArgs = CommandArgs ()
# ======== Browser controller singleton
class BrowserController:
def __init__ (self):
self.options = selenium.webdriver.chrome.options.Options ()
self.options.add_argument ('start-maximized')
if commandArgs.unattended:
self.options.add_argument ('--headless') # Runs Chrome in headless mode.
self.options.add_argument ('--no-sandbox') # Bypass OS security model
self.options.add_argument ('--disable-gpu') # Applicable to windows OS only
self.options.add_argument ('disable-infobars')
self.options.add_argument ('--disable-extensions')
self.webDriver = selenium.webdriver.Chrome (chrome_options = self.options)
self.nrOfTabs = 0
def waitForNewTab (self):
while len (self.webDriver.window_handles) <= self.nrOfTabs:
time.sleep (0.5)
self.nrOfTabs = len (self.webDriver.window_handles)
def open (self, url, run):
print (f'Browser controller is opening URL: {url}')
try:
if self.nrOfTabs > 0:
if commandArgs.unattended:
# ---- Show in existing tab
self.webDriver.execute_script (f'window.location.href = "{url}";')
else:
# ---- Open new tab
self.webDriver.execute_script (f'window.open ("{url}","_blank");') # !!! Avoid redundant open command
self.waitForNewTab ()
self.webDriver.switch_to.window (self.webDriver.window_handles [-1])
else:
# ---- Open browser and default tab
self.webDriver.get (url)
self.waitForNewTab ()
except:
self.webDriver.switch_to.alert.accept();
if run:
while (True):
self.message = self.webDriver.find_element_by_id ('message')
if 'failed' in self.message.text or 'succeeded' in self.message.text:
break
time.sleep (0.5)
print ()
print ('=========================================================================')
print (f'Back to back autotest, result: {self.message.text.upper ()}')
print ('=========================================================================')
print ()
if 'succeeded' in self.message.text:
return True
else:
return False
else:
print ()
print ('=========================================================================')
print ('No back to back autotest')
print ('=========================================================================')
print ()
return True
browserController = BrowserController ()
# ======== Preparations
relSourcePrepathsOfErrors = []
host = 'http://localhost:'
pythonServerPort = '8000'
parcelServerPort = '8001'
nodeServerPort = '8002'
pythonServerUrl = host + pythonServerPort
parcelServerUrl = host + parcelServerPort
nodeServerUrl = host + nodeServerPort
transpileCommand = 'transcrypt' if commandArgs.inst else 'run_transcrypt'
shipDir = os.path.dirname (os.path.abspath (__file__)) .replace ('\\', '/')
appRootDir = '/'.join (shipDir.split ('/')[ : -2])
print (f'\nApplication root directory: {appRootDir}\n')
def getAbsPath (relPath):
return '{}/{}'.format (appRootDir, relPath)
os.system ('cls' if os.name == 'nt' else 'clear')
# ---- Start an http server in the Transcryp/transcrypt directory
if not commandArgs.blind:
if commandArgs.unattended:
os.system (f'py37 -m http.server --directory {appRootDir} &')
else:
os.system (f'py37 -m http.server --directory {appRootDir} &')
# ---- Allow visual check of all command line options
os.system (f'{transpileCommand} -h')
# ======== Individual test function
def test (relSourcePrepath, run, extraSwitches, messagePrename = '', nodeJs = False, parcelJs = False, build = True, pause = 0, needsAttention = False):
if commandArgs.unattended and needsAttention:
return # This test shouldn't be done, since it can't run unattended
print (f'\n\n******** BEGIN TEST {relSourcePrepath} ********\n')
time.sleep (pause)
# ---- Compute some slugs
sourcePrepath = getAbsPath (relSourcePrepath)
sourcePrepathSplit = sourcePrepath.split ("/")
sourceDir = '/'.join (sourcePrepathSplit [:-1])
moduleName = sourcePrepathSplit [-1]
targetDir = f'{sourceDir}/__target__'
targetPrepath = f'{targetDir}/{moduleName}'
messagePrepath = f'{targetDir}/{messagePrename}'
# ---- If there are relevant console messages of the compilation process,
# like with the static typechecking tests, write them into a file that can be served for a visual check
if not os.path.exists (targetDir):
os.makedirs (targetDir) # Transcrypt will make targetDir too late, so it has to happen here
redirect = f' > {messagePrepath}.out' if messagePrename else ''
# ---- Default switches
defaultSwitches = '-da -sf -de -m -n '
if commandArgs.dextex:
defaultSwitches += '-de '
if build:
defaultSwitches += '-b '
# ---- Run with CPython to generate HTML file with back to back reference info
if run:
os.system (f'{transpileCommand} -r {defaultSwitches}{extraSwitches}{sourcePrepath}')
# ---- Compile with Transcrypt
if parcelJs:
origDir = os.getcwd ()
os.chdir (sourceDir)
os.system (f'start cmd /k node test {parcelServerPort}')
os.chdir (origDir)
else:
os.system (f'{transpileCommand} {defaultSwitches}{extraSwitches}{sourcePrepath}{redirect}')
# ---- If it has to run on node, apply rollup to obtain monolith, since node doesn't support named imports and exports
if nodeJs:
os.system (f'rollup {targetPrepath}.js --o {targetPrepath}.bundle.js --f cjs')
if not commandArgs.blind:
if parcelJs:
time.sleep (20)
url = parcelServerUrl
elif nodeJs:
os.system (f'start cmd /k node {targetPrepath}.bundle.js {nodeServerPort}')
time.sleep (5)
url = nodeServerUrl
else:
url = f'{pythonServerUrl}/{relSourcePrepath}.html'
success = browserController.open (url, run)
if commandArgs.unattended and not success:
relSourcePrepathsOfErrors.append (relSourcePrepath)
print (f'\n******** END TEST {relSourcePrepath} ********\n\n')
for switches in (('', '-f ') if commandArgs.fcall else ('',)):
test ('development/automated_tests/hello/autotest', True, switches)
test ('development/automated_tests/transcrypt/autotest', True, switches + '-c -xr -xg ')
test ('development/automated_tests/time/autotest', True, switches, needsAttention = True)
test ('development/automated_tests/re/autotest', True, switches)
test ('development/manual_tests/async_await/test', False, switches)
test ('development/manual_tests/import_export_aliases/test', False, switches + '-am ')
test ('development/manual_tests/module_random/module_random', False, switches)
test ('development/manual_tests/static_types/static_types', False, switches + '-ds -dc ', messagePrename = 'static_types')
test ('development/manual_tests/transcrypt_and_python_results_differ/results', False, switches)
test ('development/manual_tests/transcrypt_only/transcrypt_only', False, switches)
test ('demos/nodejs_demo/nodejs_demo', False, switches, nodeJs = True)
test ('demos/parcel_demo/test_shipment', False, switches, parcelJs = True)
test ('demos/terminal_demo/terminal_demo', False, switches, needsAttention = True)
test ('demos/hello/hello', False, switches, needsAttention = False)
test ('demos/jquery_demo/jquery_demo', False, switches)
test ('demos/d3js_demo/d3js_demo', False, switches)
test ('demos/ios_app/ios_app', False, switches)
test ('demos/react_demo/react_demo', False, switches)
test ('demos/riot_demo/riot_demo', False, switches)
test ('demos/plotly_demo/plotly_demo', False, switches)
test ('demos/three_demo/three_demo', False, switches)
test ('demos/pong/pong', False, switches)
test ('demos/pysteroids_demo/pysteroids', False, switches)
test ('demos/turtle_demos/star', False, switches, pause = 2)
test ('demos/turtle_demos/snowflake', False, switches, pause = 2)
test ('demos/turtle_demos/mondrian', False, switches, pause = 2)
test ('demos/turtle_demos/mandala', False, switches, pause = 2)
test ('demos/cyclejs_demo/cyclejs_http_demo', False, switches)
test ('demos/cyclejs_demo/component_demos/isolated_bmi_slider/bmi', False, switches)
test ('demos/cyclejs_demo/component_demos/labeled_slider/labeled_slider', False, switches)
test ('tutorials/baseline/bl_010_hello_world/hello_world', False, switches)
test ('tutorials/baseline/bl_020_assign/assign', False, switches)
test ('tutorials/baseline/bl_030_if_else_prompt/if_else_prompt', False, switches, needsAttention = True)
test ('tutorials/baseline/bl_035_if_else_event/if_else_event', False, switches, needsAttention = True)
test ('tutorials/baseline/bl_040_for_simple/for_simple', False, switches)
test ('tutorials/baseline/bl_042_for_nested/for_nested', False, switches)
test ('tutorials/baseline/bl_045_while_simple/while_simple', False, switches, needsAttention = True)
test ('tutorials/static_typing/static_typing', False, switches + '-c -ds ', messagePrename = 'static_typing')
if relSourcePrepathsOfErrors:
print ('\n\n!!!!!!!!!!!!!!!!!!!!\n')
for relSourcePrepathOfError in relSourcePrepathsOfErrors:
print (f'SHIPMENT TEST ERROR: {relSourcePrepathOfError}')
print ('\n!!!!!!!!!!!!!!!!!!!!\n\n')
print ('\nSHIPMENT TEST FAILED\n')
sys.exit (1)
else:
if not commandArgs.unattended:
origDir = os.getcwd ()
sphinxDir = '/'.join ([appRootDir, 'docs/sphinx'])
os.chdir (sphinxDir)
os.system ('touch *.rst')
os.system ('make html')
os.chdir (origDir)
print ('\nSHIPMENT TEST SUCCEEDED\n')
sys.exit (0)
| true | true |
f73c62120cea10949b8e1d84d60b6e62f5947291 | 3,425 | py | Python | src/ggrc/migrations/utils/fix_acl.py | MikalaiMikalalai/ggrc-core | f0f83b3638574bb64de474f3b70ed27436ca812a | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-01-12T23:46:00.000Z | 2019-01-12T23:46:00.000Z | src/ggrc/migrations/utils/fix_acl.py | MikalaiMikalalai/ggrc-core | f0f83b3638574bb64de474f3b70ed27436ca812a | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/ggrc/migrations/utils/fix_acl.py | MikalaiMikalalai/ggrc-core | f0f83b3638574bb64de474f3b70ed27436ca812a | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Functions to fix data related to ACL"""
import sqlalchemy as sa
# pylint: disable=too-many-arguments
def create_missing_acl(connection, migration_user_id, role_id,
                       table_name, object_type, revision_action):
  """Insert access_control_list records for objects missing a role.

  The `modified_by_id` of each created row is the migration user.

  Args:
    connection: SQLAlchemy connection object;
    migration_user_id: the id of Migrator user (used as a default Admin);
    role_id: ACR.id of the correct role;
    table_name: name of the table with ids of objects with no Admins;
    object_type: string name of object type processed (e.g. 'Document');
    revision_action: the value for Revision.action field (e.g. 'created').
  """
  # only the table name is interpolated; every value goes in as a bind param
  sql = f"""
      INSERT INTO access_control_list (
        ac_role_id,
        object_id,
        object_type,
        created_at,
        modified_by_id,
        updated_at)
      SELECT
        :admin_role_id,
        twoa.id as object_id,
        :object_type,
        NOW(),
        :migration_user_id,
        NOW()
      FROM {table_name} twoa
      LEFT OUTER JOIN revisions r ON
        r.resource_id=twoa.id
        AND r.resource_type=:object_type
        AND r.action=:revision_action
      GROUP BY object_id
  """
  connection.execute(
      sa.text(sql),
      migration_user_id=migration_user_id,
      admin_role_id=role_id,
      object_type=object_type,
      revision_action=revision_action,
  )
# pylint: disable=too-many-arguments
def create_missing_acp(connection, migrator_id, role_name, revision_action):
  """Insert access_control_people records for a role missing people.

  If a matching revision exists, its modified_by_id becomes the person;
  otherwise the migration user is used.

  Args:
    connection: SQLAlchemy connection object;
    migrator_id: the id of Migrator user (used as a default Admin);
    role_name: ACR.name of the Admin role;
    revision_action: the value for Revision.action field (e.g. 'created').
  """
  # NOTE: the WHERE clause previously hardcoded r.action = 'created' while
  # `revision_action` was bound but never referenced; the bind parameter is
  # now used, matching the documented contract and create_missing_acl above.
  sql = """
      INSERT INTO access_control_people (
        person_id,
        ac_list_id,
        updated_at,
        modified_by_id,
        created_at)
      SELECT
        IF(r.modified_by_id is NOT NULL, r.modified_by_id,
          :migrator_id) as person_id,
        acl.id AS ac_list_id,
        NOW() AS updated_at,
        :migrator_id AS modified_by_id,
        NOW() AS created_at
      FROM access_control_list acl
      LEFT OUTER JOIN revisions r ON
        r.resource_type = acl.object_type AND
        r.resource_id = acl.object_id
      LEFT OUTER JOIN access_control_roles acr ON
        acr.id = acl.ac_role_id
      WHERE r.action = :revision_action AND
          acr.name = :admin_role_name
  """
  connection.execute(
      sa.text(sql),
      migrator_id=migrator_id,
      admin_role_name=role_name,
      revision_action=revision_action,
  )
| 32.009346 | 78 | 0.681752 |
import sqlalchemy as sa
def create_missing_acl(connection, migration_user_id, role_id,
table_name, object_type, revision_action):
sql = """
INSERT INTO access_control_list (
ac_role_id,
object_id,
object_type,
created_at,
modified_by_id,
updated_at)
SELECT
:admin_role_id,
twoa.id as object_id,
:object_type,
NOW(),
:migration_user_id,
NOW()
FROM {table_mame} twoa
LEFT OUTER JOIN revisions r ON
r.resource_id=twoa.id
AND r.resource_type=:object_type
AND r.action=:revision_action
GROUP BY object_id
""".format(table_mame=table_name)
connection.execute(
sa.text(sql),
migration_user_id=migration_user_id,
admin_role_id=role_id,
object_type=object_type,
revision_action=revision_action,
)
def create_missing_acp(connection, migrator_id, role_name, revision_action):
sql = """
INSERT INTO access_control_people (
person_id,
ac_list_id,
updated_at,
modified_by_id,
created_at)
SELECT
IF(r.modified_by_id is NOT NULL, r.modified_by_id,
:migrator_id) as person_id,
acl.id AS ac_list_id,
NOW() AS updated_at,
:migrator_id AS modified_by_id,
NOW() AS created_at
FROM access_control_list acl
LEFT OUTER JOIN revisions r ON
r.resource_type = acl.object_type AND
r.resource_id = acl.object_id
LEFT OUTER JOIN access_control_roles acr ON
acr.id = acl.ac_role_id
WHERE r.action = 'created' AND
acr.name = :admin_role_name
"""
connection.execute(
sa.text(sql),
migrator_id=migrator_id,
admin_role_name=role_name,
revision_action=revision_action,
)
| true | true |
f73c62330712aa3393752342e447a3eaba185e4a | 207 | py | Python | src/gcp_fastapi_poetry/types.py | k2bd/gcp-fastapi-poetry | cf0f4db1fa3ac2abc35cc7563a19a6f527e7abdf | [
"MIT"
] | 2 | 2022-01-08T15:38:42.000Z | 2022-01-28T20:42:13.000Z | src/gcp_fastapi_poetry/types.py | k2bd/gcp-fastapi-poetry | cf0f4db1fa3ac2abc35cc7563a19a6f527e7abdf | [
"MIT"
] | 3 | 2022-01-08T14:24:15.000Z | 2022-01-08T17:52:09.000Z | src/gcp_fastapi_poetry/types.py | k2bd/gcp-fastapi-poetry | cf0f4db1fa3ac2abc35cc7563a19a6f527e7abdf | [
"MIT"
] | null | null | null | from fastapi_camelcase import CamelModel
class ExampleResponse(CamelModel):
"""
A person, place, or thing to say hello to
"""
#: Some value of the example response
response_value: str
| 18.818182 | 45 | 0.695652 | from fastapi_camelcase import CamelModel
class ExampleResponse(CamelModel):
response_value: str
| true | true |
f73c6236ff880ec85b61e187f5e2c2fd6f708956 | 2,536 | py | Python | aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/ContextQueryLogRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/ContextQueryLogRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/ContextQueryLogRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class ContextQueryLogRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Emr', '2016-04-08', 'ContextQueryLog')
def get_PackId(self):
return self.get_query_params().get('PackId')
def set_PackId(self,PackId):
self.add_query_param('PackId',PackId)
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_TotalOffset(self):
return self.get_query_params().get('TotalOffset')
def set_TotalOffset(self,TotalOffset):
self.add_query_param('TotalOffset',TotalOffset)
def get_Size(self):
return self.get_query_params().get('Size')
def set_Size(self,Size):
self.add_query_param('Size',Size)
def get_PackMeta(self):
return self.get_query_params().get('PackMeta')
def set_PackMeta(self,PackMeta):
self.add_query_param('PackMeta',PackMeta)
def get__From(self):
return self.get_query_params().get('From')
def set__From(self,_From):
self.add_query_param('From',_From)
def get_ClusterId(self):
return self.get_query_params().get('ClusterId')
def set_ClusterId(self,ClusterId):
self.add_query_param('ClusterId',ClusterId)
def get_To(self):
return self.get_query_params().get('To')
def set_To(self,To):
self.add_query_param('To',To)
def get_Reverse(self):
return self.get_query_params().get('Reverse')
def set_Reverse(self,Reverse):
self.add_query_param('Reverse',Reverse)
def get_LogStore(self):
return self.get_query_params().get('LogStore')
def set_LogStore(self,LogStore):
self.add_query_param('LogStore',LogStore) | 30.190476 | 68 | 0.74724 |
from aliyunsdkcore.request import RpcRequest
class ContextQueryLogRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Emr', '2016-04-08', 'ContextQueryLog')
def get_PackId(self):
return self.get_query_params().get('PackId')
def set_PackId(self,PackId):
self.add_query_param('PackId',PackId)
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_TotalOffset(self):
return self.get_query_params().get('TotalOffset')
def set_TotalOffset(self,TotalOffset):
self.add_query_param('TotalOffset',TotalOffset)
def get_Size(self):
return self.get_query_params().get('Size')
def set_Size(self,Size):
self.add_query_param('Size',Size)
def get_PackMeta(self):
return self.get_query_params().get('PackMeta')
def set_PackMeta(self,PackMeta):
self.add_query_param('PackMeta',PackMeta)
def get__From(self):
return self.get_query_params().get('From')
def set__From(self,_From):
self.add_query_param('From',_From)
def get_ClusterId(self):
return self.get_query_params().get('ClusterId')
def set_ClusterId(self,ClusterId):
self.add_query_param('ClusterId',ClusterId)
def get_To(self):
return self.get_query_params().get('To')
def set_To(self,To):
self.add_query_param('To',To)
def get_Reverse(self):
return self.get_query_params().get('Reverse')
def set_Reverse(self,Reverse):
self.add_query_param('Reverse',Reverse)
def get_LogStore(self):
return self.get_query_params().get('LogStore')
def set_LogStore(self,LogStore):
self.add_query_param('LogStore',LogStore) | true | true |
f73c639a89fdbf68215d5a0345d237d6c3d9b14d | 7,533 | py | Python | tests/system/esmvaltool_testlib.py | cffbots/ESMValTool | a9b6592a02f2085634a214ff5f36a736fa18ff47 | [
"Apache-2.0"
] | 148 | 2017-02-07T13:16:03.000Z | 2022-03-26T02:21:56.000Z | tests/system/esmvaltool_testlib.py | cffbots/ESMValTool | a9b6592a02f2085634a214ff5f36a736fa18ff47 | [
"Apache-2.0"
] | 2,026 | 2017-02-03T12:57:13.000Z | 2022-03-31T15:11:51.000Z | tests/system/esmvaltool_testlib.py | cffbots/ESMValTool | a9b6592a02f2085634a214ff5f36a736fa18ff47 | [
"Apache-2.0"
] | 113 | 2017-01-27T13:10:19.000Z | 2022-02-03T13:42:11.000Z | """Provide a class for testing esmvaltool."""
import glob
import os
import shutil
import sys
from unittest import SkipTest
import numpy as np
import yaml
# from easytest import EasyTest
import esmvaltool
def _load_config(filename=None):
"""Load test configuration"""
if filename is None:
# look in default locations for config-test.yml
config_file = 'config-test.yml'
default_locations = [
'.',
'~/.esmvaltool',
os.path.dirname(__file__),
]
for path in default_locations:
filepath = os.path.join(os.path.expanduser(path), config_file)
if os.path.exists(filepath):
filename = os.path.abspath(filepath)
break
with open(filename, 'r') as file:
cfg = yaml.safe_load(file)
cfg['configfile'] = filename
cfg['reference']['output'] = os.path.abspath(
os.path.expanduser(cfg['reference']['output']))
if cfg['test'].get('recipes', []) == []:
script_root = esmvaltool.get_script_root()
recipe_glob = os.path.join(script_root, 'nml', 'recipe_*.yml')
cfg['test']['recipes'] = glob.glob(recipe_glob)
return cfg
_CFG = _load_config()
RECIPES = _CFG['test']['recipes']
def _create_config_user_file(output_directory):
"""Write a config-user.yml file.
Write a configuration file for running ESMValTool
such that it writes all output to `output_directory`.
"""
cfg = _CFG['user']
cfg['output_dir'] = output_directory
# write to file
filename = os.path.join(output_directory, 'config-user.yml')
with open(filename, 'w') as file:
yaml.safe_dump(cfg, file)
return filename
class ESMValToolTest: # was ESMValToolTest(EasyTest)
"""Main class for ESMValTool test runs."""
def __init__(self, recipe, output_directory, ignore='', **kwargs):
"""
Create ESMValToolTest instance
recipe: str
The filename of the recipe that should be tested.
output_directory : str
The name of a directory where results can be stored.
ignore: str or iterable of str
Glob patterns of files to be ignored when testing.
"""
if not _CFG['test']['run']:
raise SkipTest("System tests disabled in {}".format(
_CFG['configfile']))
self.ignore = (ignore, ) if isinstance(ignore, str) else ignore
script_root = esmvaltool.get_script_root()
# Set recipe path
if not os.path.exists(recipe):
recipe = os.path.join(
os.path.dirname(script_root), 'recipes', recipe)
self.recipe_file = os.path.abspath(recipe)
# Simulate input data?
self.simulate_input = _CFG['test']['simulate_input']
# Create reference output?
self.create_reference_output = _CFG['reference']['generate']
# Define reference output path
reference_dir = os.path.join(
_CFG['reference']['output'],
os.path.splitext(os.path.basename(self.recipe_file))[0])
# If reference data is neither available nor should be generated, skip
if not (os.path.exists(reference_dir) or self.create_reference_output):
raise SkipTest(
"No reference data available for recipe {} in {}".format(
recipe, _CFG['reference']['output']))
# Write ESMValTool configuration file
self.config_user_file = _create_config_user_file(output_directory)
super(ESMValToolTest, self).__init__(
exe='esmvaltool',
args=['-n', self.recipe_file, '-c', self.config_user_file],
output_directory=output_directory,
refdirectory=reference_dir,
**kwargs)
def run(self, **kwargs):
"""Run tests or generate reference data."""
if self.simulate_input:
from .data_simulator import simulate_input_data
simulate_input_data(
recipe_file=self.recipe_file,
config_user_file=self.config_user_file)
if self.create_reference_output:
self.generate_reference_output()
raise SkipTest("Generated reference data instead of running test")
else:
super(ESMValToolTest, self).run_tests(**kwargs)
def generate_reference_output(self):
"""Generate reference output.
Generate reference data by executing the recipe and then moving
results to the output directory.
"""
if not os.path.exists(self.refdirectory):
self._execute()
shutil.move(self.output_directory,
os.path.dirname(self.refdirectory))
else:
print("Warning: not generating reference data, reference "
"directory {} already exists.".format(self.refdirectory))
def _execute(self):
"""Execute ESMValTool
Override the _execute method because we want to run in our own
Python instance to get coverage reporting and we want to update
the location of `self.output_directory` afterwards.
"""
# run ESMValTool
sys.argv[1:] = self.args
esmvaltool.main.run()
# Update the output directory to point to the output of the run
output_directory = self.output_directory # noqa
output = []
for path in os.listdir(output_directory):
path = os.path.join(output_directory, path)
if os.path.isdir(path):
output.append(path)
if not output:
raise OSError(
"Output directory not found in location {}. "
"Probably ESMValTool failed to create any output.".format(
output_directory))
if len(output) > 1:
print("Warning: found multiple output directories:\n{}\nin output "
"location {}\nusing the first one.".format(
output, output_directory))
self.output_directory = output[0] + os.sep # noqa
def _get_files_from_refdir(self):
"""Get a list of files from reference directory.
Ignore files that match patterns in self.ignore.
Override this method of easytest.EasyTest to be able to ignore certain
files.
"""
from fnmatch import fnmatchcase
matches = []
for root, _, filenames in os.walk(self.refdirectory):
for filename in filenames:
path = os.path.join(root, filename)
relpath = os.path.relpath(path, start=self.refdirectory)
for pattern in self.ignore:
if fnmatchcase(relpath, pattern):
break
else:
matches.append(path)
return matches
def _compare_netcdf_values(self, f1, f2, allow_subset=False):
"""Compare two netCDF4 Dataset instances.
Check if dataset2 contains the same variable values as dataset1.
Override this method of easytest.EasyTest because it is broken
for the case where value1 and value2 have no length.
"""
if allow_subset: # allow that only a subset of data is compared
raise NotImplementedError
for key in f1.variables:
values1 = f1.variables[key][:]
values2 = f2.variables[key][:]
if not np.array_equal(values1, values2):
return False
return True
| 33.039474 | 79 | 0.608257 |
import glob
import os
import shutil
import sys
from unittest import SkipTest
import numpy as np
import yaml
import esmvaltool
def _load_config(filename=None):
if filename is None:
config_file = 'config-test.yml'
default_locations = [
'.',
'~/.esmvaltool',
os.path.dirname(__file__),
]
for path in default_locations:
filepath = os.path.join(os.path.expanduser(path), config_file)
if os.path.exists(filepath):
filename = os.path.abspath(filepath)
break
with open(filename, 'r') as file:
cfg = yaml.safe_load(file)
cfg['configfile'] = filename
cfg['reference']['output'] = os.path.abspath(
os.path.expanduser(cfg['reference']['output']))
if cfg['test'].get('recipes', []) == []:
script_root = esmvaltool.get_script_root()
recipe_glob = os.path.join(script_root, 'nml', 'recipe_*.yml')
cfg['test']['recipes'] = glob.glob(recipe_glob)
return cfg
_CFG = _load_config()
RECIPES = _CFG['test']['recipes']
def _create_config_user_file(output_directory):
cfg = _CFG['user']
cfg['output_dir'] = output_directory
filename = os.path.join(output_directory, 'config-user.yml')
with open(filename, 'w') as file:
yaml.safe_dump(cfg, file)
return filename
class ESMValToolTest:
def __init__(self, recipe, output_directory, ignore='', **kwargs):
if not _CFG['test']['run']:
raise SkipTest("System tests disabled in {}".format(
_CFG['configfile']))
self.ignore = (ignore, ) if isinstance(ignore, str) else ignore
script_root = esmvaltool.get_script_root()
if not os.path.exists(recipe):
recipe = os.path.join(
os.path.dirname(script_root), 'recipes', recipe)
self.recipe_file = os.path.abspath(recipe)
self.simulate_input = _CFG['test']['simulate_input']
self.create_reference_output = _CFG['reference']['generate']
reference_dir = os.path.join(
_CFG['reference']['output'],
os.path.splitext(os.path.basename(self.recipe_file))[0])
if not (os.path.exists(reference_dir) or self.create_reference_output):
raise SkipTest(
"No reference data available for recipe {} in {}".format(
recipe, _CFG['reference']['output']))
self.config_user_file = _create_config_user_file(output_directory)
super(ESMValToolTest, self).__init__(
exe='esmvaltool',
args=['-n', self.recipe_file, '-c', self.config_user_file],
output_directory=output_directory,
refdirectory=reference_dir,
**kwargs)
def run(self, **kwargs):
if self.simulate_input:
from .data_simulator import simulate_input_data
simulate_input_data(
recipe_file=self.recipe_file,
config_user_file=self.config_user_file)
if self.create_reference_output:
self.generate_reference_output()
raise SkipTest("Generated reference data instead of running test")
else:
super(ESMValToolTest, self).run_tests(**kwargs)
def generate_reference_output(self):
if not os.path.exists(self.refdirectory):
self._execute()
shutil.move(self.output_directory,
os.path.dirname(self.refdirectory))
else:
print("Warning: not generating reference data, reference "
"directory {} already exists.".format(self.refdirectory))
def _execute(self):
sys.argv[1:] = self.args
esmvaltool.main.run()
output_directory = self.output_directory
output = []
for path in os.listdir(output_directory):
path = os.path.join(output_directory, path)
if os.path.isdir(path):
output.append(path)
if not output:
raise OSError(
"Output directory not found in location {}. "
"Probably ESMValTool failed to create any output.".format(
output_directory))
if len(output) > 1:
print("Warning: found multiple output directories:\n{}\nin output "
"location {}\nusing the first one.".format(
output, output_directory))
self.output_directory = output[0] + os.sep
def _get_files_from_refdir(self):
from fnmatch import fnmatchcase
matches = []
for root, _, filenames in os.walk(self.refdirectory):
for filename in filenames:
path = os.path.join(root, filename)
relpath = os.path.relpath(path, start=self.refdirectory)
for pattern in self.ignore:
if fnmatchcase(relpath, pattern):
break
else:
matches.append(path)
return matches
def _compare_netcdf_values(self, f1, f2, allow_subset=False):
if allow_subset:
raise NotImplementedError
for key in f1.variables:
values1 = f1.variables[key][:]
values2 = f2.variables[key][:]
if not np.array_equal(values1, values2):
return False
return True
| true | true |
f73c650605b57aca4116cb567499a484e2218797 | 1,066 | py | Python | x11/massSpringParam1.py | Shirshakk-P/ControlSystems | 2a6b147aa583cf5329ce9c84b0d84d72aba2bda4 | [
"MIT"
] | 1 | 2021-01-26T14:52:06.000Z | 2021-01-26T14:52:06.000Z | x11/massSpringParam1.py | Shirshakk-P/ControlSystems | 2a6b147aa583cf5329ce9c84b0d84d72aba2bda4 | [
"MIT"
] | null | null | null | x11/massSpringParam1.py | Shirshakk-P/ControlSystems | 2a6b147aa583cf5329ce9c84b0d84d72aba2bda4 | [
"MIT"
] | null | null | null | #Mass Spring Damper system Parameter File
import numpy as np
import control as cnt
import sys
sys.path.append('..') #add parent directory
import massSpringParam as P
Ts = P.Ts
beta = P.beta
tau_max = P.tau_max
m = P.m
k = P.k
b = P.b
#tuning parameters
#tr=1.6 #previous homework was done on the basis of tr=1.6 and step input is taken from this homework onwards
tr = 1.5
zeta = 0.7
#State Space Equations
# xdot = A*x + B*u
# y = C*x
A = np.array([[0.0, 1.0],
[-P.k/P.m, -P.b/P.m]])
B = np.array([[0.0],
[1.0/P.m]])
C = np.array([[1.0, 0.0]])
#gain calculation
wn = 2.2/tr #natural frequency
des_char_poly = [1, 2*zeta*wn, wn**2]
des_poles = np.roots(des_char_poly)
#Compute the gains if the system is controllable
if np.linalg.matrix_rank(cnt.ctrb(A, B)) != 2:
print("The system is not controllable")
else:
#.A just turns K matrix into a numpy array
K = (cnt.acker(A, B, des_poles)).A
kr = -1.0/(C @ np.linalg.inv(A - B @ K) @ B)
print('K: ', K)
print('kr: ', kr)
| 24.227273 | 113 | 0.602251 |
import numpy as np
import control as cnt
import sys
sys.path.append('..')
import massSpringParam as P
Ts = P.Ts
beta = P.beta
tau_max = P.tau_max
m = P.m
k = P.k
b = P.b
ray([[0.0],
[1.0/P.m]])
C = np.array([[1.0, 0.0]])
wn = 2.2/tr
des_char_poly = [1, 2*zeta*wn, wn**2]
des_poles = np.roots(des_char_poly)
if np.linalg.matrix_rank(cnt.ctrb(A, B)) != 2:
print("The system is not controllable")
else:
K = (cnt.acker(A, B, des_poles)).A
kr = -1.0/(C @ np.linalg.inv(A - B @ K) @ B)
print('K: ', K)
print('kr: ', kr)
| true | true |
f73c65fbd1d8b07590bab28824b770d33b261d3b | 6,410 | py | Python | python/tvm/autotvm/feature.py | XiaoSong9905/tvm | 48940f697e15d5b50fa1f032003e6c700ae1e423 | [
"Apache-2.0"
] | 4,640 | 2017-08-17T19:22:15.000Z | 2019-11-04T15:29:46.000Z | python/tvm/autotvm/feature.py | XiaoSong9905/tvm | 48940f697e15d5b50fa1f032003e6c700ae1e423 | [
"Apache-2.0"
] | 3,022 | 2020-11-24T14:02:31.000Z | 2022-03-31T23:55:31.000Z | python/tvm/autotvm/feature.py | XiaoSong9905/tvm | 48940f697e15d5b50fa1f032003e6c700ae1e423 | [
"Apache-2.0"
] | 1,352 | 2017-08-17T19:30:38.000Z | 2019-11-04T16:09:29.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,
"""Extract feature of iter vars
There are two types of feature
1) Itervar feature
This feature is extracted based on loop variables.
Different loop structures will result in different shapes of feature
2) Curve sample feature (relation feature)
This feature is extracted by sampling relation curve.
This feature is invariant of loop structure.
"""
import struct
import numpy as np
import tvm._ffi
from tvm.target import Target
from tvm.driver import build_module
def ana_lower(sch, args, binds=None, simple_mode=True):
"""Do lower while keeping all axes in IR
i.e. Do not eliminate loop with extent of 1, do not vectorize, unroll or inject virtual threads
"""
sch = sch.normalize()
# Phase 0
context = tvm.transform.PassContext(config={"tir.debug_keep_trivial_loop": True})
with context:
mod = build_module.schedule_to_module(sch, args, binds=binds)
mod = tvm.tir.transform.StorageFlatten(64)(mod._move())
mod = tvm.tir.transform.Simplify()(mod._move())
assert simple_mode
return mod["main"].body
try:
_get_buffer_curve_sample_flatten = tvm._ffi.get_global_func(
"autotvm.feature.GetCurveSampleFeatureFlatten"
)
_get_itervar_feature = tvm._ffi.get_global_func("autotvm.feature.GetItervarFeature")
_get_itervar_feature_flatten = tvm._ffi.get_global_func(
"autotvm.feature.GetItervarFeatureFlatten"
)
except ValueError as e:
def raise_error(*args, **kwargs): # pylint: disable=unused-argument
raise RuntimeError("Cannot load autotvm c++ API")
_get_buffer_curve_sample_flatten = (
_get_itervar_feature
) = _get_itervar_feature_flatten = raise_error
def get_itervar_feature(sch, args, take_log=False):
"""get features of iter vars
Parameters
----------
sch: tvm.te.schedule.Schedule
args: Array of te.tensor.Tensor
the buffer args for lower
take_log: bool
whether take log of numerical statics
Returns
-------
features of every axis in the IR, see doc/features.md for detail
"""
stmt = ana_lower(sch, args, simple_mode=True)
feas = _get_itervar_feature(stmt, take_log)
# convert tvm node to python type
ret = []
for row in feas:
tmp = []
tmp.append([row[0][0].value, row[0][1]])
for item in row[1:]:
tmp.append([item[0].value] + [x.value for x in item[1:]])
ret.append(tmp)
return ret
def flatten_itervar_feature(fea):
"""flatten features into one-dimensional feature vectors
Parameters
----------
fea: list
return value of get_itervar_feature
Returns
-------
flatten_feature: np.ndarray
one-dimensional vector
"""
flatten = []
for axis in fea:
for pair in axis[1:]:
flatten.append(pair[1:])
return np.concatenate(flatten)
def get_itervar_feature_flatten(sch, args, take_log=True):
"""get flatten features of iter vars
this is equivalent to get_itervar_feature + flatten_itervar_feature, but much faster.
Parameters
----------
sch: tvm.te.schedule.Schedule
args: Array of te.tensor.Tensor
the buffer args for lower
take_log: bool
whether take log of numerical statics
Returns
-------
flatten_feature: np.ndarray
one-dimensional vector
"""
stmt = ana_lower(sch, args, simple_mode=True)
feas = _get_itervar_feature_flatten(stmt, take_log)
feas = struct.unpack("%df" % (len(feas) // 4), feas)
return feas
def get_flatten_name(fea):
"""Get names of feature after flatten.
Parameters
----------
fea: list or str
return value of get_itervar_feature or a line of logfile
Returns
-------
feature_names: Array of str
"""
feature_name = {
"_attr_": ["length", "nest_level", "topdown", "bottomup"]
+ ["ann_%d" % i for i in range(20)],
"_arith_": ["add", "mul", "div"],
"buf_touch": ["stride", "mod", "count", "reuse", "T_count", "T_reuse"],
}
if isinstance(fea, str):
# pylint: disable=import-outside-toplevel
from .record import decode
# flatten line to feature
line = fea
ret = decode(line)
if ret is None:
raise ValueError("Unsupported AutoTVM log format")
inp, _ = ret
target = Target(inp.target)
with target:
s, args = inp.template.instantiate(inp.config)
fea = get_itervar_feature(s, args)
names = []
ct = 0
for row in fea:
var_name = str(row[0][1])
for pair in row[1:]:
key = pair[0]
if key in feature_name:
name_list = feature_name[key]
else:
name_list = feature_name["buf_touch"]
for i in range(len((pair[1:]))):
names.append(".".join(["f%d" % ct, var_name, key, name_list[i]]))
ct += 1
return names
def get_buffer_curve_sample_flatten(sch, args, sample_n=30):
"""
Get flatten curve sample feature (relation feature)
Parameters
----------
sch: tvm.te.schedule.Schedule
args: Array of te.tensor.Tensor
the buffer args for lower
sample_n: int
number of sample points along one dimension
Returns
-------
flatten_feature: np.ndarray
one-dimensional vector
"""
stmt = ana_lower(sch, args, simple_mode=True)
feas = _get_buffer_curve_sample_flatten(stmt, sample_n, False)
feas = struct.unpack("%df" % (len(feas) // 4), feas)
return feas
| 29.675926 | 99 | 0.651482 |
import struct
import numpy as np
import tvm._ffi
from tvm.target import Target
from tvm.driver import build_module
def ana_lower(sch, args, binds=None, simple_mode=True):
sch = sch.normalize()
context = tvm.transform.PassContext(config={"tir.debug_keep_trivial_loop": True})
with context:
mod = build_module.schedule_to_module(sch, args, binds=binds)
mod = tvm.tir.transform.StorageFlatten(64)(mod._move())
mod = tvm.tir.transform.Simplify()(mod._move())
assert simple_mode
return mod["main"].body
try:
_get_buffer_curve_sample_flatten = tvm._ffi.get_global_func(
"autotvm.feature.GetCurveSampleFeatureFlatten"
)
_get_itervar_feature = tvm._ffi.get_global_func("autotvm.feature.GetItervarFeature")
_get_itervar_feature_flatten = tvm._ffi.get_global_func(
"autotvm.feature.GetItervarFeatureFlatten"
)
except ValueError as e:
def raise_error(*args, **kwargs):
raise RuntimeError("Cannot load autotvm c++ API")
_get_buffer_curve_sample_flatten = (
_get_itervar_feature
) = _get_itervar_feature_flatten = raise_error
def get_itervar_feature(sch, args, take_log=False):
stmt = ana_lower(sch, args, simple_mode=True)
feas = _get_itervar_feature(stmt, take_log)
ret = []
for row in feas:
tmp = []
tmp.append([row[0][0].value, row[0][1]])
for item in row[1:]:
tmp.append([item[0].value] + [x.value for x in item[1:]])
ret.append(tmp)
return ret
def flatten_itervar_feature(fea):
flatten = []
for axis in fea:
for pair in axis[1:]:
flatten.append(pair[1:])
return np.concatenate(flatten)
def get_itervar_feature_flatten(sch, args, take_log=True):
stmt = ana_lower(sch, args, simple_mode=True)
feas = _get_itervar_feature_flatten(stmt, take_log)
feas = struct.unpack("%df" % (len(feas) // 4), feas)
return feas
def get_flatten_name(fea):
feature_name = {
"_attr_": ["length", "nest_level", "topdown", "bottomup"]
+ ["ann_%d" % i for i in range(20)],
"_arith_": ["add", "mul", "div"],
"buf_touch": ["stride", "mod", "count", "reuse", "T_count", "T_reuse"],
}
if isinstance(fea, str):
from .record import decode
line = fea
ret = decode(line)
if ret is None:
raise ValueError("Unsupported AutoTVM log format")
inp, _ = ret
target = Target(inp.target)
with target:
s, args = inp.template.instantiate(inp.config)
fea = get_itervar_feature(s, args)
names = []
ct = 0
for row in fea:
var_name = str(row[0][1])
for pair in row[1:]:
key = pair[0]
if key in feature_name:
name_list = feature_name[key]
else:
name_list = feature_name["buf_touch"]
for i in range(len((pair[1:]))):
names.append(".".join(["f%d" % ct, var_name, key, name_list[i]]))
ct += 1
return names
def get_buffer_curve_sample_flatten(sch, args, sample_n=30):
stmt = ana_lower(sch, args, simple_mode=True)
feas = _get_buffer_curve_sample_flatten(stmt, sample_n, False)
feas = struct.unpack("%df" % (len(feas) // 4), feas)
return feas
| true | true |
f73c660e82f2cca179f278c0aeedbb615587689d | 6,230 | py | Python | examples/remarketing/upload_conversion_adjustment.py | Insutanto/google-ads-python | f63e318ca39f2ecc6546fba69994456815727578 | [
"Apache-2.0"
] | null | null | null | examples/remarketing/upload_conversion_adjustment.py | Insutanto/google-ads-python | f63e318ca39f2ecc6546fba69994456815727578 | [
"Apache-2.0"
] | null | null | null | examples/remarketing/upload_conversion_adjustment.py | Insutanto/google-ads-python | f63e318ca39f2ecc6546fba69994456815727578 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example imports conversion adjustments for existing conversions.
To set up a conversion action, run the add_conversion_action.py example.
"""
import argparse
import sys
from google.ads.google_ads.client import GoogleAdsClient
from google.ads.google_ads.errors import GoogleAdsException
def main(client, customer_id, conversion_action_id, gclid, adjustment_type,
         conversion_date_time, adjustment_date_time, restatement_value):
    """Uploads a conversion adjustment for a conversion tracked by a GCLID.

    Args:
        client: An initialized GoogleAdsClient instance.
        customer_id: The Google Ads customer ID.
        conversion_action_id: The ID of the conversion action the original
            conversion was uploaded to.
        gclid: The Google Click Identifier of the original conversion.
        adjustment_type: Either 'RETRACTION' or 'RESTATEMENT'
            (case-insensitive).
        conversion_date_time: Date and time of the original conversion, in
            the format 'yyyy-mm-dd hh:mm:ss+|-hh:mm'.
        adjustment_date_time: Date and time of the adjustment, same format.
        restatement_value: Optional adjusted value; only applied when the
            adjustment type is RESTATEMENT.

    Raises:
        ValueError: If adjustment_type is neither RETRACTION nor RESTATEMENT.
        SystemExit: If the upload request fails with a GoogleAdsException.
    """
    # Determine the adjustment type. Pin version='v4' for consistency with
    # every other get_type/get_service call in this example.
    conversion_adjustment_type_enum = (
        client.get_type('ConversionAdjustmentTypeEnum', version='v4'))
    if adjustment_type.lower() == 'retraction':
        conversion_adjustment_type = conversion_adjustment_type_enum.RETRACTION
    elif adjustment_type.lower() == 'restatement':
        conversion_adjustment_type = (
            conversion_adjustment_type_enum.RESTATEMENT)
    else:
        raise ValueError('Invalid adjustment type specified.')

    # Associates conversion adjustments with the existing conversion action.
    # The GCLID should have been uploaded before with a conversion.
    conversion_adjustment = (client.get_type('ConversionAdjustment',
                                             version='v4'))
    conversion_action_service = (client.get_service('ConversionActionService',
                                                    version='v4'))
    conversion_adjustment.conversion_action.value = (
        conversion_action_service.conversion_action_path(
            customer_id, conversion_action_id))
    conversion_adjustment.adjustment_type = conversion_adjustment_type
    conversion_adjustment.adjustment_date_time.value = adjustment_date_time

    # Set the Gclid Date
    conversion_adjustment.gclid_date_time_pair.gclid.value = gclid
    conversion_adjustment.gclid_date_time_pair.conversion_date_time.value = (
        conversion_date_time)

    # Sets adjusted value for adjustment type RESTATEMENT.
    if (restatement_value and
            conversion_adjustment_type ==
            conversion_adjustment_type_enum.RESTATEMENT):
        conversion_adjustment.restatement_value.adjusted_value.value = (
            float(restatement_value))

    conversion_adjustment_upload_service = (
        client.get_service('ConversionAdjustmentUploadService', version='v4'))
    try:
        # partial_failure=True lets valid adjustments succeed even if some
        # entries in the request fail.
        response = (
            conversion_adjustment_upload_service.
            upload_conversion_adjustments(customer_id,
                                          [conversion_adjustment],
                                          partial_failure=True))
        conversion_adjustment_result = response.results[0]
        print(f'Uploaded conversion that occurred at '
              f'"{conversion_adjustment_result.adjustment_date_time.value}" '
              f'from Gclid '
              f'"{conversion_adjustment_result.gclid_date_time_pair.gclid.value}"'
              f' to "{conversion_adjustment_result.conversion_action.value}"')
    except GoogleAdsException as ex:
        print(f'Request with ID "{ex.request_id}" failed with status '
              f'"{ex.error.code().name}" and includes the following errors:')
        for error in ex.failure.errors:
            print(f'\tError with message "{error.message}".')
            if error.location:
                for field_path_element in error.location.field_path_elements:
                    print(f'\t\tOn field: {field_path_element.field_name}')
        sys.exit(1)
if __name__ == '__main__':
    # GoogleAdsClient will read the google-ads.yaml configuration file in the
    # home directory if none is specified.
    google_ads_client = GoogleAdsClient.load_from_storage()
    parser = argparse.ArgumentParser(
        description='Uploads a conversion adjustment.')
    # The following argument(s) should be provided to run the example.
    parser.add_argument('-c', '--customer_id', type=str,
                        required=True, help='The Google Ads customer ID.')
    parser.add_argument('-a', '--conversion_action_id', type=str,
                        required=True, help='The conversion action ID to be '
                        'uploaded to.')
    parser.add_argument('-g', '--gclid', type=str,
                        required=True, help='The Google Click Identifier ID.')
    parser.add_argument('-d', '--adjustment_type', type=str,
                        required=True, help='The Adjustment type, e.g. '
                        'RETRACTION, RESTATEMENT')
    # Fixed doubled word ("The the") in the two date-time help strings below.
    parser.add_argument('-t', '--conversion_date_time', type=str,
                        required=True, help='The date and time of the '
                        'conversion. The format is '
                        '"yyyy-mm-dd hh:mm:ss+|-hh:mm", e.g. '
                        '“2019-01-01 12:32:45-08:00”')
    parser.add_argument('-v', '--adjustment_date_time', type=str,
                        required=True, help='The date and time of the '
                        'adjustment. The format is '
                        '"yyyy-mm-dd hh:mm:ss+|-hh:mm", e.g. '
                        '“2019-01-01 12:32:45-08:00”')
    # Optional: Specify an adjusted value for adjustment type RESTATEMENT.
    # This value will be ignored if you specify RETRACTION as adjustment type.
    parser.add_argument('-r', '--restatement_value', type=str,
                        required=False, help='The adjusted value for '
                        'adjustment type RESTATEMENT.')
    args = parser.parse_args()

    main(google_ads_client, args.customer_id, args.conversion_action_id,
         args.gclid, args.adjustment_type, args.conversion_date_time,
         args.adjustment_date_time, args.restatement_value)
import argparse
import sys
from google.ads.google_ads.client import GoogleAdsClient
from google.ads.google_ads.errors import GoogleAdsException
def main(client, customer_id, conversion_action_id, gclid, adjustment_type,
         conversion_date_time, adjustment_date_time, restatement_value):
    """Uploads a conversion adjustment for a conversion tracked by a GCLID.

    Args:
        client: An initialized GoogleAdsClient instance.
        customer_id: The Google Ads customer ID.
        conversion_action_id: The ID of the conversion action the original
            conversion was uploaded to.
        gclid: The Google Click Identifier of the original conversion.
        adjustment_type: Either 'RETRACTION' or 'RESTATEMENT'
            (case-insensitive).
        conversion_date_time: Date and time of the original conversion,
            in the format 'yyyy-mm-dd hh:mm:ss+|-hh:mm'.
        adjustment_date_time: Date and time of the adjustment, same format.
        restatement_value: Optional adjusted value; only applied when the
            adjustment type is RESTATEMENT.

    Raises:
        ValueError: If adjustment_type is neither RETRACTION nor RESTATEMENT.
        SystemExit: If the upload request fails with a GoogleAdsException.
    """
    # Map the user-supplied string onto the API enum value.
    conversion_adjustment_type_enum = (
        client.get_type('ConversionAdjustmentTypeEnum'))
    if adjustment_type.lower() == 'retraction':
        conversion_adjustment_type = conversion_adjustment_type_enum.RETRACTION
    elif adjustment_type.lower() == 'restatement':
        conversion_adjustment_type = (
            conversion_adjustment_type_enum.RESTATEMENT)
    else:
        raise ValueError('Invalid adjustment type specified.')
    # Associate the adjustment with the existing conversion action; the
    # GCLID is expected to have been uploaded before with a conversion.
    conversion_adjustment = (client.get_type('ConversionAdjustment',
                                             version='v4'))
    conversion_action_service = (client.get_service('ConversionActionService',
                                                    version='v4'))
    conversion_adjustment.conversion_action.value = (
        conversion_action_service.conversion_action_path(
            customer_id, conversion_action_id))
    conversion_adjustment.adjustment_type = conversion_adjustment_type
    conversion_adjustment.adjustment_date_time.value = adjustment_date_time
    # Identify the original conversion by its GCLID and conversion time.
    conversion_adjustment.gclid_date_time_pair.gclid.value = gclid
    conversion_adjustment.gclid_date_time_pair.conversion_date_time.value = (
        conversion_date_time)
    # The adjusted value applies only to RESTATEMENT adjustments.
    if (restatement_value and
            conversion_adjustment_type ==
            conversion_adjustment_type_enum.RESTATEMENT):
        conversion_adjustment.restatement_value.adjusted_value.value = (
            float(restatement_value))
    conversion_adjustment_upload_service = (
        client.get_service('ConversionAdjustmentUploadService', version='v4'))
    try:
        # partial_failure=True lets valid adjustments succeed even if some
        # entries in the request fail.
        response = (
            conversion_adjustment_upload_service.
            upload_conversion_adjustments(customer_id,
                                          [conversion_adjustment],
                                          partial_failure=True))
        conversion_adjustment_result = response.results[0]
        print(f'Uploaded conversion that occurred at '
              f'"{conversion_adjustment_result.adjustment_date_time.value}" '
              f'from Gclid '
              f'"{conversion_adjustment_result.gclid_date_time_pair.gclid.value}"'
              f' to "{conversion_adjustment_result.conversion_action.value}"')
    except GoogleAdsException as ex:
        # Print every error in the failure and exit with a non-zero status.
        print(f'Request with ID "{ex.request_id}" failed with status '
              f'"{ex.error.code().name}" and includes the following errors:')
        for error in ex.failure.errors:
            print(f'\tError with message "{error.message}".')
            if error.location:
                for field_path_element in error.location.field_path_elements:
                    print(f'\t\tOn field: {field_path_element.field_name}')
        sys.exit(1)
if __name__ == '__main__':
    # GoogleAdsClient reads the google-ads.yaml configuration file from the
    # home directory when no path is specified.
    google_ads_client = GoogleAdsClient.load_from_storage()
    parser = argparse.ArgumentParser(
        description='Uploads a conversion adjustment.')
    # Required arguments for the example.
    parser.add_argument('-c', '--customer_id', type=str,
                        required=True, help='The Google Ads customer ID.')
    parser.add_argument('-a', '--conversion_action_id', type=str,
                        required=True, help='The conversion action ID to be '
                        'uploaded to.')
    parser.add_argument('-g', '--gclid', type=str,
                        required=True, help='The Google Click Identifier ID.')
    parser.add_argument('-d', '--adjustment_type', type=str,
                        required=True, help='The Adjustment type, e.g. '
                        'RETRACTION, RESTATEMENT')
    # NOTE(review): the two help strings below read "The the" — typo in the
    # runtime string; fix separately.
    parser.add_argument('-t', '--conversion_date_time', type=str,
                        required=True, help='The the date and time of the '
                        'conversion. The format is '
                        '"yyyy-mm-dd hh:mm:ss+|-hh:mm", e.g. '
                        '“2019-01-01 12:32:45-08:00”')
    parser.add_argument('-v', '--adjustment_date_time', type=str,
                        required=True, help='The the date and time of the '
                        'adjustment. The format is '
                        '"yyyy-mm-dd hh:mm:ss+|-hh:mm", e.g. '
                        '“2019-01-01 12:32:45-08:00”')
    # Optional: adjusted value, only used for RESTATEMENT adjustments.
    parser.add_argument('-r', '--restatement_value', type=str,
                        required=False, help='The adjusted value for '
                        'adjustment type RESTATEMENT.')
    args = parser.parse_args()
    main(google_ads_client, args.customer_id, args.conversion_action_id,
         args.gclid, args.adjustment_type, args.conversion_date_time,
         args.adjustment_date_time, args.restatement_value)
| true | true |
f73c661beee6fb71cf52b9938a4e6bc9bc759ef4 | 5,921 | py | Python | visualization/eolearn/visualization/eoexecutor_visualization.py | gobaRules/eo-learn | 25174e5e0759e35b616712423f01b03527a4b227 | [
"MIT"
] | null | null | null | visualization/eolearn/visualization/eoexecutor_visualization.py | gobaRules/eo-learn | 25174e5e0759e35b616712423f01b03527a4b227 | [
"MIT"
] | null | null | null | visualization/eolearn/visualization/eoexecutor_visualization.py | gobaRules/eo-learn | 25174e5e0759e35b616712423f01b03527a4b227 | [
"MIT"
] | null | null | null | """
Module with utilities for vizualizing EOExecutor
"""
import os
import inspect
import warnings
import base64
import copy
try:
import matplotlib.pyplot as plt
except ImportError:
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import graphviz
import pygments
import pygments.lexers
from pygments.formatters.html import HtmlFormatter
from jinja2 import Environment, FileSystemLoader
class EOExecutorVisualization:
""" Class handling EOExecutor visualizations, particularly creating reports
"""
def __init__(self, eoexecutor):
"""
:param eoexecutor: An instance of EOExecutor
:type eoexecutor: EOExecutor
"""
self.eoexecutor = eoexecutor
def make_report(self):
""" Makes a html report and saves it into the same folder where logs are stored.
"""
if self.eoexecutor.execution_stats is None:
raise RuntimeError('Cannot produce a report without running the executor first, check EOExecutor.run '
'method')
if os.environ.get('DISPLAY', '') == '':
plt.switch_backend('Agg')
try:
dependency_graph = self._create_dependency_graph()
except graphviz.backend.ExecutableNotFound as ex:
dependency_graph = None
warnings.warn("{}.\nPlease install the system package 'graphviz' (in addition "
"to the python package) to have the dependency graph in the final report!".format(ex),
Warning, stacklevel=2)
task_descriptions = self._get_task_descriptions()
formatter = HtmlFormatter(linenos=True)
task_source = self._render_task_source(formatter)
execution_stats = self._render_execution_errors(formatter)
template = self._get_template()
html = template.render(dependency_graph=dependency_graph,
task_descriptions=task_descriptions,
task_source=task_source,
execution_stats=execution_stats,
execution_logs=self.eoexecutor.execution_logs,
code_css=formatter.get_style_defs())
if not os.path.isdir(self.eoexecutor.report_folder):
os.mkdir(self.eoexecutor.report_folder)
with open(self.eoexecutor.get_report_filename(), 'w') as fout:
fout.write(html)
def _create_dependency_graph(self):
""" Provides an image of dependecy graph
"""
dot = self.eoexecutor.workflow.dependency_graph()
return base64.b64encode(dot.pipe()).decode()
def _get_task_descriptions(self):
""" Prepares a list of task names and their initialization parameters
"""
descriptions = []
for task_id, dependency in self.eoexecutor.workflow.uuid_dict.items():
task = dependency.task
init_args = {key: value.replace('<', '<').replace('>', '>') for key, value in
task.private_task_config.init_args.items()}
desc = {
'title': "{}_{} ({})".format(task.__class__.__name__, task_id[:6], task.__module__),
'args': init_args
}
descriptions.append(desc)
return descriptions
def _render_task_source(self, formatter):
""" Collects source code of each costum task
"""
lexer = pygments.lexers.get_lexer_by_name("python", stripall=True)
sources = {}
for dep in self.eoexecutor.workflow.dependencies:
task = dep.task
if task.__module__.startswith("eolearn"):
continue
key = "{} ({})".format(task.__class__.__name__, task.__module__)
if key in sources:
continue
try:
source = inspect.getsource(task.__class__)
source = pygments.highlight(source, lexer, formatter)
except TypeError:
# Jupyter notebook does not have __file__ method to collect source code
# StackOverflow provides no solutions
# Could be investigated further by looking into Jupyter Notebook source code
source = 'Cannot collect source code of a task which is not defined in a .py file'
sources[key] = source
return sources
def _render_execution_errors(self, formatter):
""" Renders stack traces of those executions which failed
"""
tb_lexer = pygments.lexers.get_lexer_by_name("py3tb", stripall=True)
executions = []
for orig_execution in self.eoexecutor.execution_stats:
execution = copy.deepcopy(orig_execution)
if self.eoexecutor.STATS_ERROR in execution:
execution[self.eoexecutor.STATS_ERROR] = pygments.highlight(execution[self.eoexecutor.STATS_ERROR],
tb_lexer, formatter)
executions.append(execution)
return executions
def _get_template(self):
""" Loads and sets up a template for report
"""
templates_dir = os.path.join(os.path.dirname(__file__), 'report_templates')
env = Environment(loader=FileSystemLoader(templates_dir))
env.filters['datetime'] = self._format_datetime
env.globals.update(timedelta=self._format_timedelta)
template = env.get_template(self.eoexecutor.REPORT_FILENAME)
return template
@staticmethod
def _format_datetime(value):
""" Method for formatting datetime objects into report
"""
return value.strftime('%X %x %Z')
@staticmethod
def _format_timedelta(value1, value2):
""" Method for formatting time delta into report
"""
return str(value2 - value1)
| 35.244048 | 115 | 0.618139 |
import os
import inspect
import warnings
import base64
import copy
try:
import matplotlib.pyplot as plt
except ImportError:
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import graphviz
import pygments
import pygments.lexers
from pygments.formatters.html import HtmlFormatter
from jinja2 import Environment, FileSystemLoader
class EOExecutorVisualization:
    """ Class handling EOExecutor visualizations, particularly creating reports
    """

    def __init__(self, eoexecutor):
        """
        :param eoexecutor: An instance of EOExecutor
        :type eoexecutor: EOExecutor
        """
        self.eoexecutor = eoexecutor

    def make_report(self):
        """ Makes a html report and saves it into the same folder where logs are stored.

        :raises RuntimeError: If the executor has not been run yet.
        """
        if self.eoexecutor.execution_stats is None:
            raise RuntimeError('Cannot produce a report without running the executor first, check EOExecutor.run '
                               'method')
        # Without a display (headless environment) matplotlib must use Agg.
        if os.environ.get('DISPLAY', '') == '':
            plt.switch_backend('Agg')
        try:
            dependency_graph = self._create_dependency_graph()
        except graphviz.backend.ExecutableNotFound as ex:
            # graphviz system binary is missing; render without the graph.
            dependency_graph = None
            warnings.warn("{}.\nPlease install the system package 'graphviz' (in addition "
                          "to the python package) to have the dependency graph in the final report!".format(ex),
                          Warning, stacklevel=2)
        task_descriptions = self._get_task_descriptions()
        formatter = HtmlFormatter(linenos=True)
        task_source = self._render_task_source(formatter)
        execution_stats = self._render_execution_errors(formatter)
        template = self._get_template()
        html = template.render(dependency_graph=dependency_graph,
                               task_descriptions=task_descriptions,
                               task_source=task_source,
                               execution_stats=execution_stats,
                               execution_logs=self.eoexecutor.execution_logs,
                               code_css=formatter.get_style_defs())
        if not os.path.isdir(self.eoexecutor.report_folder):
            os.mkdir(self.eoexecutor.report_folder)
        with open(self.eoexecutor.get_report_filename(), 'w') as fout:
            fout.write(html)

    def _create_dependency_graph(self):
        """ Provides a base64-encoded image of the dependency graph. """
        dot = self.eoexecutor.workflow.dependency_graph()
        return base64.b64encode(dot.pipe()).decode()

    def _get_task_descriptions(self):
        """ Prepares a list of task names and their initialization parameters.
        """
        descriptions = []
        for task_id, dependency in self.eoexecutor.workflow.uuid_dict.items():
            task = dependency.task
            # NOTE(review): these replace calls are no-ops as written —
            # presumably intended HTML escaping ('&lt;'/'&gt;'); confirm.
            init_args = {key: value.replace('<', '<').replace('>', '>') for key, value in
                         task.private_task_config.init_args.items()}
            desc = {
                'title': "{}_{} ({})".format(task.__class__.__name__, task_id[:6], task.__module__),
                'args': init_args
            }
            descriptions.append(desc)
        return descriptions

    def _render_task_source(self, formatter):
        """ Collects and syntax-highlights source code of each custom task
        (tasks from the eolearn package itself are skipped).
        """
        lexer = pygments.lexers.get_lexer_by_name("python", stripall=True)
        sources = {}
        for dep in self.eoexecutor.workflow.dependencies:
            task = dep.task
            if task.__module__.startswith("eolearn"):
                continue
            key = "{} ({})".format(task.__class__.__name__, task.__module__)
            if key in sources:
                continue
            try:
                source = inspect.getsource(task.__class__)
                source = pygments.highlight(source, lexer, formatter)
            except TypeError:
                # getsource fails for classes not defined in a .py file
                # (e.g. defined interactively in a notebook).
                source = 'Cannot collect source code of a task which is not defined in a .py file'
            sources[key] = source
        return sources

    def _render_execution_errors(self, formatter):
        """ Renders stack traces of those executions which failed. """
        tb_lexer = pygments.lexers.get_lexer_by_name("py3tb", stripall=True)
        executions = []
        for orig_execution in self.eoexecutor.execution_stats:
            # Deep-copy so highlighting does not mutate the executor's stats.
            execution = copy.deepcopy(orig_execution)
            if self.eoexecutor.STATS_ERROR in execution:
                execution[self.eoexecutor.STATS_ERROR] = pygments.highlight(execution[self.eoexecutor.STATS_ERROR],
                                                                            tb_lexer, formatter)
            executions.append(execution)
        return executions

    def _get_template(self):
        """ Loads and sets up a Jinja2 template for the report. """
        templates_dir = os.path.join(os.path.dirname(__file__), 'report_templates')
        env = Environment(loader=FileSystemLoader(templates_dir))
        env.filters['datetime'] = self._format_datetime
        env.globals.update(timedelta=self._format_timedelta)
        template = env.get_template(self.eoexecutor.REPORT_FILENAME)
        return template

    @staticmethod
    def _format_datetime(value):
        """ Formats a datetime object for the report ('%X %x %Z'). """
        return value.strftime('%X %x %Z')

    @staticmethod
    def _format_timedelta(value1, value2):
        """ Formats the difference of two datetimes as a string. """
        return str(value2 - value1)
| true | true |
f73c667dad1d042c5d1aa4117b7b71fd494fa09c | 17,125 | py | Python | src/etcd/client.py | thedrow/python-etcd | b4ff9cea95d0dbb1a8f83d9ff140fd9d9d977f8e | [
"MIT"
] | null | null | null | src/etcd/client.py | thedrow/python-etcd | b4ff9cea95d0dbb1a8f83d9ff140fd9d9d977f8e | [
"MIT"
] | null | null | null | src/etcd/client.py | thedrow/python-etcd | b4ff9cea95d0dbb1a8f83d9ff140fd9d9d977f8e | [
"MIT"
] | null | null | null | """
.. module:: python-etcd
:synopsis: A python etcd client.
.. moduleauthor:: Jose Plana <jplana@gmail.com>
"""
import urllib3
import json
import ssl
import etcd
class Client(object):
    """
    Client for etcd, the distributed log service using raft.
    """

    # HTTP verbs used against the etcd v2 REST API.
    _MGET = 'GET'
    _MPUT = 'PUT'
    _MPOST = 'POST'
    _MDELETE = 'DELETE'
    # kwargs forwarded as Compare-and-Swap conditions on writes.
    _comparison_conditions = set(('prevValue', 'prevIndex', 'prevExist'))
    # kwargs recognized as query options on reads.
    _read_options = set(('recursive', 'wait', 'waitIndex', 'sorted', 'consistent'))
    # kwargs recognized as conditions on deletes.
    _del_conditions = set(('prevValue', 'prevIndex'))

    def __init__(
            self,
            host='127.0.0.1',
            port=4001,
            read_timeout=60,
            allow_redirect=True,
            protocol='http',
            cert=None,
            ca_cert=None,
            allow_reconnect=False,
    ):
        """
        Initialize the client.

        Args:
            host (mixed):
                If a string, IP to connect to.
                If a tuple ((host, port), (host, port), ...)

            port (int): Port used to connect to etcd.

            read_timeout (int): max seconds to wait for a read.

            allow_redirect (bool): allow the client to connect to other nodes.

            protocol (str): Protocol used to connect to etcd.

            cert (mixed): If a string, the whole ssl client certificate;
                          if a tuple, the cert and key file names.

            ca_cert (str): The ca certificate. If pressent it will enable
                           validation.

            allow_reconnect (bool): allow the client to reconnect to another
                                    etcd server in the cluster in the case the
                                    default one does not respond.
        """
        self._machines_cache = []

        self._protocol = protocol

        def uri(protocol, host, port):
            return '%s://%s:%d' % (protocol, host, port)

        if not isinstance(host, tuple):
            self._host = host
            self._port = port
        else:
            # A tuple of (host, port) pairs: use the first as the primary
            # endpoint and seed the machines cache with all of them.
            self._host, self._port = host[0]
            self._machines_cache.extend(
                [uri(self._protocol, *conn) for conn in host])

        self._base_uri = uri(self._protocol, self._host, self._port)

        self.version_prefix = '/v2'

        self._read_timeout = read_timeout
        self._allow_redirect = allow_redirect
        self._allow_reconnect = allow_reconnect

        # SSL Client certificate support

        kw = {}

        if self._read_timeout > 0:
            kw['timeout'] = self._read_timeout

        if protocol == 'https':
            # If we don't allow TLSv1, clients using older version of OpenSSL
            # (<1.0) won't be able to connect.
            kw['ssl_version'] = ssl.PROTOCOL_TLSv1

            if cert:
                if isinstance(cert, tuple):
                    # Key and cert are separate
                    kw['cert_file'] = cert[0]
                    kw['key_file'] = cert[1]
                else:
                    # combined certificate
                    kw['cert_file'] = cert

            if ca_cert:
                kw['ca_certs'] = ca_cert
                kw['cert_reqs'] = ssl.CERT_REQUIRED

        self.http = urllib3.PoolManager(num_pools=10, **kw)

        if self._allow_reconnect:
            # we need the set of servers in the cluster in order to try
            # reconnecting upon error.
            self._machines_cache = self.machines
            self._machines_cache.remove(self._base_uri)
        else:
            self._machines_cache = []

    @property
    def base_uri(self):
        """URI used by the client to connect to etcd."""
        return self._base_uri

    @property
    def host(self):
        """Node to connect etcd."""
        return self._host

    @property
    def port(self):
        """Port to connect etcd."""
        return self._port

    @property
    def protocol(self):
        """Protocol used to connect etcd."""
        return self._protocol

    @property
    def read_timeout(self):
        """Max seconds to wait for a read."""
        return self._read_timeout

    @property
    def allow_redirect(self):
        """Allow the client to connect to other nodes."""
        return self._allow_redirect

    @property
    def machines(self):
        """
        Members of the cluster.

        Returns:
            list. str with all the nodes in the cluster.

        >>> print client.machines
        ['http://127.0.0.1:4001', 'http://127.0.0.1:4002']
        """
        # The /machines endpoint returns a comma-separated list of node URIs.
        return [
            node.strip() for node in self.api_execute(
                self.version_prefix + '/machines',
                self._MGET).data.decode('utf-8').split(',')
        ]

    @property
    def leader(self):
        """
        Returns:
            str. the leader of the cluster.

        >>> print client.leader
        'http://127.0.0.1:4001'
        """
        return self.api_execute(
            self.version_prefix + '/leader',
            self._MGET).data.decode('ascii')

    @property
    def key_endpoint(self):
        """
        REST key endpoint.
        """
        return self.version_prefix + '/keys'

    def __contains__(self, key):
        """
        Check if a key is available in the cluster.

        >>> print 'key' in client
        True
        """
        try:
            self.get(key)
            return True
        except KeyError:
            return False

    def _sanitize_key(self, key):
        """Ensure the key has a leading slash, as the REST API requires."""
        if not key.startswith('/'):
            key = "/{}".format(key)
        return key

    def write(self, key, value, ttl=None, dir=False, append=False, **kwdargs):
        """
        Writes the value for a key, possibly doing atomit Compare-and-Swap

        Args:
            key (str):  Key.

            value (object):  value to set

            ttl (int):  Time in seconds of expiration (optional).

            dir (bool): Set to true if we are writing a directory; default is false.

            append (bool): If true, it will post to append the new value to the dir, creating a sequential key. Defaults to false.

            Other parameters modifying the write method are accepted:


            prevValue (str): compare key to this value, and swap only if corresponding (optional).

            prevIndex (int): modify key only if actual modifiedIndex matches the provided one (optional).

            prevExist (bool): If false, only create key; if true, only update key.

        Returns:
            client.EtcdResult

        >>> print client.write('/key', 'newValue', ttl=60, prevExist=False).value
        'newValue'

        """
        key = self._sanitize_key(key)
        params = {}
        if value is not None:
            params['value'] = value

        if ttl:
            params['ttl'] = ttl

        if dir:
            if value:
                raise etcd.EtcdException(
                    'Cannot create a directory with a value')
            params['dir'] = "true"

        for (k, v) in kwdargs.items():
            if k in self._comparison_conditions:
                if type(v) == bool:
                    # etcd expects lowercase string booleans in query params.
                    params[k] = v and "true" or "false"
                else:
                    params[k] = v

        # POST appends a sequential key under a dir; PUT sets the key itself.
        method = append and self._MPOST or self._MPUT
        # '_endpoint' is an internal override; presumably used by the
        # higher-level primitives (locks/elections) — TODO confirm.
        if '_endpoint' in kwdargs:
            path = kwdargs['_endpoint'] + key
        else:
            path = self.key_endpoint + key
        response = self.api_execute(path, method, params=params)
        return self._result_from_response(response)

    def update(self, obj):
        """
        Updates the value for a key atomically. Typical usage would be:

        c = etcd.Client()
        o = c.read("/somekey")
        o.value += 1
        c.update(o)

        Args:
            obj (etcd.EtcdResult): The object that needs updating.

        """
        kwdargs = {
            'dir': obj.dir,
            'ttl': obj.ttl,
            'prevExist': True
        }
        if not obj.dir:
            # prevIndex on a dir causes a 'not a file' error. d'oh!
            kwdargs['prevIndex'] = obj.modifiedIndex
        return self.write(obj.key, obj.value, **kwdargs)

    def read(self, key, **kwdargs):
        """
        Returns the value of the key 'key'.

        Args:
            key (str):  Key.

            Recognized kwd args

            recursive (bool): If you should fetch recursively a dir

            wait (bool): If we should wait and return next time the key is changed

            waitIndex (int): The index to fetch results from.

            sorted (bool): Sort the output keys (alphanumerically)

            timeout (int):  max seconds to wait for a read.

        Returns:
            client.EtcdResult (or an array of client.EtcdResult if a
            subtree is queried)

        Raises:
            KeyValue:  If the key doesn't exists.

            urllib3.exceptions.TimeoutError: If timeout is reached.

        >>> print client.get('/key').value
        'value'
        """
        key = self._sanitize_key(key)

        params = {}
        for (k, v) in kwdargs.items():
            if k in self._read_options:
                if type(v) == bool:
                    params[k] = v and "true" or "false"
                else:
                    params[k] = v

        timeout = kwdargs.get('timeout', None)

        response = self.api_execute(
            self.key_endpoint + key, self._MGET, params=params, timeout=timeout)
        return self._result_from_response(response)

    def delete(self, key, recursive=None, dir=None, **kwdargs):
        """
        Removed a key from etcd.

        Args:

            key (str):  Key.

            recursive (bool): if we want to recursively delete a directory, set
                              it to true

            dir (bool): if we want to delete a directory, set it to true

            prevValue (str): compare key to this value, and swap only if
                             corresponding (optional).

            prevIndex (int): modify key only if actual modifiedIndex matches the
                             provided one (optional).

        Returns:
            client.EtcdResult

        Raises:
            KeyValue:  If the key doesn't exists.

        >>> print client.delete('/key').key
        '/key'
        """
        key = self._sanitize_key(key)

        kwds = {}
        if recursive is not None:
            kwds['recursive'] = recursive and "true" or "false"
        if dir is not None:
            kwds['dir'] = dir and "true" or "false"

        for k in self._del_conditions:
            if k in kwdargs:
                kwds[k] = kwdargs[k]

        response = self.api_execute(
            self.key_endpoint + key, self._MDELETE, params=kwds)
        return self._result_from_response(response)

    # Higher-level methods on top of the basic primitives
    def test_and_set(self, key, value, prev_value, ttl=None):
        """
        Atomic test & set operation.
        It will check if the value of 'key' is 'prev_value',
        if the the check is correct will change the value for 'key' to 'value'
        if the the check is false an exception will be raised.

        Args:
            key (str):  Key.
            value (object):  value to set
            prev_value (object):  previous value.
            ttl (int):  Time in seconds of expiration (optional).

        Returns:
            client.EtcdResult

        Raises:
            ValueError: When the 'prev_value' is not the current value.

        >>> print client.test_and_set('/key', 'new', 'old', ttl=60).value
        'new'

        """
        return self.write(key, value, prevValue=prev_value, ttl=ttl)

    def set(self, key, value, ttl=None):
        """
        Compatibility: sets the value of the key 'key' to the value 'value'

        Args:
            key (str):  Key.
            value (object):  value to set
            ttl (int):  Time in seconds of expiration (optional).

        Returns:
            client.EtcdResult

        Raises:
           etcd.EtcdException: when something weird goes wrong.

        """
        return self.write(key, value, ttl=ttl)

    def get(self, key):
        """
        Returns the value of the key 'key'.

        Args:
            key (str):  Key.

        Returns:
            client.EtcdResult

        Raises:
            KeyError:  If the key doesn't exists.

        >>> print client.get('/key').value
        'value'

        """
        return self.read(key)

    def watch(self, key, index=None, timeout=None):
        """
        Blocks until a new event has been received, starting at index 'index'

        Args:
            key (str):  Key.

            index (int): Index to start from.

            timeout (int):  max seconds to wait for a read.

        Returns:
            client.EtcdResult

        Raises:
            KeyValue:  If the key doesn't exists.

            urllib3.exceptions.TimeoutError: If timeout is reached.

        >>> print client.watch('/key').value
        'value'

        """
        if index:
            return self.read(key, wait=True, waitIndex=index, timeout=timeout)
        else:
            return self.read(key, wait=True, timeout=timeout)

    def eternal_watch(self, key, index=None):
        """
        Generator that will yield changes from a key.
        Note that this method will block forever until an event is generated.

        Args:
            key (str):  Key to subcribe to.
            index (int):  Index from where the changes will be received.

        Yields:
            client.EtcdResult

        >>> for event in client.eternal_watch('/subcription_key'):
        ...     print event.value
        ...
        value1
        value2

        """
        local_index = index
        while True:
            # timeout=0 disables the read timeout so the watch can block
            # indefinitely until an event arrives.
            response = self.watch(key, index=local_index, timeout=0)
            if local_index is not None:
                # Advance the wait index by one after each received event.
                local_index += 1
            yield response

    def get_lock(self, *args, **kwargs):
        """Returns an etcd.Lock built on this client."""
        return etcd.Lock(self, *args, **kwargs)

    @property
    def election(self):
        """Returns an etcd.LeaderElection helper built on this client."""
        return etcd.LeaderElection(self)

    def _result_from_response(self, response):
        """ Creates an EtcdResult from json dictionary """
        try:
            res = json.loads(response.data.decode('utf-8'))
            r = etcd.EtcdResult(**res)
            if response.status == 201:
                # 201 Created means the key did not exist before this write.
                r.newKey = True
            r.parse_headers(response)
            return r
        except Exception as e:
            raise etcd.EtcdException(
                'Unable to decode server response: %s' % e)

    def _next_server(self):
        """ Selects the next server in the list, refreshes the server list. """
        try:
            return self._machines_cache.pop()
        except IndexError:
            raise etcd.EtcdException('No more machines in the cluster')

    def api_execute(self, path, method, params=None, timeout=None):
        """ Executes the query.

        Retries against the next cached cluster member on connection
        failure (urllib3 MaxRetryError), and refreshes the machines cache
        after any failover.
        """

        some_request_failed = False
        response = False

        if timeout is None:
            timeout = self.read_timeout

        if timeout == 0:
            # timeout=0 means "no timeout" for blocking watches.
            timeout = None

        if not path.startswith('/'):
            raise ValueError('Path does not start with /')

        while not response:
            try:
                url = self._base_uri + path

                if (method == self._MGET) or (method == self._MDELETE):
                    response = self.http.request(
                        method,
                        url,
                        timeout=timeout,
                        fields=params,
                        redirect=self.allow_redirect)

                elif (method == self._MPUT) or (method == self._MPOST):
                    response = self.http.request_encode_body(
                        method,
                        url,
                        fields=params,
                        timeout=timeout,
                        encode_multipart=False,
                        redirect=self.allow_redirect)
                else:
                    raise etcd.EtcdException(
                        'HTTP method {} not supported'.format(method))

            except urllib3.exceptions.MaxRetryError:
                # Current node unreachable: fail over to the next cached one.
                self._base_uri = self._next_server()
                some_request_failed = True

        if some_request_failed:
            # Refresh the cluster view after a successful failover.
            self._machines_cache = self.machines
            self._machines_cache.remove(self._base_uri)

        return self._handle_server_response(response)

    def _handle_server_response(self, response):
        """ Handles the server response """
        if response.status in [200, 201]:
            return response

        else:
            resp = response.data.decode('utf-8')

            # throw the appropriate exception
            try:
                r = json.loads(resp)
            except ValueError:
                r = None
            if r:
                etcd.EtcdError.handle(**r)
            else:
                raise etcd.EtcdException(resp)
| 28.494176 | 130 | 0.529927 | import urllib3
import json
import ssl
import etcd
class Client(object):
_MGET = 'GET'
_MPUT = 'PUT'
_MPOST = 'POST'
_MDELETE = 'DELETE'
_comparison_conditions = set(('prevValue', 'prevIndex', 'prevExist'))
_read_options = set(('recursive', 'wait', 'waitIndex', 'sorted', 'consistent'))
_del_conditions = set(('prevValue', 'prevIndex'))
def __init__(
self,
host='127.0.0.1',
port=4001,
read_timeout=60,
allow_redirect=True,
protocol='http',
cert=None,
ca_cert=None,
allow_reconnect=False,
):
self._machines_cache = []
self._protocol = protocol
def uri(protocol, host, port):
return '%s://%s:%d' % (protocol, host, port)
if not isinstance(host, tuple):
self._host = host
self._port = port
else:
self._host, self._port = host[0]
self._machines_cache.extend(
[uri(self._protocol, *conn) for conn in host])
self._base_uri = uri(self._protocol, self._host, self._port)
self.version_prefix = '/v2'
self._read_timeout = read_timeout
self._allow_redirect = allow_redirect
self._allow_reconnect = allow_reconnect
kw = {}
if self._read_timeout > 0:
kw['timeout'] = self._read_timeout
if protocol == 'https':
# (<1.0) won't be able to connect.
kw['ssl_version'] = ssl.PROTOCOL_TLSv1
if cert:
if isinstance(cert, tuple):
kw['cert_file'] = cert[0]
kw['key_file'] = cert[1]
else:
kw['cert_file'] = cert
if ca_cert:
kw['ca_certs'] = ca_cert
kw['cert_reqs'] = ssl.CERT_REQUIRED
self.http = urllib3.PoolManager(num_pools=10, **kw)
if self._allow_reconnect:
self._machines_cache = self.machines
self._machines_cache.remove(self._base_uri)
else:
self._machines_cache = []
    @property
    def base_uri(self):
        """URI of the cluster member requests are currently sent to."""
        return self._base_uri
    @property
    def host(self):
        """Hostname of the initial endpoint."""
        return self._host
    @property
    def port(self):
        """Port of the initial endpoint."""
        return self._port
    @property
    def protocol(self):
        """URI scheme in use (e.g. 'http' or 'https')."""
        return self._protocol
    @property
    def read_timeout(self):
        """Default per-request timeout in seconds (0 disables the timeout)."""
        return self._read_timeout
    @property
    def allow_redirect(self):
        """Whether HTTP redirects from the server are followed."""
        return self._allow_redirect
    @property
    def machines(self):
        """Cluster members as reported by the /machines endpoint."""
        return [
            node.strip() for node in self.api_execute(
                self.version_prefix + '/machines',
                self._MGET).data.decode('utf-8').split(',')
        ]
    @property
    def leader(self):
        """URI of the current cluster leader."""
        return self.api_execute(
            self.version_prefix + '/leader',
            self._MGET).data.decode('ascii')
    @property
    def key_endpoint(self):
        """Path prefix of the key-space API."""
        return self.version_prefix + '/keys'
def __contains__(self, key):
try:
self.get(key)
return True
except KeyError:
return False
def _sanitize_key(self, key):
if not key.startswith('/'):
key = "/{}".format(key)
return key
    def write(self, key, value, ttl=None, dir=False, append=False, **kwdargs):
        """Create or update *key* with *value*.

        *ttl* sets an expiry in seconds; *dir*=True creates a directory (in
        which case *value* must be empty); *append*=True issues a POST to
        create an in-order key under *key*.  The comparison keywords
        (prevValue/prevIndex/prevExist) make the write conditional.
        Returns the parsed result via _result_from_response().
        """
        key = self._sanitize_key(key)
        params = {}
        if value is not None:
            params['value'] = value
        if ttl:
            params['ttl'] = ttl
        if dir:
            if value:
                raise etcd.EtcdException(
                    'Cannot create a directory with a value')
            params['dir'] = "true"
        for (k, v) in kwdargs.items():
            if k in self._comparison_conditions:
                if type(v) == bool:
                    # etcd expects lowercase string literals, not Python bools
                    params[k] = v and "true" or "false"
                else:
                    params[k] = v
        # POST appends an in-order key; PUT sets the key itself
        method = append and self._MPOST or self._MPUT
        if '_endpoint' in kwdargs:
            # internal hook allowing callers to target a non-key endpoint
            path = kwdargs['_endpoint'] + key
        else:
            path = self.key_endpoint + key
        response = self.api_execute(path, method, params=params)
        return self._result_from_response(response)
def update(self, obj):
kwdargs = {
'dir': obj.dir,
'ttl': obj.ttl,
'prevExist': True
}
if not obj.dir:
kwdargs['prevIndex'] = obj.modifiedIndex
return self.write(obj.key, obj.value, **kwdargs)
def read(self, key, **kwdargs):
key = self._sanitize_key(key)
params = {}
for (k, v) in kwdargs.items():
if k in self._read_options:
if type(v) == bool:
params[k] = v and "true" or "false"
else:
params[k] = v
timeout = kwdargs.get('timeout', None)
response = self.api_execute(
self.key_endpoint + key, self._MGET, params=params, timeout=timeout)
return self._result_from_response(response)
def delete(self, key, recursive=None, dir=None, **kwdargs):
key = self._sanitize_key(key)
kwds = {}
if recursive is not None:
kwds['recursive'] = recursive and "true" or "false"
if dir is not None:
kwds['dir'] = dir and "true" or "false"
for k in self._del_conditions:
if k in kwdargs:
kwds[k] = kwdargs[k]
response = self.api_execute(
self.key_endpoint + key, self._MDELETE, params=kwds)
return self._result_from_response(response)
    # Higher-level methods on top of the basic primitives
    def test_and_set(self, key, value, prev_value, ttl=None):
        """Atomically replace *key* only if it currently holds *prev_value*."""
        return self.write(key, value, prevValue=prev_value, ttl=ttl)
    def set(self, key, value, ttl=None):
        """Unconditionally set *key* to *value* (optionally with a TTL)."""
        return self.write(key, value, ttl=ttl)
    def get(self, key):
        """Shorthand for read(key)."""
        return self.read(key)
    def watch(self, key, index=None, timeout=None):
        """Block until *key* changes; with *index*, wait for that index.

        NOTE(review): a falsy index (0) is treated like None — confirm that
        index 0 is never a meaningful wait target.
        """
        if index:
            return self.read(key, wait=True, waitIndex=index, timeout=timeout)
        else:
            return self.read(key, wait=True, timeout=timeout)
    def eternal_watch(self, key, index=None):
        """Generator yielding every change of *key*, forever.

        After each response the waited-for index is advanced by one;
        timeout=0 disables the per-request read timeout (see api_execute).
        """
        local_index = index
        while True:
            response = self.watch(key, index=local_index, timeout=0)
            if local_index is not None:
                local_index += 1
            yield response
    def get_lock(self, *args, **kwargs):
        """Return a distributed-lock helper bound to this client."""
        return etcd.Lock(self, *args, **kwargs)
    @property
    def election(self):
        """Return a leader-election helper bound to this client."""
        return etcd.LeaderElection(self)
    def _result_from_response(self, response):
        """Decode a urllib3 response body into an etcd result object.

        Any failure while decoding or parsing is wrapped in EtcdException so
        callers see a single error type for malformed server replies.
        """
        try:
            res = json.loads(response.data.decode('utf-8'))
            r = etcd.EtcdResult(**res)
            if response.status == 201:
                # HTTP 201 means the key was newly created
                r.newKey = True
            r.parse_headers(response)
            return r
        except Exception as e:
            raise etcd.EtcdException(
                'Unable to decode server response: %s' % e)
def _next_server(self):
try:
return self._machines_cache.pop()
except IndexError:
raise etcd.EtcdException('No more machines in the cluster')
    def api_execute(self, path, method, params=None, timeout=None):
        """Issue *method* on *path*, failing over between cluster machines.

        timeout=None uses the client default; timeout=0 disables the timeout
        entirely.  On connection failure (MaxRetryError) the next machine in
        the cache becomes the base URI and the request is retried; once a
        request finally succeeds after any failure, the machine cache is
        refreshed from the cluster.  Raises ValueError for relative paths
        and EtcdException for unsupported HTTP methods.
        """
        some_request_failed = False
        response = False
        if timeout is None:
            timeout = self.read_timeout
        if timeout == 0:
            # 0 explicitly disables the read timeout
            timeout = None
        if not path.startswith('/'):
            raise ValueError('Path does not start with /')
        while not response:
            try:
                url = self._base_uri + path
                if (method == self._MGET) or (method == self._MDELETE):
                    # query-string request
                    response = self.http.request(
                        method,
                        url,
                        timeout=timeout,
                        fields=params,
                        redirect=self.allow_redirect)
                elif (method == self._MPUT) or (method == self._MPOST):
                    # form-encoded body request
                    response = self.http.request_encode_body(
                        method,
                        url,
                        fields=params,
                        timeout=timeout,
                        encode_multipart=False,
                        redirect=self.allow_redirect)
                else:
                    raise etcd.EtcdException(
                        'HTTP method {} not supported'.format(method))
            except urllib3.exceptions.MaxRetryError:
                # current machine unreachable: fail over to the next one
                self._base_uri = self._next_server()
                some_request_failed = True
        if some_request_failed:
            # topology may have changed; re-learn the member list
            self._machines_cache = self.machines
            self._machines_cache.remove(self._base_uri)
        return self._handle_server_response(response)
def _handle_server_response(self, response):
if response.status in [200, 201]:
return response
else:
resp = response.data.decode('utf-8')
# throw the appropriate exception
try:
r = json.loads(resp)
except ValueError:
r = None
if r:
etcd.EtcdError.handle(**r)
else:
raise etcd.EtcdException(resp)
| true | true |
f73c67238c4af361622b2d8a0be5d1176f4d0c2d | 931 | py | Python | setup.py | sahandilshan/airML | 599e6b14e517ec86cd4435f760c19587e3db5627 | [
"Apache-2.0"
] | null | null | null | setup.py | sahandilshan/airML | 599e6b14e517ec86cd4435f760c19587e3db5627 | [
"Apache-2.0"
] | null | null | null | setup.py | sahandilshan/airML | 599e6b14e517ec86cd4435f760c19587e3db5627 | [
"Apache-2.0"
] | null | null | null | from setuptools import setup
# Read the long description for PyPI from the project README.
with open("README.md", "r") as fh:
    long_description = fh.read()
setup(
    name="airML",
    version="0.0.2",
    author="Lahiru Oshara Hinguruduwa",
    author_email='oshara.16@cse.mrt.ac.lk',
    url='https://github.com/AKSW/airML',
    description="application will allow users to " +
                "share and dereference ML models.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    py_modules=['airML'],
    packages=["airML"],
    # package_dir={'': 'airML'},
    # Ship any bundled .jar files with the package.
    package_data={'': ['*.jar']},
    include_package_data=True,
    install_requires=['click>=7.1.2'],
    # Console entry point: the `airML` command.
    entry_points={
        'console_scripts': [
            'airML=airML.airML:execute_kbox_command',
        ],
    },
    license='Apache',
    classifiers=[
        "License :: OSI Approved :: Apache Software License",
        'Programming Language :: Python :: 3',
    ],
)
| 27.382353 | 61 | 0.615467 | from setuptools import setup
# Read the long description for PyPI from the project README.
with open("README.md", "r") as fh:
    long_description = fh.read()
setup(
    name="airML",
    version="0.0.2",
    author="Lahiru Oshara Hinguruduwa",
    author_email='oshara.16@cse.mrt.ac.lk',
    url='https://github.com/AKSW/airML',
    description="application will allow users to " +
                "share and dereference ML models.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    py_modules=['airML'],
    packages=["airML"],
    # Ship any bundled .jar files with the package.
    package_data={'': ['*.jar']},
    include_package_data=True,
    install_requires=['click>=7.1.2'],
    # Console entry point: the `airML` command.
    entry_points={
        'console_scripts': [
            'airML=airML.airML:execute_kbox_command',
        ],
    },
    license='Apache',
    classifiers=[
        "License :: OSI Approved :: Apache Software License",
        'Programming Language :: Python :: 3',
    ],
)
| true | true |
f73c6743b69657d4f3150a79436acfb8c6d12009 | 4,749 | py | Python | handler.py | kgorskowski/codedeploy_asg_helper | 1a78ba23c7beadce62bfb41ddee7c784d8f1e771 | [
"MIT"
] | null | null | null | handler.py | kgorskowski/codedeploy_asg_helper | 1a78ba23c7beadce62bfb41ddee7c784d8f1e771 | [
"MIT"
] | null | null | null | handler.py | kgorskowski/codedeploy_asg_helper | 1a78ba23c7beadce62bfb41ddee7c784d8f1e771 | [
"MIT"
] | null | null | null | import json
import boto3
import json
# Shared Auto Scaling client and the scaling processes toggled around deploys.
autoscaling = boto3.client('autoscaling')
processes_to_suspend = ["AZRebalance", "AlarmNotification", "ScheduledActions", "ReplaceUnhealthy"]
def update_autoscaling_group(autoscaling_group, asg_min_size):
    """Reset the minimum size of *autoscaling_group* to *asg_min_size*.

    Returns True when the AWS API call succeeds, False otherwise.
    """
    print("Trying to reset %s to minimal size of %i instances" % (autoscaling_group, asg_min_size))
    client = boto3.client('autoscaling')
    response = client.update_auto_scaling_group(
        AutoScalingGroupName=autoscaling_group,
        MinSize=asg_min_size
    )
    if response['ResponseMetadata']['HTTPStatusCode'] == 200:
        print("DEBUG: Updating Autoscaling Group minimal size successfull")
        return True
    else:
        # Bug fix: this branch referenced the undefined name
        # `autoscaling_group_name`, raising NameError instead of reporting.
        print("ERROR: Unable to reset minimal size of '" + autoscaling_group + "'")
        return False
def get_asg_min_size(asg):
    """Return the integer value of the 'ASGMinSize' tag on group *asg*.

    Returns False when the group carries no such tag.
    """
    client = boto3.client('autoscaling')
    response = client.describe_auto_scaling_groups(
        AutoScalingGroupNames=[asg]
    )
    print(response)
    # Bug fix: the original returned False as soon as the *first* tag was not
    # ASGMinSize, so the tag was only found when it happened to come first.
    for tag in response['AutoScalingGroups'][0]['Tags']:
        print(tag)
        if "ASGMinSize" in tag['Key']:
            return int(tag['Value'])
    return False
def get_autoscaling_group(deployment_group):
    """Return the Auto Scaling group names tagged for *deployment_group*.

    Exits the process (exit code 1) when no matching tag is found.
    """
    client = boto3.client('autoscaling')
    # NOTE(review): 'AutomatedASGScript ' is queried with a trailing space —
    # presumably matching how the tag key exists in the account; confirm.
    tag_filters = [
        {'Name': "key", 'Values': ['AutomatedASGScript ']},
        {'Name': "value", 'Values': ['true']},
        {'Name': "key", 'Values': ['DeploymentGroup']},
        {'Name': "value", 'Values': [deployment_group]},
    ]
    response = client.describe_tags(Filters=tag_filters)
    print(response)
    if not response['Tags']:
        print('Found no Autoscaling Group for Deployment Group %s - exiting' % deployment_group)
        exit(1)
    print('Found Autoscaling Groups for Deployment Group %s' % deployment_group)
    return [tag['ResourceId'] for tag in response['Tags']]
def suspend_processes(autoscaling_group_name, processes_to_suspend):
    """Suspend the given scaling processes on one Auto Scaling group.

    Returns True on HTTP 200 from AWS, False otherwise.
    """
    result = autoscaling.suspend_processes(
        AutoScalingGroupName=autoscaling_group_name,
        ScalingProcesses=processes_to_suspend
    )
    succeeded = result['ResponseMetadata']['HTTPStatusCode'] == 200
    if succeeded:
        print("DEBUG: Autoscaling Processes suspended")
    else:
        print("ERROR: Unable to suspend_processes on '" + autoscaling_group_name + "'")
    return succeeded
def resume_processes(autoscaling_group_name, processes_to_suspend):
    """Resume the given scaling processes on one Auto Scaling group.

    Returns True on HTTP 200 from AWS, False otherwise.
    """
    result = autoscaling.resume_processes(
        AutoScalingGroupName=autoscaling_group_name,
        ScalingProcesses=processes_to_suspend
    )
    succeeded = result['ResponseMetadata']['HTTPStatusCode'] == 200
    if succeeded:
        print("DEBUG: Autoscaling Processes resumed")
    else:
        print("ERROR: Unable to resume_processes on '" + autoscaling_group_name + "'")
    return succeeded
def autoscale(event, context):
    """SNS-triggered entry point: suspend or resume autoscaling processes.

    The SNS topic ARN decides the action; the CodeDeploy deployment group
    named in the message selects the Auto Scaling groups to act on.
    Returns the last response dict, False for an unknown topic, or None
    when no Auto Scaling group matched.
    """
    message_json = json.loads(event['Records'][0]['Sns']['Message'])
    print(message_json)
    # Fix: the deployment-group / ASG lookup was performed twice in a row,
    # doubling the AWS API calls for no benefit.
    deployment_group = message_json['deploymentGroupName']
    autoscaling_group_name = get_autoscaling_group(deployment_group)
    # Robustness: ensure `response` is defined even if the loop body never
    # assigns it (previously an UnboundLocalError on an empty group list).
    response = None
    for i in autoscaling_group_name:
        print(i)
        asg_min_size = get_asg_min_size(i)
        if not asg_min_size:
            print("Found no ASGMinSize Tag for %s" % i)
        else:
            print("Found ASG %s with min. size of %s instances" % (i, asg_min_size))
        topic_arn = event['Records'][0]['Sns']['TopicArn']
        print('Got Message from %s' % topic_arn)
        if "suspendAutoscaling" in topic_arn:
            item = suspend_processes(i, processes_to_suspend)
            body = {
                "message": "Suspending Autoscaling Processes",
                "successful": item
            }
            response = {
                "statusCode": 200,
                "body": json.dumps(body)
            }
        elif "resumeAutoscaling" in topic_arn:
            if asg_min_size:
                # Restore the group's configured minimum before resuming.
                update_autoscaling_group(i, asg_min_size)
            item = resume_processes(i, processes_to_suspend)
            # NOTE(review): key spelled 'succesful' in the original payload —
            # kept byte-identical for downstream compatibility.
            body = {
                "message": "Resuming Autoscaling Processes",
                "succesful": item
            }
            response = {
                "statusCode": 200,
                "body": json.dumps(body)
            }
        else:
            print('Recieved Message from unknown SNS Topic %s - Exiting ' % topic_arn)
            return False
    print("DEBUG:", response)
    return response
| 35.440299 | 99 | 0.625184 | import json
import boto3
import json
# Shared Auto Scaling client and the scaling processes toggled around deploys.
autoscaling = boto3.client('autoscaling')
processes_to_suspend = ["AZRebalance", "AlarmNotification", "ScheduledActions", "ReplaceUnhealthy"]
def update_autoscaling_group(autoscaling_group, asg_min_size):
    """Reset the minimum size of *autoscaling_group* to *asg_min_size*.

    Returns True when the AWS API call succeeds, False otherwise.
    """
    print("Trying to reset %s to minimal size of %i instances" % (autoscaling_group, asg_min_size))
    client = boto3.client('autoscaling')
    response = client.update_auto_scaling_group(
        AutoScalingGroupName=autoscaling_group,
        MinSize=asg_min_size
    )
    if response['ResponseMetadata']['HTTPStatusCode'] == 200:
        print("DEBUG: Updating Autoscaling Group minimal size successfull")
        return True
    else:
        # Bug fix: this branch referenced the undefined name
        # `autoscaling_group_name`, raising NameError instead of reporting.
        print("ERROR: Unable to reset minimal size of '" + autoscaling_group + "'")
        return False
def get_asg_min_size(asg):
    """Return the integer value of the 'ASGMinSize' tag on group *asg*.

    Returns False when the group carries no such tag.
    """
    client = boto3.client('autoscaling')
    response = client.describe_auto_scaling_groups(
        AutoScalingGroupNames=[asg]
    )
    print(response)
    # Bug fix: the original returned False as soon as the *first* tag was not
    # ASGMinSize, so the tag was only found when it happened to come first.
    for tag in response['AutoScalingGroups'][0]['Tags']:
        print(tag)
        if "ASGMinSize" in tag['Key']:
            return int(tag['Value'])
    return False
def get_autoscaling_group(deployment_group):
    """Return the Auto Scaling group names tagged for *deployment_group*.

    Exits the process (exit code 1) when no matching tag is found.
    """
    client = boto3.client('autoscaling')
    # NOTE(review): 'AutomatedASGScript ' is queried with a trailing space —
    # presumably matching how the tag key exists in the account; confirm.
    tag_filters = [
        {'Name': "key", 'Values': ['AutomatedASGScript ']},
        {'Name': "value", 'Values': ['true']},
        {'Name': "key", 'Values': ['DeploymentGroup']},
        {'Name': "value", 'Values': [deployment_group]},
    ]
    response = client.describe_tags(Filters=tag_filters)
    print(response)
    if not response['Tags']:
        print('Found no Autoscaling Group for Deployment Group %s - exiting' % deployment_group)
        exit(1)
    print('Found Autoscaling Groups for Deployment Group %s' % deployment_group)
    return [tag['ResourceId'] for tag in response['Tags']]
def suspend_processes(autoscaling_group_name, processes_to_suspend):
    """Suspend the given scaling processes on one Auto Scaling group.

    Returns True on HTTP 200 from AWS, False otherwise.
    """
    result = autoscaling.suspend_processes(
        AutoScalingGroupName=autoscaling_group_name,
        ScalingProcesses=processes_to_suspend
    )
    succeeded = result['ResponseMetadata']['HTTPStatusCode'] == 200
    if succeeded:
        print("DEBUG: Autoscaling Processes suspended")
    else:
        print("ERROR: Unable to suspend_processes on '" + autoscaling_group_name + "'")
    return succeeded
def resume_processes(autoscaling_group_name, processes_to_suspend):
    """Resume the given scaling processes on one Auto Scaling group.

    Returns True on HTTP 200 from AWS, False otherwise.
    """
    result = autoscaling.resume_processes(
        AutoScalingGroupName=autoscaling_group_name,
        ScalingProcesses=processes_to_suspend
    )
    succeeded = result['ResponseMetadata']['HTTPStatusCode'] == 200
    if succeeded:
        print("DEBUG: Autoscaling Processes resumed")
    else:
        print("ERROR: Unable to resume_processes on '" + autoscaling_group_name + "'")
    return succeeded
def autoscale(event, context):
    """SNS-triggered entry point: suspend or resume autoscaling processes.

    The SNS topic ARN decides the action; the CodeDeploy deployment group
    named in the message selects the Auto Scaling groups to act on.
    Returns the last response dict, False for an unknown topic, or None
    when no Auto Scaling group matched.
    """
    message_json = json.loads(event['Records'][0]['Sns']['Message'])
    print(message_json)
    # Fix: the deployment-group / ASG lookup was performed twice in a row,
    # doubling the AWS API calls for no benefit.
    deployment_group = message_json['deploymentGroupName']
    autoscaling_group_name = get_autoscaling_group(deployment_group)
    # Robustness: ensure `response` is defined even if the loop body never
    # assigns it (previously an UnboundLocalError on an empty group list).
    response = None
    for i in autoscaling_group_name:
        print(i)
        asg_min_size = get_asg_min_size(i)
        if not asg_min_size:
            print("Found no ASGMinSize Tag for %s" % i)
        else:
            print("Found ASG %s with min. size of %s instances" % (i, asg_min_size))
        topic_arn = event['Records'][0]['Sns']['TopicArn']
        print('Got Message from %s' % topic_arn)
        if "suspendAutoscaling" in topic_arn:
            item = suspend_processes(i, processes_to_suspend)
            body = {
                "message": "Suspending Autoscaling Processes",
                "successful": item
            }
            response = {
                "statusCode": 200,
                "body": json.dumps(body)
            }
        elif "resumeAutoscaling" in topic_arn:
            if asg_min_size:
                # Restore the group's configured minimum before resuming.
                update_autoscaling_group(i, asg_min_size)
            item = resume_processes(i, processes_to_suspend)
            # NOTE(review): key spelled 'succesful' in the original payload —
            # kept byte-identical for downstream compatibility.
            body = {
                "message": "Resuming Autoscaling Processes",
                "succesful": item
            }
            response = {
                "statusCode": 200,
                "body": json.dumps(body)
            }
        else:
            print('Recieved Message from unknown SNS Topic %s - Exiting ' % topic_arn)
            return False
    print("DEBUG:", response)
    return response
| true | true |
f73c6748a5451ba11183cc6044e91ed109913445 | 3,224 | py | Python | tests/loops/test_all.py | FeryET/pytorch-lightning | b1f8b111b5085373599758a4e155a482259cdbf0 | [
"Apache-2.0"
] | 2 | 2022-01-24T12:40:51.000Z | 2022-01-25T02:26:32.000Z | tests/loops/test_all.py | FeryET/pytorch-lightning | b1f8b111b5085373599758a4e155a482259cdbf0 | [
"Apache-2.0"
] | 1 | 2022-02-09T17:24:56.000Z | 2022-02-09T17:24:56.000Z | tests/loops/test_all.py | FeryET/pytorch-lightning | b1f8b111b5085373599758a4e155a482259cdbf0 | [
"Apache-2.0"
] | 2 | 2022-02-11T08:26:13.000Z | 2022-03-21T03:48:34.000Z | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pytorch_lightning import Callback, Trainer
from tests.helpers import BoringModel
from tests.helpers.runif import RunIf
class BatchHookObserverCallback(Callback):
    """Callback whose batch hooks assert that every batch arrives on the
    same device as the LightningModule."""
    def on_train_batch_start(self, trainer, pl_module, batch, *args):
        assert batch.device == pl_module.device
    def on_train_batch_end(self, trainer, pl_module, outputs, batch, *args):
        assert batch.device == pl_module.device
    def on_validation_batch_start(self, trainer, pl_module, batch, *args):
        assert batch.device == pl_module.device
    def on_validation_batch_end(self, trainer, pl_module, outputs, batch, *args):
        assert batch.device == pl_module.device
    def on_test_batch_start(self, trainer, pl_module, batch, *args):
        assert batch.device == pl_module.device
    def on_test_batch_end(self, trainer, pl_module, outputs, batch, *args):
        assert batch.device == pl_module.device
    def on_predict_batch_start(self, trainer, pl_module, batch, *args):
        assert batch.device == pl_module.device
    def on_predict_batch_end(self, trainer, pl_module, outputs, batch, *args):
        assert batch.device == pl_module.device
class BatchHookObserverModel(BoringModel):
    """BoringModel variant whose batch hooks assert that each batch lives
    on the model's own device."""
    def on_train_batch_start(self, batch, *args):
        assert batch.device == self.device
    def on_train_batch_end(self, outputs, batch, *args):
        assert batch.device == self.device
    def on_validation_batch_start(self, batch, *args):
        assert batch.device == self.device
    def on_validation_batch_end(self, outputs, batch, *args):
        assert batch.device == self.device
    def on_test_batch_start(self, batch, *args):
        assert batch.device == self.device
    def on_test_batch_end(self, outputs, batch, *args):
        assert batch.device == self.device
    def on_predict_batch_start(self, batch, *args):
        assert batch.device == self.device
    def on_predict_batch_end(self, outputs, batch, *args):
        assert batch.device == self.device
@RunIf(min_gpus=1)
def test_callback_batch_on_device(tmpdir):
    """Test that the batch object sent to the on_*_batch_start/end hooks is on the right device."""
    batch_callback = BatchHookObserverCallback()
    model = BatchHookObserverModel()
    # Run a single step of every trainer stage on one GPU; the callback and
    # model hooks perform the device assertions themselves.
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_steps=1,
        limit_train_batches=1,
        limit_val_batches=1,
        limit_test_batches=1,
        limit_predict_batches=1,
        accelerator="gpu",
        devices=1,
        callbacks=[batch_callback],
    )
    trainer.fit(model)
    trainer.validate(model)
    trainer.test(model)
    trainer.predict(model)
| 34.666667 | 99 | 0.71371 |
from pytorch_lightning import Callback, Trainer
from tests.helpers import BoringModel
from tests.helpers.runif import RunIf
class BatchHookObserverCallback(Callback):
    """Callback whose batch hooks assert that every batch arrives on the
    same device as the LightningModule."""
    def on_train_batch_start(self, trainer, pl_module, batch, *args):
        assert batch.device == pl_module.device
    def on_train_batch_end(self, trainer, pl_module, outputs, batch, *args):
        assert batch.device == pl_module.device
    def on_validation_batch_start(self, trainer, pl_module, batch, *args):
        assert batch.device == pl_module.device
    def on_validation_batch_end(self, trainer, pl_module, outputs, batch, *args):
        assert batch.device == pl_module.device
    def on_test_batch_start(self, trainer, pl_module, batch, *args):
        assert batch.device == pl_module.device
    def on_test_batch_end(self, trainer, pl_module, outputs, batch, *args):
        assert batch.device == pl_module.device
    def on_predict_batch_start(self, trainer, pl_module, batch, *args):
        assert batch.device == pl_module.device
    def on_predict_batch_end(self, trainer, pl_module, outputs, batch, *args):
        assert batch.device == pl_module.device
class BatchHookObserverModel(BoringModel):
    """BoringModel variant whose batch hooks assert that each batch lives
    on the model's own device."""
    def on_train_batch_start(self, batch, *args):
        assert batch.device == self.device
    def on_train_batch_end(self, outputs, batch, *args):
        assert batch.device == self.device
    def on_validation_batch_start(self, batch, *args):
        assert batch.device == self.device
    def on_validation_batch_end(self, outputs, batch, *args):
        assert batch.device == self.device
    def on_test_batch_start(self, batch, *args):
        assert batch.device == self.device
    def on_test_batch_end(self, outputs, batch, *args):
        assert batch.device == self.device
    def on_predict_batch_start(self, batch, *args):
        assert batch.device == self.device
    def on_predict_batch_end(self, outputs, batch, *args):
        assert batch.device == self.device
@RunIf(min_gpus=1)
def test_callback_batch_on_device(tmpdir):
    """Test that the batch sent to the on_*_batch_start/end hooks is on the right device."""
    batch_callback = BatchHookObserverCallback()
    model = BatchHookObserverModel()
    # Run a single step of every trainer stage on one GPU; the callback and
    # model hooks perform the device assertions themselves.
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_steps=1,
        limit_train_batches=1,
        limit_val_batches=1,
        limit_test_batches=1,
        limit_predict_batches=1,
        accelerator="gpu",
        devices=1,
        callbacks=[batch_callback],
    )
    trainer.fit(model)
    trainer.validate(model)
    trainer.test(model)
    trainer.predict(model)
| true | true |
f73c67ac2b203e54db613e5bf7b915eb9fdeadc9 | 2,010 | py | Python | codraft/app.py | CODRA-Software/CodraFT | d3a6e7abbf001b8d0d288b23f7ff81fcaa3b3659 | [
"CECILL-B",
"BSD-3-Clause"
] | null | null | null | codraft/app.py | CODRA-Software/CodraFT | d3a6e7abbf001b8d0d288b23f7ff81fcaa3b3659 | [
"CECILL-B",
"BSD-3-Clause"
] | null | null | null | codraft/app.py | CODRA-Software/CodraFT | d3a6e7abbf001b8d0d288b23f7ff81fcaa3b3659 | [
"CECILL-B",
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Licensed under the terms of the BSD 3-Clause or the CeCILL-B License
# (see codraft/__init__.py for details)
"""
CodraFT launcher module
"""
from guidata.configtools import get_image_file_path
from qtpy import QtCore as QC
from qtpy import QtGui as QG
from qtpy import QtWidgets as QW
from codraft.config import Conf
from codraft.core.gui.main import CodraFTMainWindow
from codraft.env import execenv
from codraft.utils.qthelpers import qt_app_context
def create(
    splash: bool = True, console: bool = None, objects=None, h5files=None, size=None
) -> CodraFTMainWindow:
    """Build the CodraFT main window and return it.

    Optionally shows a splash screen while the window is constructed,
    resizes it, restores the maximized state from the configuration, and
    pre-loads HDF5 files and objects.
    """
    splash_screen = None
    if splash:
        # Show the splash screen while the main window is being built.
        pixmap = QG.QPixmap(get_image_file_path("codraft_titleicon.png"))
        splash_screen = QW.QSplashScreen(pixmap, QC.Qt.WindowStaysOnTopHint)
        splash_screen.show()
    window = CodraFTMainWindow(console=console)
    if size is not None:
        window.resize(*size)
    if splash_screen is not None:
        splash_screen.finish(window)
    if Conf.main.window_maximized.get(None):
        window.showMaximized()
    else:
        window.showNormal()
    if h5files is not None:
        window.open_h5_files(h5files, import_all=True)
    if objects is not None:
        for obj in objects:
            window.add_object(obj)
    return window
def run(console=None, objects=None, h5files=None, size=None):
    """Run the CodraFT application

    Note: this function is an entry point in `setup.py` and therefore
    may not be moved without modifying the package setup script."""
    # Merge HDF5 files passed through the execution environment (command
    # line) with the ones given programmatically.
    if execenv.h5files:
        h5files = ([] if h5files is None else h5files) + execenv.h5files
    with qt_app_context(exec_loop=True):
        window = create(
            splash=True, console=console, objects=objects, h5files=h5files, size=size
        )
        window.check_dependencies()
        window.check_for_previous_crash()
if __name__ == "__main__":
    run()
| 30.454545 | 85 | 0.696517 |
from guidata.configtools import get_image_file_path
from qtpy import QtCore as QC
from qtpy import QtGui as QG
from qtpy import QtWidgets as QW
from codraft.config import Conf
from codraft.core.gui.main import CodraFTMainWindow
from codraft.env import execenv
from codraft.utils.qthelpers import qt_app_context
def create(
    splash: bool = True, console: bool = None, objects=None, h5files=None, size=None
) -> CodraFTMainWindow:
    """Build the CodraFT main window and return it.

    Optionally shows a splash screen while the window is constructed,
    resizes it, restores the maximized state from the configuration, and
    pre-loads HDF5 files and objects.
    """
    splash_screen = None
    if splash:
        # Show the splash screen while the main window is being built.
        pixmap = QG.QPixmap(get_image_file_path("codraft_titleicon.png"))
        splash_screen = QW.QSplashScreen(pixmap, QC.Qt.WindowStaysOnTopHint)
        splash_screen.show()
    window = CodraFTMainWindow(console=console)
    if size is not None:
        window.resize(*size)
    if splash_screen is not None:
        splash_screen.finish(window)
    if Conf.main.window_maximized.get(None):
        window.showMaximized()
    else:
        window.showNormal()
    if h5files is not None:
        window.open_h5_files(h5files, import_all=True)
    if objects is not None:
        for obj in objects:
            window.add_object(obj)
    return window
def run(console=None, objects=None, h5files=None, size=None):
    """Run the CodraFT application."""
    # Merge HDF5 files passed through the execution environment (command
    # line) with the ones given programmatically.
    if execenv.h5files:
        h5files = ([] if h5files is None else h5files) + execenv.h5files
    with qt_app_context(exec_loop=True):
        window = create(
            splash=True, console=console, objects=objects, h5files=h5files, size=size
        )
        window.check_dependencies()
        window.check_for_previous_crash()
if __name__ == "__main__":
    run()
| true | true |
f73c69d35aff17f4c59a28633808cc8132b49e7a | 5,392 | py | Python | shorturl/index.py | chinalu/ShortURL | 00e9c7581539430ad033a8dfa62f42f9fd161f52 | [
"MIT"
] | null | null | null | shorturl/index.py | chinalu/ShortURL | 00e9c7581539430ad033a8dfa62f42f9fd161f52 | [
"MIT"
] | null | null | null | shorturl/index.py | chinalu/ShortURL | 00e9c7581539430ad033a8dfa62f42f9fd161f52 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import re
import web
import sys
sys.path.append("/home/www/ShortURL/shorturl")
from libs.qrcode import QRCode, ErrorCorrectLevel
import settings
import models
# Global web.py wiring: debug flag, template renderer, URL dispatcher and
# database handle shared by all request handlers below.
debug = web.config.debug = settings.DEBUG
render = web.template.render(settings.TEMPLATE_DIR,
                             base=settings.BASE_TEMPLATE)
app = web.application(settings.URLS, globals())
db = models.DB(settings.DATABASES)
class Index(object):
    """Home page."""
    def GET(self):
        return render.index()
class Shorten(object):
    """Result page for URL shortening."""
    def __init__(self):
        self.db = db

    def add_scheme(self, url):
        """Add a scheme to scheme-less URLs (qq.com -> http://qq.com)."""
        # Regular URL schemes
        scheme2 = re.compile(r'(?i)^[a-z][a-z0-9+.\-]*://')
        # Special URL schemes
        scheme3 = ('git@', 'mailto:', 'javascript:', 'about:', 'opera:',
                   'afp:', 'aim:', 'apt:', 'attachment:', 'bitcoin:',
                   'callto:', 'cid:', 'data:', 'dav:', 'dns:', 'fax:', 'feed:',
                   'gg:', 'go:', 'gtalk:', 'h323:', 'iax:', 'im:', 'itms:',
                   'jar:', 'magnet:', 'maps:', 'message:', 'mid:', 'msnim:',
                   'mvn:', 'news:', 'palm:', 'paparazzi:', 'platform:',
                   'pres:', 'proxy:', 'psyc:', 'query:', 'session:', 'sip:',
                   'sips:', 'skype:', 'sms:', 'spotify:', 'steam:', 'tel:',
                   'things:', 'urn:', 'uuid:', 'view-source:', 'ws:', 'xfire:',
                   'xmpp:', 'ymsgr:', 'doi:',
                   )
        url_lower = url.lower()
        # Prefix the URL with http:// unless it uses a recognized scheme
        scheme = scheme2.match(url_lower)
        if not scheme:
            for scheme in scheme3:
                url_splits = url_lower.split(scheme)
                if len(url_splits) > 1:
                    break
            else:
                url = 'http://' + url
        return url

    def qrcode_table(self, data, type_number=4, error_correct_level='H'):
        """Render *data* as a QR-code HTML table (cells styled via CSS)."""
        if error_correct_level == 'L':
            error_correct_level = ErrorCorrectLevel.L
        elif error_correct_level == 'M':
            error_correct_level = ErrorCorrectLevel.M
        elif error_correct_level == 'Q':
            error_correct_level = ErrorCorrectLevel.Q
        else:
            error_correct_level = ErrorCorrectLevel.H
        qr = QRCode()
        qr.setTypeNumber(type_number)
        qr.setErrorCorrectLevel(error_correct_level)
        qr.addData(data)
        qr.make()
        html = '<table id="qrcode-table">'
        for r in range(qr.getModuleCount()):
            html += "<tr>"
            for c in range(qr.getModuleCount()):
                if qr.isDark(r, c):
                    html += '<td class="dark" />'
                else:
                    html += '<td class="white" />'
            html += '</tr>'
        html += '</table>'
        return html

    def POST(self, get_json=False):
        url = web.input(url='').url.strip()
        # Bug fix: web.input() returns a Storage object which has no .strip();
        # the token must be read from the 'auth' attribute first.
        auth_token = web.input(auth='').auth.strip()
        print(settings.AUTH_TOKEN)
        print(auth_token)
        if not url:
            return web.badrequest()
        url = self.add_scheme(url)
        if debug:
            print(repr(url))
        # Reuse the existing mapping if this URL was shortened before
        exists = self.db.exist_expand(url)
        if exists:
            shorten = exists.shorten
        else:
            shorten = self.db.add_url(url).shorten
        shorten = web.ctx.homedomain + '/' + shorten
        if get_json:
            # Return JSON data
            web.header('Content-Type', 'application/json')
            return json.dumps({'shorten': shorten, 'expand': url})
        else:
            shortens = web.storage({'url': shorten,
                                    'qr_table': self.qrcode_table(shorten),
                                    })
            return render.shorten(shortens)
class Expand(object):
    """Redirect a short URL to its corresponding long URL."""
    def __init__(self):
        self.db = db
    def get_expand(self, shorten):
        # Returns the stored long URL, or None when the code is unknown.
        result = self.db.get_expand(shorten)
        if result:
            return result.expand
    def GET(self, shorten):
        """Resolve the short URL and issue a 301 redirect."""
        if not shorten:
            return web.seeother('/')
        expand = self.get_expand(shorten)
        if debug:
            print(repr(expand))
        if expand:
            return web.redirect(expand)  # 301 redirect
        else:
            return web.notfound()
    def POST(self):
        """Resolve the short URL and return JSON data."""
        # NOTE(review): under Python 3, .encode('utf8') yields bytes and
        # str(shorten) then includes the b'' prefix, which can never match
        # the regex below — confirm this app still targets Python 2.
        shorten = web.input(shorten='').shorten.encode('utf8').strip()
        web.header('Content-Type', 'application/json')
        # Check that the string is a plausible short-URL code
        if shorten and re.match('[a-zA-Z0-9]{5,}$', str(shorten)):
            expand = self.get_expand(shorten)
            if debug:
                print(repr(expand))
            if expand:
                shorten = web.ctx.homedomain + '/' + shorten
                return json.dumps({'shorten': shorten, 'expand': expand})
            else:
                return json.dumps({'shorten': '', 'expand': ''})
        else:
            return json.dumps({'shorten': '', 'expand': ''})
if __name__ == '__main__':
    # The commented line below is used when deploying this web.py app
    # behind nginx + fastcgi.
    # web.wsgi.runwsgi = lambda func, addr=None: web.wsgi.runfcgi(func, addr)
    app.run()
| 32.095238 | 79 | 0.512611 |
import json
import re
import web
import sys
sys.path.append("/home/www/ShortURL/shorturl")
from libs.qrcode import QRCode, ErrorCorrectLevel
import settings
import models
# Global web.py wiring: debug flag, template renderer, URL dispatcher and
# database handle shared by all request handlers below.
debug = web.config.debug = settings.DEBUG
render = web.template.render(settings.TEMPLATE_DIR,
                             base=settings.BASE_TEMPLATE)
app = web.application(settings.URLS, globals())
db = models.DB(settings.DATABASES)
class Index(object):
    """Home page."""
    def GET(self):
        return render.index()
class Shorten(object):
    """Result page for URL shortening."""
    def __init__(self):
        self.db = db

    def add_scheme(self, url):
        """Add a scheme to scheme-less URLs (qq.com -> http://qq.com)."""
        # Regular URL schemes
        scheme2 = re.compile(r'(?i)^[a-z][a-z0-9+.\-]*://')
        # Special URL schemes
        scheme3 = ('git@', 'mailto:', 'javascript:', 'about:', 'opera:',
                   'afp:', 'aim:', 'apt:', 'attachment:', 'bitcoin:',
                   'callto:', 'cid:', 'data:', 'dav:', 'dns:', 'fax:', 'feed:',
                   'gg:', 'go:', 'gtalk:', 'h323:', 'iax:', 'im:', 'itms:',
                   'jar:', 'magnet:', 'maps:', 'message:', 'mid:', 'msnim:',
                   'mvn:', 'news:', 'palm:', 'paparazzi:', 'platform:',
                   'pres:', 'proxy:', 'psyc:', 'query:', 'session:', 'sip:',
                   'sips:', 'skype:', 'sms:', 'spotify:', 'steam:', 'tel:',
                   'things:', 'urn:', 'uuid:', 'view-source:', 'ws:', 'xfire:',
                   'xmpp:', 'ymsgr:', 'doi:',
                   )
        url_lower = url.lower()
        # Prefix the URL with http:// unless it uses a recognized scheme
        scheme = scheme2.match(url_lower)
        if not scheme:
            for scheme in scheme3:
                url_splits = url_lower.split(scheme)
                if len(url_splits) > 1:
                    break
            else:
                url = 'http://' + url
        return url

    def qrcode_table(self, data, type_number=4, error_correct_level='H'):
        """Render *data* as a QR-code HTML table (cells styled via CSS)."""
        if error_correct_level == 'L':
            error_correct_level = ErrorCorrectLevel.L
        elif error_correct_level == 'M':
            error_correct_level = ErrorCorrectLevel.M
        elif error_correct_level == 'Q':
            error_correct_level = ErrorCorrectLevel.Q
        else:
            error_correct_level = ErrorCorrectLevel.H
        qr = QRCode()
        qr.setTypeNumber(type_number)
        qr.setErrorCorrectLevel(error_correct_level)
        qr.addData(data)
        qr.make()
        html = '<table id="qrcode-table">'
        for r in range(qr.getModuleCount()):
            html += "<tr>"
            for c in range(qr.getModuleCount()):
                if qr.isDark(r, c):
                    html += '<td class="dark" />'
                else:
                    html += '<td class="white" />'
            html += '</tr>'
        html += '</table>'
        return html

    def POST(self, get_json=False):
        url = web.input(url='').url.strip()
        # Bug fix: web.input() returns a Storage object which has no .strip();
        # the token must be read from the 'auth' attribute first.
        auth_token = web.input(auth='').auth.strip()
        print(settings.AUTH_TOKEN)
        print(auth_token)
        if not url:
            return web.badrequest()
        url = self.add_scheme(url)
        if debug:
            print(repr(url))
        # Reuse the existing mapping if this URL was shortened before
        exists = self.db.exist_expand(url)
        if exists:
            shorten = exists.shorten
        else:
            shorten = self.db.add_url(url).shorten
        shorten = web.ctx.homedomain + '/' + shorten
        if get_json:
            # Return JSON data
            web.header('Content-Type', 'application/json')
            return json.dumps({'shorten': shorten, 'expand': url})
        else:
            shortens = web.storage({'url': shorten,
                                    'qr_table': self.qrcode_table(shorten),
                                    })
            return render.shorten(shortens)
class Expand(object):
    """Redirect a short URL to its corresponding long URL."""
    def __init__(self):
        self.db = db
    def get_expand(self, shorten):
        # Returns the stored long URL, or None when the code is unknown.
        result = self.db.get_expand(shorten)
        if result:
            return result.expand
    def GET(self, shorten):
        """Resolve the short URL and issue a 301 redirect."""
        if not shorten:
            return web.seeother('/')
        expand = self.get_expand(shorten)
        if debug:
            print(repr(expand))
        if expand:
            # 301 redirect to the stored long URL
            return web.redirect(expand)
        else:
            return web.notfound()
    def POST(self):
        """Resolve the short URL and return JSON data."""
        # NOTE(review): under Python 3, .encode('utf8') yields bytes and
        # str(shorten) then includes the b'' prefix, which can never match
        # the regex below — confirm this app still targets Python 2.
        shorten = web.input(shorten='').shorten.encode('utf8').strip()
        web.header('Content-Type', 'application/json')
        # Check that the string is a plausible short-URL code
        if shorten and re.match('[a-zA-Z0-9]{5,}$', str(shorten)):
            expand = self.get_expand(shorten)
            if debug:
                print(repr(expand))
            if expand:
                shorten = web.ctx.homedomain + '/' + shorten
                return json.dumps({'shorten': shorten, 'expand': expand})
            else:
                return json.dumps({'shorten': '', 'expand': ''})
        else:
            return json.dumps({'shorten': '', 'expand': ''})
# Entry point: start the web.py application server when run as a script.
if __name__ == '__main__':
    app.run()
| true | true |
f73c6a81a55c99f9db7d59a04b5a3718fa96108d | 67 | py | Python | test/conc.py | sah-py/vkMod | 84995a3c0a1d54d7bbc802208ee0d6a756a98a7f | [
"MIT"
] | null | null | null | test/conc.py | sah-py/vkMod | 84995a3c0a1d54d7bbc802208ee0d6a756a98a7f | [
"MIT"
] | null | null | null | test/conc.py | sah-py/vkMod | 84995a3c0a1d54d7bbc802208ee0d6a756a98a7f | [
"MIT"
# Demonstrate iterating over a dict's key/value pairs.
z = {0: 1, 1: 2}
for key, value in z.items():
    print(key, value)
for key, value in z.items():
print(key, value) | true | true |
f73c6ad1f5b296ebe35e87eb7b18d33bd93efc01 | 77,810 | py | Python | sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2020_09_01/operations/_managed_clusters_operations.py | ankitarorabit/azure-sdk-for-python | dd90281cbad9400f8080754a5ef2f56791a5a88f | [
"MIT"
] | 3 | 2020-06-23T02:25:27.000Z | 2021-09-07T18:48:11.000Z | sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2020_09_01/operations/_managed_clusters_operations.py | ankitarorabit/azure-sdk-for-python | dd90281cbad9400f8080754a5ef2f56791a5a88f | [
"MIT"
] | 510 | 2019-07-17T16:11:19.000Z | 2021-08-02T08:38:32.000Z | sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2020_09_01/operations/_managed_clusters_operations.py | ankitarorabit/azure-sdk-for-python | dd90281cbad9400f8080754a5ef2f56791a5a88f | [
"MIT"
] | 5 | 2019-09-04T12:51:37.000Z | 2020-09-16T07:28:40.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ManagedClustersOperations(object):
"""ManagedClustersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2020_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # The pipeline client, configuration and (de)serializers are
        # injected by the generated service client that owns this
        # operation group; this class never constructs them itself.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.ManagedClusterListResult"]
        """Gets a list of managed clusters in the specified subscription.

        Gets a list of managed clusters in the specified subscription. The operation returns properties
        of each managed cluster. Paging is handled lazily: each page is fetched
        only as the returned iterator is consumed.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ManagedClusterListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the request for the first page (templated URL) or a
            # continuation page (service-supplied opaque next_link).
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url'] # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already carries its own query string.
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Deserialize one page into (next page link, iterator of items).
            deserialized = self._deserialize('ManagedClusterListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            # Anything other than 200 is surfaced as an ARM-formatted error.
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/managedClusters'} # type: ignore
    def list_by_resource_group(
        self,
        resource_group_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.ManagedClusterListResult"]
        """Lists managed clusters in the specified subscription and resource group.

        Lists managed clusters in the specified subscription and resource group. The operation returns
        properties of each managed cluster. Paging is handled lazily: each page
        is fetched only as the returned iterator is consumed.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ManagedClusterListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the request for the first page (templated URL) or a
            # continuation page (service-supplied opaque next_link).
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url'] # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already carries its own query string.
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Deserialize one page into (next page link, iterator of items).
            deserialized = self._deserialize('ManagedClusterListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            # Anything other than 200 is surfaced as an ARM-formatted error.
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters'} # type: ignore
    def get_upgrade_profile(
        self,
        resource_group_name, # type: str
        resource_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.ManagedClusterUpgradeProfile"
        """Gets upgrade profile for a managed cluster.

        Gets the details of the upgrade profile for a managed cluster with a specified resource group
        and name.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ManagedClusterUpgradeProfile, or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterUpgradeProfile
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterUpgradeProfile"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"

        # Construct URL (the resourceName pattern mirrors the ARM naming rules)
        url = self.get_upgrade_profile.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Anything other than 200 is surfaced as an ARM-formatted error.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ManagedClusterUpgradeProfile', pipeline_response)

        # The optional cls callback replaces the plain deserialized return.
        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_upgrade_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/upgradeProfiles/default'} # type: ignore
    def get_access_profile(
        self,
        resource_group_name, # type: str
        resource_name, # type: str
        role_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.ManagedClusterAccessProfile"
        """Gets an access profile of a managed cluster.

        Gets the accessProfile for the specified role name of the managed cluster with a specified
        resource group and name. **WARNING**\ : This API will be deprecated. Instead use
        `ListClusterUserCredentials <https://docs.microsoft.com/en-
        us/rest/api/aks/managedclusters/listclusterusercredentials>`_ or `ListClusterAdminCredentials
        <https://docs.microsoft.com/en-us/rest/api/aks/managedclusters/listclusteradmincredentials>`_ .

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param role_name: The name of the role for managed cluster accessProfile resource.
        :type role_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ManagedClusterAccessProfile, or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterAccessProfile
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterAccessProfile"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"

        # Construct URL
        url = self.get_access_profile.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
            'roleName': self._serialize.url("role_name", role_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # POST: the listCredential endpoint returns credentials, so it is not a GET.
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Anything other than 200 is surfaced as an ARM-formatted error.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ManagedClusterAccessProfile', pipeline_response)

        # The optional cls callback replaces the plain deserialized return.
        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_access_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/accessProfiles/{roleName}/listCredential'} # type: ignore
    def list_cluster_admin_credentials(
        self,
        resource_group_name, # type: str
        resource_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.CredentialResults"
        """Gets cluster admin credential of a managed cluster.

        Gets cluster admin credential of the managed cluster with a specified resource group and name.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CredentialResults, or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2020_09_01.models.CredentialResults
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.CredentialResults"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"

        # Construct URL
        url = self.list_cluster_admin_credentials.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # POST because the endpoint returns secrets (credential material).
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Anything other than 200 is surfaced as an ARM-formatted error.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('CredentialResults', pipeline_response)

        # The optional cls callback replaces the plain deserialized return.
        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    list_cluster_admin_credentials.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterAdminCredential'} # type: ignore
    def list_cluster_user_credentials(
        self,
        resource_group_name, # type: str
        resource_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.CredentialResults"
        """Gets cluster user credential of a managed cluster.

        Gets cluster user credential of the managed cluster with a specified resource group and name.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CredentialResults, or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2020_09_01.models.CredentialResults
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.CredentialResults"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"

        # Construct URL
        url = self.list_cluster_user_credentials.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # POST because the endpoint returns secrets (credential material).
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Anything other than 200 is surfaced as an ARM-formatted error.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('CredentialResults', pipeline_response)

        # The optional cls callback replaces the plain deserialized return.
        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    list_cluster_user_credentials.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterUserCredential'} # type: ignore
    def list_cluster_monitoring_user_credentials(
        self,
        resource_group_name, # type: str
        resource_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.CredentialResults"
        """Gets cluster monitoring user credential of a managed cluster.

        Gets cluster monitoring user credential of the managed cluster with a specified resource group
        and name.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CredentialResults, or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2020_09_01.models.CredentialResults
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.CredentialResults"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"

        # Construct URL
        url = self.list_cluster_monitoring_user_credentials.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # POST because the endpoint returns secrets (credential material).
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Anything other than 200 is surfaced as an ARM-formatted error.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('CredentialResults', pipeline_response)

        # The optional cls callback replaces the plain deserialized return.
        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    list_cluster_monitoring_user_credentials.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterMonitoringUserCredential'} # type: ignore
    def get(
        self,
        resource_group_name, # type: str
        resource_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.ManagedCluster"
        """Gets a managed cluster.

        Gets the details of the managed cluster with a specified resource group and name.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ManagedCluster, or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2020_09_01.models.ManagedCluster
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Anything other than 200 is surfaced as an ARM-formatted error.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ManagedCluster', pipeline_response)

        # The optional cls callback replaces the plain deserialized return.
        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
    def _create_or_update_initial(
        self,
        resource_group_name, # type: str
        resource_name, # type: str
        parameters, # type: "_models.ManagedCluster"
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.ManagedCluster"
        """Issue the initial PUT for begin_create_or_update.

        Sends a single (non-polling) request and returns the immediate
        200/201 response body; long-running polling is handled by the
        caller's LROPoller.
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'ManagedCluster')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 = updated existing cluster, 201 = created a new one;
        # both carry a ManagedCluster body.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if response.status_code == 200:
            deserialized = self._deserialize('ManagedCluster', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('ManagedCluster', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name, # type: str
        resource_name, # type: str
        parameters, # type: "_models.ManagedCluster"
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["_models.ManagedCluster"]
        """Creates or updates a managed cluster.

        Creates or updates a managed cluster with the specified configuration for agents and Kubernetes
        version. This is a long-running operation: the returned LROPoller
        drives the polling and yields the final ManagedCluster.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param parameters: Parameters supplied to the Create or Update a Managed Cluster operation.
        :type parameters: ~azure.mgmt.containerservice.v2020_09_01.models.ManagedCluster
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either ManagedCluster or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2020_09_01.models.ManagedCluster]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # Only send the initial PUT when not resuming from a saved token;
        # cls=lambda keeps the raw pipeline response for the poller.
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the LRO.
            deserialized = self._deserialize('ManagedCluster', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }

        # Select the polling strategy: default ARM polling, no polling, or
        # a caller-supplied PollingMethod instance.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
    def _update_tags_initial(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        parameters,  # type: "_models.TagsObject"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.ManagedCluster"
        """Send the initial PATCH request of the update-tags long-running operation.

        Serializes ``parameters`` as a ``TagsObject`` body, PATCHes the managed
        cluster URL, and deserializes the 200 response into a ``ManagedCluster``.
        Any other status code raises ``HttpResponseError``.

        :param resource_group_name: The name of the resource group.
        :param resource_name: The name of the managed cluster resource.
        :param parameters: Tags to apply to the managed cluster.
        :return: The deserialized ``ManagedCluster`` (or ``cls(...)`` if a custom
         ``cls`` callback was supplied in ``kwargs``).
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedCluster"]
        # Map auth/not-found/conflict statuses to typed exceptions; callers may
        # extend or override via an 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL from the operation's metadata template.
        url = self._update_tags_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Only 200 is a success for this operation's initial call.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ManagedCluster', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'}  # type: ignore
    def begin_update_tags(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        parameters,  # type: "_models.TagsObject"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.ManagedCluster"]
        """Updates tags on a managed cluster.
        Updates a managed cluster with the specified tags.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param parameters: Parameters supplied to the Update Managed Cluster Tags operation.
        :type parameters: ~azure.mgmt.containerservice.v2020_09_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
        False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either ManagedCluster or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2020_09_01.models.ManagedCluster]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedCluster"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial PATCH. cls=lambda x,y,z: x makes
            # the initial call return the raw pipeline response for the poller.
            raw_result = self._update_tags_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Drop request-scoped kwargs already consumed by the initial call so they
        # are not forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the LRO into a ManagedCluster.
            deserialized = self._deserialize('ManagedCluster', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        # polling=True -> default ARM polling; False -> no polling; otherwise a
        # caller-supplied PollingMethod instance.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its continuation token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'}  # type: ignore
    def _delete_initial(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Send the initial DELETE request of the delete long-running operation.

        Accepts 202 (accepted, in progress) or 204 (already gone) as success;
        any other status raises ``HttpResponseError``. Returns ``None`` unless a
        custom ``cls`` callback is supplied in ``kwargs``.

        :param resource_group_name: The name of the resource group.
        :param resource_name: The name of the managed cluster resource.
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'}  # type: ignore
    def begin_delete(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes a managed cluster.
        Deletes the managed cluster with a specified resource group and name.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
        False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial DELETE. cls=lambda x,y,z: x keeps
            # the raw pipeline response for the poller.
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Drop request-scoped kwargs already consumed by the initial call.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Delete produces no body; only invoke the optional cls callback.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'}  # type: ignore
    def _reset_service_principal_profile_initial(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        parameters,  # type: "_models.ManagedClusterServicePrincipalProfile"
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Send the initial POST of the reset-service-principal-profile LRO.

        POSTs ``parameters`` serialized as ``ManagedClusterServicePrincipalProfile``
        to the ``resetServicePrincipalProfile`` action URL. 200 and 202 are
        success; any other status raises ``HttpResponseError``. Returns ``None``
        unless a custom ``cls`` callback is supplied in ``kwargs``.

        :param resource_group_name: The name of the resource group.
        :param resource_name: The name of the managed cluster resource.
        :param parameters: The new service principal profile.
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._reset_service_principal_profile_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'ManagedClusterServicePrincipalProfile')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _reset_service_principal_profile_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile'}  # type: ignore
    def begin_reset_service_principal_profile(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        parameters,  # type: "_models.ManagedClusterServicePrincipalProfile"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Reset Service Principal Profile of a managed cluster.
        Update the service principal Profile for a managed cluster.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param parameters: Parameters supplied to the Reset Service Principal Profile operation for a
        Managed Cluster.
        :type parameters: ~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterServicePrincipalProfile
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
        False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial POST. cls=lambda x,y,z: x keeps
            # the raw pipeline response for the poller.
            raw_result = self._reset_service_principal_profile_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Drop request-scoped kwargs already consumed by the initial call.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # This action produces no body; only invoke the optional cls callback.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_reset_service_principal_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile'}  # type: ignore
    def _reset_aad_profile_initial(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        parameters,  # type: "_models.ManagedClusterAADProfile"
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Send the initial POST of the reset-AAD-profile long-running operation.

        POSTs ``parameters`` serialized as ``ManagedClusterAADProfile`` to the
        ``resetAADProfile`` action URL. 200 and 202 are success; any other status
        raises ``HttpResponseError``. Returns ``None`` unless a custom ``cls``
        callback is supplied in ``kwargs``.

        :param resource_group_name: The name of the resource group.
        :param resource_name: The name of the managed cluster resource.
        :param parameters: The new AAD profile.
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._reset_aad_profile_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'ManagedClusterAADProfile')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _reset_aad_profile_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile'}  # type: ignore
    def begin_reset_aad_profile(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        parameters,  # type: "_models.ManagedClusterAADProfile"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Reset AAD Profile of a managed cluster.
        Update the AAD Profile for a managed cluster.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param parameters: Parameters supplied to the Reset AAD Profile operation for a Managed
        Cluster.
        :type parameters: ~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterAADProfile
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
        False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial POST. cls=lambda x,y,z: x keeps
            # the raw pipeline response for the poller.
            raw_result = self._reset_aad_profile_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Drop request-scoped kwargs already consumed by the initial call.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # This action produces no body; only invoke the optional cls callback.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_reset_aad_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile'}  # type: ignore
    def _rotate_cluster_certificates_initial(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Send the initial POST of the rotate-cluster-certificates LRO.

        Body-less POST to the ``rotateClusterCertificates`` action URL. 202 and
        204 are success; any other status raises ``HttpResponseError``. Returns
        ``None`` unless a custom ``cls`` callback is supplied in ``kwargs``.

        :param resource_group_name: The name of the resource group.
        :param resource_name: The name of the managed cluster resource.
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"
        # Construct URL
        url = self._rotate_cluster_certificates_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _rotate_cluster_certificates_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/rotateClusterCertificates'}  # type: ignore
    def begin_rotate_cluster_certificates(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Rotate certificates of a managed cluster.
        Rotate certificates of a managed cluster.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
        False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial POST. cls=lambda x,y,z: x keeps
            # the raw pipeline response for the poller.
            raw_result = self._rotate_cluster_certificates_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Drop request-scoped kwargs already consumed by the initial call.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # This action produces no body; only invoke the optional cls callback.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_rotate_cluster_certificates.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/rotateClusterCertificates'}  # type: ignore
def _stop_initial(
self,
resource_group_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self._stop_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/stop'} # type: ignore
    def begin_stop(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Stop Managed Cluster.
        Stops a Running Managed Cluster.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
        False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial POST. cls=lambda x,y,z: x keeps
            # the raw pipeline response for the poller.
            raw_result = self._stop_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Drop request-scoped kwargs already consumed by the initial call.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Stop produces no body; only invoke the optional cls callback.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/stop'}  # type: ignore
def _start_initial(
self,
resource_group_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self._start_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/start'} # type: ignore
def begin_start(
    self,
    resource_group_name,  # type: str
    resource_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[None]
    """Start Managed Cluster.

    Starts a Stopped Managed Cluster.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param resource_name: The name of the managed cluster resource.
    :type resource_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # cls=lambda x,y,z: x makes the initial call return the raw
        # PipelineResponse, which seeds the poller below.
        raw_result = self._start_initial(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # These kwargs applied only to the initial request; remove them before
    # they leak into the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # The start operation produces no body; only run the optional cls hook.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
        'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
    }

    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/start'}  # type: ignore
| 51.05643 | 253 | 0.666071 |
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ManagedClustersOperations(object):
models = _models
def __init__(self, client, config, serializer, deserializer):
    """Hold onto the pipeline client, configuration and (de)serializers.

    Instantiated by the service client; not meant to be constructed directly.
    """
    self._client = client
    self._config = config
    # Serializer/deserializer pair used for request bodies, URL/query/header
    # encoding, and response model hydration.
    self._serialize = serializer
    self._deserialize = deserializer
def list(
    self,
    **kwargs
):
    """Return an ItemPaged over all managed clusters in the subscription.

    Each page is fetched with a GET and deserialized as
    ``ManagedClusterListResult``; paging follows ``next_link`` until exhausted.

    :keyword callable cls: optional hook applied to each page's list of items.
    :return: ~azure.core.paging.ItemPaged of managed cluster items.
    :raises ~azure.core.exceptions.HttpResponseError: on a non-200 page response.
    """
    cls = kwargs.pop('cls', None)
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-09-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # First page: format the metadata URL template and add api-version.
        # Later pages: use the service-provided next_link verbatim (it already
        # carries its own query string, so query_parameters stays empty).
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            url = self.list.metadata['url']
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Deserialize one page -> (continuation token or None, item iterator).
        deserialized = self._deserialize('ManagedClusterListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch one page; anything other than 200 raises.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/managedClusters'}
def list_by_resource_group(
    self,
    resource_group_name,
    **kwargs
):
    """Return an ItemPaged over managed clusters in one resource group.

    Same paging mechanics as :meth:`list`, with the resource group added to
    the URL path.

    :param resource_group_name: The name of the resource group (min length 1).
    :keyword callable cls: optional hook applied to each page's list of items.
    :return: ~azure.core.paging.ItemPaged of managed cluster items.
    :raises ~azure.core.exceptions.HttpResponseError: on a non-200 page response.
    """
    cls = kwargs.pop('cls', None)
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-09-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # First page uses the formatted template URL; later pages follow
        # next_link verbatim (its query string is already complete).
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            url = self.list_by_resource_group.metadata['url']
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            }
            url = self._client.format_url(url, **path_format_arguments)
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # One page -> (continuation token or None, iterator of items).
        deserialized = self._deserialize('ManagedClusterListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch one page; anything other than 200 raises.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters'}
def get_upgrade_profile(
    self,
    resource_group_name,
    resource_name,
    **kwargs
):
    """GET the default upgrade profile of a managed cluster.

    :param resource_group_name: The name of the resource group (min length 1).
    :param resource_name: The managed cluster name (validated against the AKS name pattern).
    :keyword callable cls: optional hook; receives (pipeline_response, deserialized, {}).
    :return: the deserialized ManagedClusterUpgradeProfile, or the cls(...) result.
    :raises ~azure.core.exceptions.HttpResponseError: on a non-200 response.
    """
    cls = kwargs.pop('cls', None)
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-09-01"
    accept = "application/json"

    # Expand the metadata URL template, serializing each path segment inline.
    request_url = self._client.format_url(
        self.get_upgrade_profile.metadata['url'],
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
        resourceName=self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
    )
    query_params = {'api-version': self._serialize.query("api_version", api_version, 'str')}
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}

    request = self._client.get(request_url, query_params, headers)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('ManagedClusterUpgradeProfile', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get_upgrade_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/upgradeProfiles/default'}
def get_access_profile(
    self,
    resource_group_name,
    resource_name,
    role_name,
    **kwargs
):
    """POST .../accessProfiles/{roleName}/listCredential and return the profile.

    :param resource_group_name: The name of the resource group (min length 1).
    :param resource_name: The managed cluster name (validated against the AKS name pattern).
    :param role_name: The role name substituted into the URL path.
    :keyword callable cls: optional hook; receives (pipeline_response, deserialized, {}).
    :return: the deserialized ManagedClusterAccessProfile, or the cls(...) result.
    :raises ~azure.core.exceptions.HttpResponseError: on a non-200 response.
    """
    cls = kwargs.pop('cls', None)
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-09-01"
    accept = "application/json"

    # Construct URL
    url = self.get_access_profile.metadata['url']
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
        'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        'roleName': self._serialize.url("role_name", role_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Credential listing is a POST (action endpoint), not a GET.
    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('ManagedClusterAccessProfile', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get_access_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/accessProfiles/{roleName}/listCredential'}
def list_cluster_admin_credentials(
    self,
    resource_group_name,
    resource_name,
    **kwargs
):
    """POST .../listClusterAdminCredential and return CredentialResults.

    :param resource_group_name: The name of the resource group (min length 1).
    :param resource_name: The managed cluster name (validated against the AKS name pattern).
    :keyword callable cls: optional hook; receives (pipeline_response, deserialized, {}).
    :return: the deserialized CredentialResults, or the cls(...) result.
    :raises ~azure.core.exceptions.HttpResponseError: on a non-200 response.
    """
    cls = kwargs.pop('cls', None)
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-09-01"
    accept = "application/json"

    # Construct URL
    url = self.list_cluster_admin_credentials.metadata['url']
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
        'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Action endpoint: credentials are retrieved via POST.
    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('CredentialResults', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
list_cluster_admin_credentials.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterAdminCredential'}
def list_cluster_user_credentials(
    self,
    resource_group_name,
    resource_name,
    **kwargs
):
    """POST .../listClusterUserCredential and return CredentialResults.

    :param resource_group_name: The name of the resource group (min length 1).
    :param resource_name: The managed cluster name (validated against the AKS name pattern).
    :keyword callable cls: optional hook; receives (pipeline_response, deserialized, {}).
    :return: the deserialized CredentialResults, or the cls(...) result.
    :raises ~azure.core.exceptions.HttpResponseError: on a non-200 response.
    """
    cls = kwargs.pop('cls', None)
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-09-01"
    accept = "application/json"

    # Construct URL
    url = self.list_cluster_user_credentials.metadata['url']
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
        'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Action endpoint: credentials are retrieved via POST.
    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('CredentialResults', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
list_cluster_user_credentials.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterUserCredential'}
def list_cluster_monitoring_user_credentials(
    self,
    resource_group_name,
    resource_name,
    **kwargs
):
    """POST .../listClusterMonitoringUserCredential and return CredentialResults.

    :param resource_group_name: The name of the resource group (min length 1).
    :param resource_name: The managed cluster name (validated against the AKS name pattern).
    :keyword callable cls: optional hook; receives (pipeline_response, deserialized, {}).
    :return: the deserialized CredentialResults, or the cls(...) result.
    :raises ~azure.core.exceptions.HttpResponseError: on a non-200 response.
    """
    cls = kwargs.pop('cls', None)
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-09-01"
    accept = "application/json"

    # Construct URL
    url = self.list_cluster_monitoring_user_credentials.metadata['url']
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
        'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Action endpoint: credentials are retrieved via POST.
    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('CredentialResults', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
list_cluster_monitoring_user_credentials.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterMonitoringUserCredential'}
def get(
    self,
    resource_group_name,
    resource_name,
    **kwargs
):
    """GET a single managed cluster resource.

    :param resource_group_name: The name of the resource group (min length 1).
    :param resource_name: The managed cluster name (validated against the AKS name pattern).
    :keyword callable cls: optional hook; receives (pipeline_response, deserialized, {}).
    :return: the deserialized ManagedCluster, or the cls(...) result.
    :raises ~azure.core.exceptions.HttpResponseError: on a non-200 response.
    """
    cls = kwargs.pop('cls', None)
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-09-01"
    accept = "application/json"

    # Expand the metadata URL template, serializing each path segment inline.
    request_url = self._client.format_url(
        self.get.metadata['url'],
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
        resourceName=self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
    )
    query_params = {'api-version': self._serialize.query("api_version", api_version, 'str')}
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}

    request = self._client.get(request_url, query_params, headers)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('ManagedCluster', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'}
def _create_or_update_initial(
    self,
    resource_group_name,
    resource_name,
    parameters,
    **kwargs
):
    """Issue the initial PUT for the create-or-update LRO.

    ``parameters`` is serialized as a ``ManagedCluster`` request body.
    Returns the deserialized ``ManagedCluster`` from a 200 or 201 response
    (or the ``cls(...)`` result); ``begin_create_or_update`` drives polling.

    :raises ~azure.core.exceptions.HttpResponseError: for any other status.
    """
    cls = kwargs.pop('cls', None)
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-09-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._create_or_update_initial.metadata['url']
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
        'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the request body and send the PUT.
    body_content_kwargs = {}
    body_content = self._serialize.body(parameters, 'ManagedCluster')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200 = updated, 201 = created; both carry a ManagedCluster body.
    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if response.status_code == 200:
        deserialized = self._deserialize('ManagedCluster', pipeline_response)

    if response.status_code == 201:
        deserialized = self._deserialize('ManagedCluster', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'}
def begin_create_or_update(
    self,
    resource_group_name,
    resource_name,
    parameters,
    **kwargs
):
    """Begin the create-or-update LRO and return an LROPoller.

    :param resource_group_name: The name of the resource group.
    :param resource_name: The name of the managed cluster resource.
    :param parameters: serialized as a 'ManagedCluster' body by the initial PUT.
    :keyword polling: True -> ARMPolling, False -> NoPolling, or a custom PollingMethod.
    :keyword str continuation_token: resume a saved poller (skips the initial request).
    :keyword int polling_interval: delay between polls when no Retry-After header is present.
    :return: ~azure.core.polling.LROPoller whose result is the deserialized ManagedCluster.
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)
    cls = kwargs.pop('cls', None)
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)
    if cont_token is None:
        # cls=lambda x,y,z: x returns the raw PipelineResponse to seed the poller.
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # These kwargs applied only to the initial request.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final response once the LRO completes.
        deserialized = self._deserialize('ManagedCluster', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
        'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
    }

    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'}
def _update_tags_initial(
    self,
    resource_group_name,
    resource_name,
    parameters,
    **kwargs
):
    """Issue the initial PATCH for the update-tags LRO.

    ``parameters`` is serialized as a ``TagsObject`` body. Returns the
    deserialized ``ManagedCluster`` from a 200 response (or the ``cls(...)``
    result); ``begin_update_tags`` drives polling.

    :raises ~azure.core.exceptions.HttpResponseError: on a non-200 response.
    """
    cls = kwargs.pop('cls', None)
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-09-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._update_tags_initial.metadata['url']
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
        'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the tags body and send the PATCH.
    body_content_kwargs = {}
    body_content = self._serialize.body(parameters, 'TagsObject')
    body_content_kwargs['content'] = body_content
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('ManagedCluster', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'}
def begin_update_tags(
    self,
    resource_group_name,
    resource_name,
    parameters,
    **kwargs
):
    """Begin the update-tags LRO and return an LROPoller.

    :param resource_group_name: The name of the resource group.
    :param resource_name: The name of the managed cluster resource.
    :param parameters: serialized as a 'TagsObject' body by the initial PATCH.
    :keyword polling: True -> ARMPolling, False -> NoPolling, or a custom PollingMethod.
    :keyword str continuation_token: resume a saved poller (skips the initial request).
    :keyword int polling_interval: delay between polls when no Retry-After header is present.
    :return: ~azure.core.polling.LROPoller whose result is the deserialized ManagedCluster.
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)
    cls = kwargs.pop('cls', None)
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)
    if cont_token is None:
        # cls=lambda x,y,z: x returns the raw PipelineResponse to seed the poller.
        raw_result = self._update_tags_initial(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # These kwargs applied only to the initial request.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final response once the LRO completes.
        deserialized = self._deserialize('ManagedCluster', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
        'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
    }

    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'}
def _delete_initial(
    self,
    resource_group_name,
    resource_name,
    **kwargs
):
    """Issue the initial DELETE for the delete LRO.

    Returns ``None`` (or ``cls(pipeline_response, None, {})``);
    ``begin_delete`` drives polling to completion.

    :raises ~azure.core.exceptions.HttpResponseError: if the service replies
        with a status other than 202 or 204.
    """
    cls = kwargs.pop('cls', None)
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-09-01"
    accept = "application/json"

    # Construct URL
    url = self._delete_initial.metadata['url']
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
        'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 202 = deletion accepted, 204 = nothing to delete / no content.
    if response.status_code not in [202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'}
def begin_delete(
    self,
    resource_group_name,
    resource_name,
    **kwargs
):
    """Begin the delete LRO and return an LROPoller over a body-less result.

    :param resource_group_name: The name of the resource group.
    :param resource_name: The name of the managed cluster resource.
    :keyword polling: True -> ARMPolling, False -> NoPolling, or a custom PollingMethod.
    :keyword str continuation_token: resume a saved poller (skips the initial request).
    :keyword int polling_interval: delay between polls when no Retry-After header is present.
    :return: ~azure.core.polling.LROPoller returning None (or cls(response)).
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)
    cls = kwargs.pop('cls', None)
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)
    if cont_token is None:
        # cls=lambda x,y,z: x returns the raw PipelineResponse to seed the poller.
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # These kwargs applied only to the initial request.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Delete has no response body; only invoke the optional cls hook.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
        'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
    }

    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'}
def _reset_service_principal_profile_initial(
    self,
    resource_group_name,
    resource_name,
    parameters,
    **kwargs
):
    """Issue the initial POST to .../resetServicePrincipalProfile.

    ``parameters`` is serialized as a ``ManagedClusterServicePrincipalProfile``
    body. Returns ``None`` (or ``cls(pipeline_response, None, {})``);
    ``begin_reset_service_principal_profile`` drives polling.

    :raises ~azure.core.exceptions.HttpResponseError: if the service replies
        with a status other than 200 or 202.
    """
    cls = kwargs.pop('cls', None)
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-09-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._reset_service_principal_profile_initial.metadata['url']
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
        'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the profile body and send the POST.
    body_content_kwargs = {}
    body_content = self._serialize.body(parameters, 'ManagedClusterServicePrincipalProfile')
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_reset_service_principal_profile_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile'}
def begin_reset_service_principal_profile(
self,
resource_group_name,
resource_name,
parameters,
**kwargs
):
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = self._reset_service_principal_profile_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reset_service_principal_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile'}
def _reset_aad_profile_initial(
self,
resource_group_name,
resource_name,
parameters,
**kwargs
):
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
url = self._reset_aad_profile_initial.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {}
body_content = self._serialize.body(parameters, 'ManagedClusterAADProfile')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_reset_aad_profile_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile'}
def begin_reset_aad_profile(
self,
resource_group_name,
resource_name,
parameters,
**kwargs
):
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = self._reset_aad_profile_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reset_aad_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile'}
def _rotate_cluster_certificates_initial(
self,
resource_group_name,
resource_name,
**kwargs
):
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
url = self._rotate_cluster_certificates_initial.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_rotate_cluster_certificates_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/rotateClusterCertificates'}
def begin_rotate_cluster_certificates(
self,
resource_group_name,
resource_name,
**kwargs
):
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = self._rotate_cluster_certificates_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_rotate_cluster_certificates.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/rotateClusterCertificates'}
def _stop_initial(
self,
resource_group_name,
resource_name,
**kwargs
):
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
url = self._stop_initial.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/stop'}
def begin_stop(
self,
resource_group_name,
resource_name,
**kwargs
):
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = self._stop_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/stop'}
def _start_initial(
self,
resource_group_name,
resource_name,
**kwargs
):
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
url = self._start_initial.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/start'}
def begin_start(
self,
resource_group_name,
resource_name,
**kwargs
):
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = self._start_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/start'}
| true | true |
f73c6d4973b007fb37f719d2f748aef742b7d6bd | 6,231 | py | Python | code/video_analysis.py | SixSq/tensorflow-lite-object-detector-container | fff3e52dbf16eab93653271ce51839845eae6e73 | [
"Apache-2.0"
] | 2 | 2021-07-30T15:10:37.000Z | 2021-09-25T15:51:55.000Z | code/video_analysis.py | SixSq/tensorflow-lite-object-detector-container | fff3e52dbf16eab93653271ce51839845eae6e73 | [
"Apache-2.0"
] | null | null | null | code/video_analysis.py | SixSq/tensorflow-lite-object-detector-container | fff3e52dbf16eab93653271ce51839845eae6e73 | [
"Apache-2.0"
] | 1 | 2021-09-25T15:55:35.000Z | 2021-09-25T15:55:35.000Z | #!/usr/bin/env python3
# coding:utf-8
import os
import cv2
import time
import utils
import threading
import collections
import requests
from detect_objects import ObjectDetector
#from profilehooks import profile # pip install profilehooks
class Fps(object):
def __init__(self, buffer_size=15):
self.last_frames_ts = collections.deque(maxlen=buffer_size)
self.lock = threading.Lock()
def __call__(self):
with self.lock:
len_ts = self._len_ts()
if len_ts >= 2:
return len_ts / (self._newest_ts() - self._oldest_ts())
return None
def _len_ts(self):
return len(self.last_frames_ts)
def _oldest_ts(self):
return self.last_frames_ts[0]
def _newest_ts(self):
return self.last_frames_ts[-1]
def new_frame(self):
with self.lock:
self.last_frames_ts.append(time.time())
def get_fps(self):
return self()
class VideoAnalysis(object):
__metaclass__ = utils.Singleton
def __init__(self, input_source=0, quality=80, width=1280, height=720, threads=0, history_size=3,
model='model.tflite', labels='labels.txt', threshold=0.5, include_labels=None,
mqtt_broker=None, mqtt_topic='default'):
self.quality = quality
self.video_analysis = ObjectDetector(model, labels, input_source,
width=width, height=height, history_size=history_size,
threshold=threshold, include_labels=include_labels,
mqtt_broker=mqtt_broker, mqtt_topic=mqtt_topic)
self.font = cv2.FONT_HERSHEY_SIMPLEX
self.camera_fps = Fps(50)
self.network_fps = Fps(25)
self.analysis_fps = Fps(15)
self.video_analysis_queue = utils.RenewQueue()
self.prepare_frame_queue = utils.RenewQueue()
self.request_image_queue = utils.RenewQueue()
self.video_analysis_threads_number = threads
self.get_frame_thread = threading.Thread(target=self.run_get_frame, name='get_frame')
self.get_frame_thread.daemon = True
self.get_frame_thread.start()
self.prepare_frame_thread = threading.Thread(target=self.run_prepare_frame, name='prepare_frame')
self.prepare_frame_thread.daemon = True
self.prepare_frame_thread.start()
self.video_analysis_threads = [threading.Thread(target=self.run_video_analysis, name='video_analysis#%i' % (i+1,))
for i in range(self.video_analysis_threads_number)]
for thread in self.video_analysis_threads:
thread.daemon=True
thread.start()
def __del__(self):
pass
def run_get_frame(self):
last_ts = 0
while True:
frame = self.get_frame()
if frame is None:
return
ts = time.time()
#if (ts - last_ts) < 0.2:
# continue
last_ts = ts
self.video_analysis_queue.put(frame.copy())
self.prepare_frame_queue.put(frame.copy())
def run_prepare_frame(self):
while True:
frame = self.prepare_frame_queue.get()
self.prepare_frame(frame)
image = self.encode_frame_to_jpeg(frame)
self.request_image_queue.put(image)
def run_video_analysis(self):
while True:
frame = self.video_analysis_queue.get()
self.do_video_analysis(frame)
#@profile
def do_video_analysis(self, frame):
self.video_analysis.process_frame(frame)
self.analysis_fps.new_frame()
def draw_video_analysis_overlay(self, frame):
self.video_analysis.draw_overlay(frame)
def draw_fps(self, frame):
height = frame.shape[0]
height = frame.shape[0]
camera_fps = self.camera_fps()
if camera_fps is not None:
cv2.putText(frame, '{:5.2f} camera fps'.format(camera_fps),
(10,height-50), self.font, 0.8, (250,25,250), 2)
network_fps = self.network_fps()
if network_fps is not None:
cv2.putText(frame, '{:5.2f} effective fps'.format(network_fps),
(10,height-30), self.font, 0.8, (250,25,250), 2)
analysis_fps = self.analysis_fps()
if analysis_fps is not None:
cv2.putText(frame, '{:5.2f} analysis fps'.format(analysis_fps),
(10,height-10), self.font, 0.8, (250,25,250), 2)
def draw_date(self, frame):
cv2.putText(frame, time.strftime("%c"), (10,20), self.font, 0.6,
(250,25,250), 2)
#@profile
def get_frame(self):
success, frame = self.video_analysis.get_next_video_frame()
self.camera_fps.new_frame()
return frame
#@profile
def encode_frame_to_jpeg(self, frame):
# We are using Motion JPEG, but OpenCV defaults to capture raw images,
# so we must encode it into JPEG in order to correctly display the
# video stream.
ret, jpeg = cv2.imencode('.jpg', frame,
(cv2.IMWRITE_JPEG_QUALITY, self.quality))
return jpeg.tobytes()
#@profile
def prepare_frame(self, frame):
self.draw_video_analysis_overlay(frame)
self.draw_fps(frame)
self.draw_date(frame)
#@profile
def request_image(self):
image = self.request_image_queue.get()
self.network_fps.new_frame()
return image
# Not used. Old synchronous version
def get_image(self):
frame = self.get_frame()
self.do_video_analysis(frame)
self.draw_fps(frame)
self.draw_date(frame)
self.draw_video_analysis_overlay(frame)
return self.encode_frame_to_jpeg(frame)
def mjpeg_generator(self):
"""Video streaming generator function."""
while True:
image = self.request_image()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + image + b'\r\n')
def main():
#VideoAnalysis().request_image()
pass
if __name__ == "__main__":
main()
| 31.790816 | 122 | 0.609533 |
import os
import cv2
import time
import utils
import threading
import collections
import requests
from detect_objects import ObjectDetector
def __init__(self, buffer_size=15):
self.last_frames_ts = collections.deque(maxlen=buffer_size)
self.lock = threading.Lock()
def __call__(self):
with self.lock:
len_ts = self._len_ts()
if len_ts >= 2:
return len_ts / (self._newest_ts() - self._oldest_ts())
return None
def _len_ts(self):
return len(self.last_frames_ts)
def _oldest_ts(self):
return self.last_frames_ts[0]
def _newest_ts(self):
return self.last_frames_ts[-1]
def new_frame(self):
with self.lock:
self.last_frames_ts.append(time.time())
def get_fps(self):
return self()
class VideoAnalysis(object):
__metaclass__ = utils.Singleton
def __init__(self, input_source=0, quality=80, width=1280, height=720, threads=0, history_size=3,
model='model.tflite', labels='labels.txt', threshold=0.5, include_labels=None,
mqtt_broker=None, mqtt_topic='default'):
self.quality = quality
self.video_analysis = ObjectDetector(model, labels, input_source,
width=width, height=height, history_size=history_size,
threshold=threshold, include_labels=include_labels,
mqtt_broker=mqtt_broker, mqtt_topic=mqtt_topic)
self.font = cv2.FONT_HERSHEY_SIMPLEX
self.camera_fps = Fps(50)
self.network_fps = Fps(25)
self.analysis_fps = Fps(15)
self.video_analysis_queue = utils.RenewQueue()
self.prepare_frame_queue = utils.RenewQueue()
self.request_image_queue = utils.RenewQueue()
self.video_analysis_threads_number = threads
self.get_frame_thread = threading.Thread(target=self.run_get_frame, name='get_frame')
self.get_frame_thread.daemon = True
self.get_frame_thread.start()
self.prepare_frame_thread = threading.Thread(target=self.run_prepare_frame, name='prepare_frame')
self.prepare_frame_thread.daemon = True
self.prepare_frame_thread.start()
self.video_analysis_threads = [threading.Thread(target=self.run_video_analysis, name='video_analysis#%i' % (i+1,))
for i in range(self.video_analysis_threads_number)]
for thread in self.video_analysis_threads:
thread.daemon=True
thread.start()
def __del__(self):
pass
def run_get_frame(self):
last_ts = 0
while True:
frame = self.get_frame()
if frame is None:
return
ts = time.time()
last_ts = ts
self.video_analysis_queue.put(frame.copy())
self.prepare_frame_queue.put(frame.copy())
def run_prepare_frame(self):
while True:
frame = self.prepare_frame_queue.get()
self.prepare_frame(frame)
image = self.encode_frame_to_jpeg(frame)
self.request_image_queue.put(image)
def run_video_analysis(self):
while True:
frame = self.video_analysis_queue.get()
self.do_video_analysis(frame)
def do_video_analysis(self, frame):
self.video_analysis.process_frame(frame)
self.analysis_fps.new_frame()
def draw_video_analysis_overlay(self, frame):
self.video_analysis.draw_overlay(frame)
def draw_fps(self, frame):
height = frame.shape[0]
height = frame.shape[0]
camera_fps = self.camera_fps()
if camera_fps is not None:
cv2.putText(frame, '{:5.2f} camera fps'.format(camera_fps),
(10,height-50), self.font, 0.8, (250,25,250), 2)
network_fps = self.network_fps()
if network_fps is not None:
cv2.putText(frame, '{:5.2f} effective fps'.format(network_fps),
(10,height-30), self.font, 0.8, (250,25,250), 2)
analysis_fps = self.analysis_fps()
if analysis_fps is not None:
cv2.putText(frame, '{:5.2f} analysis fps'.format(analysis_fps),
(10,height-10), self.font, 0.8, (250,25,250), 2)
def draw_date(self, frame):
cv2.putText(frame, time.strftime("%c"), (10,20), self.font, 0.6,
(250,25,250), 2)
def get_frame(self):
success, frame = self.video_analysis.get_next_video_frame()
self.camera_fps.new_frame()
return frame
def encode_frame_to_jpeg(self, frame):
ret, jpeg = cv2.imencode('.jpg', frame,
(cv2.IMWRITE_JPEG_QUALITY, self.quality))
return jpeg.tobytes()
def prepare_frame(self, frame):
self.draw_video_analysis_overlay(frame)
self.draw_fps(frame)
self.draw_date(frame)
def request_image(self):
image = self.request_image_queue.get()
self.network_fps.new_frame()
return image
def get_image(self):
frame = self.get_frame()
self.do_video_analysis(frame)
self.draw_fps(frame)
self.draw_date(frame)
self.draw_video_analysis_overlay(frame)
return self.encode_frame_to_jpeg(frame)
def mjpeg_generator(self):
while True:
image = self.request_image()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + image + b'\r\n')
def main():
pass
if __name__ == "__main__":
main()
| true | true |
f73c6d5c679a3d159915561228b8a11dcf3f8abe | 49,146 | py | Python | contacts_and_people/migrations/0001_initial.py | techdragon/Arkestra | 8dad01982339f9d702f5ed6d58179b3a90aff193 | [
"BSD-2-Clause"
] | 1 | 2019-06-27T13:05:16.000Z | 2019-06-27T13:05:16.000Z | contacts_and_people/migrations/0001_initial.py | techdragon/Arkestra | 8dad01982339f9d702f5ed6d58179b3a90aff193 | [
"BSD-2-Clause"
] | null | null | null | contacts_and_people/migrations/0001_initial.py | techdragon/Arkestra | 8dad01982339f9d702f5ed6d58179b3a90aff193 | [
"BSD-2-Clause"
] | null | null | null | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Site'
db.create_table('contacts_and_people_site', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('site_name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
('post_town', self.gf('django.db.models.fields.CharField')(max_length=50)),
('country', self.gf('django.db.models.fields.CharField')(max_length=50)),
('description', self.gf('django.db.models.fields.TextField')(max_length=500, null=True, blank=True)),
))
db.send_create_signal('contacts_and_people', ['Site'])
# Adding model 'Building'
db.create_table('contacts_and_people_building', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
('number', self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True)),
('street', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
('additional_street_address', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
('postcode', self.gf('django.db.models.fields.CharField')(max_length=9, null=True, blank=True)),
('site', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contacts_and_people.Site'])),
('slug', self.gf('django.db.models.fields.SlugField')(db_index=True, max_length=255, unique=True, null=True, blank=True)),
('image', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['filer.Image'], null=True, blank=True)),
('summary', self.gf('django.db.models.fields.TextField')(default='', max_length=256)),
('description', self.gf('django.db.models.fields.related.ForeignKey')(related_name='building_description', null=True, to=orm['cms.Placeholder'])),
('getting_here', self.gf('django.db.models.fields.related.ForeignKey')(related_name='getting_here', null=True, to=orm['cms.Placeholder'])),
('access_and_parking', self.gf('django.db.models.fields.related.ForeignKey')(related_name='building_access_and_parking', null=True, to=orm['cms.Placeholder'])),
('map', self.gf('django.db.models.fields.BooleanField')(default=False)),
('latitude', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('longitude', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('zoom', self.gf('django.db.models.fields.IntegerField')(default=17, null=True, blank=True)),
))
db.send_create_signal('contacts_and_people', ['Building'])
# Adding model 'PhoneContact'
db.create_table('contacts_and_people_phonecontact', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('label', self.gf('django.db.models.fields.CharField')(max_length=64, null=True, blank=True)),
('country_code', self.gf('django.db.models.fields.CharField')(default='44', max_length=5)),
('area_code', self.gf('django.db.models.fields.CharField')(default='029', max_length=5)),
('number', self.gf('django.db.models.fields.CharField')(max_length=12)),
('internal_extension', self.gf('django.db.models.fields.CharField')(max_length=6, null=True, blank=True)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('object_id', self.gf('django.db.models.fields.IntegerField')(db_index=True)),
))
db.send_create_signal('contacts_and_people', ['PhoneContact'])
# Adding model 'EntityLite'
db.create_table('contacts_and_people_entitylite', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal('contacts_and_people', ['EntityLite'])
# Adding model 'Entity'
db.create_table('contacts_and_people_entity', (
('entitylite_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['contacts_and_people.EntityLite'], unique=True, primary_key=True)),
('url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('external_url', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='entity_item', null=True, to=orm['links.ExternalLink'])),
('slug', self.gf('django.db.models.fields.SlugField')(db_index=True, unique=True, max_length=60, blank=True)),
('precise_location', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('access_note', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True)),
('image', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['filer.Image'], null=True, blank=True)),
('short_name', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
('abstract_entity', self.gf('django.db.models.fields.BooleanField')(default=False)),
('parent', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='children', null=True, to=orm['contacts_and_people.Entity'])),
('display_parent', self.gf('django.db.models.fields.BooleanField')(default=True)),
('building_recapitulates_entity_name', self.gf('django.db.models.fields.BooleanField')(default=False)),
('building', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contacts_and_people.Building'], null=True, blank=True)),
('website', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='entity', unique=True, null=True, to=orm['cms.Page'])),
('auto_news_page', self.gf('django.db.models.fields.BooleanField')(default=False)),
('news_page_menu_title', self.gf('django.db.models.fields.CharField')(default='News & events', max_length=50)),
('news_page_intro', self.gf('django.db.models.fields.related.ForeignKey')(related_name='news_page_intro', null=True, to=orm['cms.Placeholder'])),
('auto_contacts_page', self.gf('django.db.models.fields.BooleanField')(default=False)),
('contacts_page_menu_title', self.gf('django.db.models.fields.CharField')(default='Contacts & people', max_length=50)),
('contacts_page_intro', self.gf('django.db.models.fields.related.ForeignKey')(related_name='contacts_page_intro', null=True, to=orm['cms.Placeholder'])),
('auto_vacancies_page', self.gf('django.db.models.fields.BooleanField')(default=False)),
('vacancies_page_menu_title', self.gf('django.db.models.fields.CharField')(default='Vacancies & studentships', max_length=50)),
('vacancies_page_intro', self.gf('django.db.models.fields.related.ForeignKey')(related_name='vacancies_page_intro', null=True, to=orm['cms.Placeholder'])),
# ('auto_publications_page', self.gf('django.db.models.fields.BooleanField')(default=False)),
# ('publications_page_menu_title', self.gf('django.db.models.fields.CharField')(default='Publications', max_length=50)),
('lft', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('rght', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('tree_id', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('level', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
))
db.send_create_signal('contacts_and_people', ['Entity'])
# Adding model 'Title'
db.create_table('contacts_and_people_title', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
('abbreviation', self.gf('django.db.models.fields.CharField')(unique=True, max_length=20)),
))
db.send_create_signal('contacts_and_people', ['Title'])
# Adding model 'PersonLite'
db.create_table('contacts_and_people_personlite', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contacts_and_people.Title'], to_field='abbreviation', null=True, blank=True)),
('given_name', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
('middle_names', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
('surname', self.gf('django.db.models.fields.CharField')(max_length=50)),
))
db.send_create_signal('contacts_and_people', ['PersonLite'])
# Adding model 'Person'
db.create_table('contacts_and_people_person', (
('personlite_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['contacts_and_people.PersonLite'], unique=True, primary_key=True)),
('url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('external_url', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='person_item', null=True, to=orm['links.ExternalLink'])),
('slug', self.gf('django.db.models.fields.SlugField')(db_index=True, unique=True, max_length=60, blank=True)),
('precise_location', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('access_note', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True)),
('image', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['filer.Image'], null=True, blank=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='person_user', unique=True, null=True, to=orm['auth.User'])),
('institutional_username', self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True)),
('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('description', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cms.Placeholder'], null=True)),
('building', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contacts_and_people.Building'], null=True, blank=True)),
('override_entity', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='people_override', null=True, to=orm['contacts_and_people.Entity'])),
('please_contact', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='contact_for', null=True, to=orm['contacts_and_people.Person'])),
('staff_id', self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True)),
('data_feed_locked', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('contacts_and_people', ['Person'])
# Adding model 'Teacher'
db.create_table('contacts_and_people_teacher', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('person', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='teacher', unique=True, null=True, to=orm['contacts_and_people.Person'])),
('dummy_field_one', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
('dummy_field_two', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
))
db.send_create_signal('contacts_and_people', ['Teacher'])
# Adding model 'Membership'
db.create_table('contacts_and_people_membership', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('person', self.gf('django.db.models.fields.related.ForeignKey')(related_name='member_of', to=orm['contacts_and_people.Person'])),
('entity', self.gf('django.db.models.fields.related.ForeignKey')(related_name='members', to=orm['contacts_and_people.Entity'])),
('display_role', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='display_roles', null=True, to=orm['contacts_and_people.Membership'])),
('key_contact', self.gf('django.db.models.fields.BooleanField')(default=False)),
('role', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
('importance_to_person', self.gf('django.db.models.fields.IntegerField')(default=1, null=True, blank=True)),
('importance_to_entity', self.gf('django.db.models.fields.IntegerField')(default=1, null=True, blank=True)),
))
db.send_create_signal('contacts_and_people', ['Membership'])
# Adding model 'EntityAutoPageLinkPluginEditor'
db.create_table('cmsplugin_entityautopagelinkplugineditor', (
('cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
('link_to', self.gf('django.db.models.fields.CharField')(max_length=50)),
('entity', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='auto_page_plugin', null=True, to=orm['contacts_and_people.Entity'])),
('text_override', self.gf('django.db.models.fields.CharField')(max_length=256, null=True, blank=True)),
))
db.send_create_signal('contacts_and_people', ['EntityAutoPageLinkPluginEditor'])
# Adding model 'EntityDirectoryPluginEditor'
db.create_table('cmsplugin_entitydirectoryplugineditor', (
('cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
('entity', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='directory_plugin', null=True, to=orm['contacts_and_people.Entity'])),
('levels', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True, blank=True)),
('display_descriptions_to_level', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0, null=True, blank=True)),
('link_icons', self.gf('django.db.models.fields.BooleanField')(default=True)),
('use_short_names', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal('contacts_and_people', ['EntityDirectoryPluginEditor'])
# Adding model 'EntityMembersPluginEditor'
db.create_table('cmsplugin_entitymembersplugineditor', (
('cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
('entity', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='entity_members_plugin', null=True, to=orm['contacts_and_people.Entity'])),
))
db.send_create_signal('contacts_and_people', ['EntityMembersPluginEditor'])
def backwards(self, orm):
# Deleting model 'Site'
db.delete_table('contacts_and_people_site')
# Deleting model 'Building'
db.delete_table('contacts_and_people_building')
# Deleting model 'PhoneContact'
db.delete_table('contacts_and_people_phonecontact')
# Deleting model 'EntityLite'
db.delete_table('contacts_and_people_entitylite')
# Deleting model 'Entity'
db.delete_table('contacts_and_people_entity')
# Deleting model 'Title'
db.delete_table('contacts_and_people_title')
# Deleting model 'PersonLite'
db.delete_table('contacts_and_people_personlite')
# Deleting model 'Person'
db.delete_table('contacts_and_people_person')
# Deleting model 'Teacher'
db.delete_table('contacts_and_people_teacher')
# Deleting model 'Membership'
db.delete_table('contacts_and_people_membership')
# Deleting model 'EntityAutoPageLinkPluginEditor'
db.delete_table('cmsplugin_entityautopagelinkplugineditor')
# Deleting model 'EntityDirectoryPluginEditor'
db.delete_table('cmsplugin_entitydirectoryplugineditor')
# Deleting model 'EntityMembersPluginEditor'
db.delete_table('cmsplugin_entitymembersplugineditor')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('site', 'tree_id', 'lft')", 'object_name': 'Page'},
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'page_flags': ('django.db.models.fields.TextField', [], {'null': True, 'blank': True}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'contacts_and_people.building': {
'Meta': {'ordering': "('site', 'street', 'number', 'name')", 'object_name': 'Building'},
'access_and_parking': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'building_access_and_parking'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'additional_street_address': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'building_description'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'getting_here': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'getting_here'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'map': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '9', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contacts_and_people.Site']"}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '256'}),
'zoom': ('django.db.models.fields.IntegerField', [], {'default': '17', 'null': 'True', 'blank': 'True'})
},
'contacts_and_people.entity': {
'Meta': {'ordering': "['tree_id', 'lft']", 'object_name': 'Entity', '_ormbases': ['contacts_and_people.EntityLite']},
'abstract_entity': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'access_note': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'auto_contacts_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_news_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_publications_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_vacancies_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'building': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contacts_and_people.Building']", 'null': 'True', 'blank': 'True'}),
'building_recapitulates_entity_name': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'contacts_page_intro': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contacts_page_intro'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'contacts_page_menu_title': ('django.db.models.fields.CharField', [], {'default': "'Contacts & people'", 'max_length': '50'}),
'display_parent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'entitylite_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['contacts_and_people.EntityLite']", 'unique': 'True', 'primary_key': 'True'}),
'external_url': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'entity_item'", 'null': 'True', 'to': "orm['links.ExternalLink']"}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'news_page_intro': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'news_page_intro'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'news_page_menu_title': ('django.db.models.fields.CharField', [], {'default': "'News & events'", 'max_length': '50'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['contacts_and_people.Entity']"}),
'precise_location': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'publications_page_menu_title': ('django.db.models.fields.CharField', [], {'default': "'Publications'", 'max_length': '50'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '60', 'blank': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'vacancies_page_intro': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vacancies_page_intro'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'vacancies_page_menu_title': ('django.db.models.fields.CharField', [], {'default': "'Vacancies & studentships'", 'max_length': '50'}),
'website': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'entity'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"})
},
'contacts_and_people.entityautopagelinkplugineditor': {
'Meta': {'object_name': 'EntityAutoPageLinkPluginEditor', 'db_table': "'cmsplugin_entityautopagelinkplugineditor'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'auto_page_plugin'", 'null': 'True', 'to': "orm['contacts_and_people.Entity']"}),
'link_to': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'text_override': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'})
},
'contacts_and_people.entitydirectoryplugineditor': {
'Meta': {'object_name': 'EntityDirectoryPluginEditor', 'db_table': "'cmsplugin_entitydirectoryplugineditor'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'display_descriptions_to_level': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'directory_plugin'", 'null': 'True', 'to': "orm['contacts_and_people.Entity']"}),
'levels': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'link_icons': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'use_short_names': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'contacts_and_people.entitylite': {
'Meta': {'object_name': 'EntityLite'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'contacts_and_people.entitymembersplugineditor': {
'Meta': {'object_name': 'EntityMembersPluginEditor', 'db_table': "'cmsplugin_entitymembersplugineditor'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'entity_members_plugin'", 'null': 'True', 'to': "orm['contacts_and_people.Entity']"})
},
'contacts_and_people.membership': {
'Meta': {'ordering': "('-importance_to_entity', 'person__surname')", 'object_name': 'Membership'},
'display_role': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'display_roles'", 'null': 'True', 'to': "orm['contacts_and_people.Membership']"}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'to': "orm['contacts_and_people.Entity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance_to_entity': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'importance_to_person': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'key_contact': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_of'", 'to': "orm['contacts_and_people.Person']"}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
'contacts_and_people.person': {
'Meta': {'ordering': "['surname', 'given_name', 'user']", 'object_name': 'Person', '_ormbases': ['contacts_and_people.PersonLite']},
'access_note': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'building': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contacts_and_people.Building']", 'null': 'True', 'blank': 'True'}),
'data_feed_locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'entities': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'people'", 'to': "orm['contacts_and_people.Entity']", 'through': "orm['contacts_and_people.Membership']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'external_url': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'person_item'", 'null': 'True', 'to': "orm['links.ExternalLink']"}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'blank': 'True'}),
'institutional_username': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'override_entity': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'people_override'", 'null': 'True', 'to': "orm['contacts_and_people.Entity']"}),
'personlite_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['contacts_and_people.PersonLite']", 'unique': 'True', 'primary_key': 'True'}),
'please_contact': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'contact_for'", 'null': 'True', 'to': "orm['contacts_and_people.Person']"}),
'precise_location': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '60', 'blank': 'True'}),
'staff_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'person_user'", 'unique': 'True', 'null': 'True', 'to': "orm['auth.User']"})
},
'contacts_and_people.personlite': {
'Meta': {'object_name': 'PersonLite'},
'given_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'middle_names': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contacts_and_people.Title']", 'to_field': "'abbreviation'", 'null': 'True', 'blank': 'True'})
},
'contacts_and_people.phonecontact': {
'Meta': {'ordering': "('label',)", 'object_name': 'PhoneContact'},
'area_code': ('django.db.models.fields.CharField', [], {'default': "'029'", 'max_length': '5'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'country_code': ('django.db.models.fields.CharField', [], {'default': "'44'", 'max_length': '5'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_extension': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'})
},
'contacts_and_people.site': {
'Meta': {'ordering': "('country', 'site_name', 'post_town')", 'object_name': 'Site'},
'country': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post_town': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'site_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
'contacts_and_people.teacher': {
'Meta': {'object_name': 'Teacher'},
'dummy_field_one': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'dummy_field_two': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'teacher'", 'unique': 'True', 'null': 'True', 'to': "orm['contacts_and_people.Person']"})
},
'contacts_and_people.title': {
'Meta': {'ordering': "['title']", 'object_name': 'Title'},
'abbreviation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'links.externallink': {
'Meta': {'ordering': "['title']", 'object_name': 'ExternalLink'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'external_site': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'links'", 'null': 'True', 'to': "orm['links.ExternalSite']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'links'", 'null': 'True', 'to': "orm['links.LinkType']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'links.externalsite': {
'Meta': {'ordering': "['site']", 'object_name': 'ExternalSite'},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['links.ExternalSite']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'links.linktype': {
'Meta': {'object_name': 'LinkType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'scheme': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['contacts_and_people']
| 89.846435 | 262 | 0.60892 |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: create the initial tables for the
        ``contacts_and_people`` app (South schema migration, auto-generated).

        Creates thirteen tables — Site, Building, PhoneContact, EntityLite,
        Entity, Title, PersonLite, Person, Teacher, Membership, and three
        CMS plugin editor tables — each followed by
        ``db.send_create_signal`` so South emits the post-create signal.

        :param orm: frozen ORM snapshot supplied by South; used only to
            resolve model references in foreign-key definitions.
        """
        # Adding model 'Site'
        db.create_table('contacts_and_people_site', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('site_name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
            ('post_town', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('country', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('description', self.gf('django.db.models.fields.TextField')(max_length=500, null=True, blank=True)),
        ))
        db.send_create_signal('contacts_and_people', ['Site'])
        # Adding model 'Building' (address fields plus CMS placeholders and map coordinates)
        db.create_table('contacts_and_people_building', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
            ('number', self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True)),
            ('street', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
            ('additional_street_address', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
            ('postcode', self.gf('django.db.models.fields.CharField')(max_length=9, null=True, blank=True)),
            ('site', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contacts_and_people.Site'])),
            ('slug', self.gf('django.db.models.fields.SlugField')(db_index=True, max_length=255, unique=True, null=True, blank=True)),
            ('image', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['filer.Image'], null=True, blank=True)),
            ('summary', self.gf('django.db.models.fields.TextField')(default='', max_length=256)),
            ('description', self.gf('django.db.models.fields.related.ForeignKey')(related_name='building_description', null=True, to=orm['cms.Placeholder'])),
            ('getting_here', self.gf('django.db.models.fields.related.ForeignKey')(related_name='getting_here', null=True, to=orm['cms.Placeholder'])),
            ('access_and_parking', self.gf('django.db.models.fields.related.ForeignKey')(related_name='building_access_and_parking', null=True, to=orm['cms.Placeholder'])),
            ('map', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('latitude', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
            ('longitude', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
            ('zoom', self.gf('django.db.models.fields.IntegerField')(default=17, null=True, blank=True)),
        ))
        db.send_create_signal('contacts_and_people', ['Building'])
        # Adding model 'PhoneContact' (generic relation via content_type + object_id)
        db.create_table('contacts_and_people_phonecontact', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('label', self.gf('django.db.models.fields.CharField')(max_length=64, null=True, blank=True)),
            ('country_code', self.gf('django.db.models.fields.CharField')(default='44', max_length=5)),
            ('area_code', self.gf('django.db.models.fields.CharField')(default='029', max_length=5)),
            ('number', self.gf('django.db.models.fields.CharField')(max_length=12)),
            ('internal_extension', self.gf('django.db.models.fields.CharField')(max_length=6, null=True, blank=True)),
            ('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
            ('object_id', self.gf('django.db.models.fields.IntegerField')(db_index=True)),
        ))
        db.send_create_signal('contacts_and_people', ['PhoneContact'])
        # Adding model 'EntityLite' (base table for Entity via multi-table inheritance)
        db.create_table('contacts_and_people_entitylite', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
        ))
        db.send_create_signal('contacts_and_people', ['EntityLite'])
        # Adding model 'Entity' (inherits EntityLite; MPTT tree fields lft/rght/tree_id/level)
        db.create_table('contacts_and_people_entity', (
            ('entitylite_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['contacts_and_people.EntityLite'], unique=True, primary_key=True)),
            ('url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
            ('external_url', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='entity_item', null=True, to=orm['links.ExternalLink'])),
            ('slug', self.gf('django.db.models.fields.SlugField')(db_index=True, unique=True, max_length=60, blank=True)),
            ('precise_location', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('access_note', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('email', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True)),
            ('image', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['filer.Image'], null=True, blank=True)),
            ('short_name', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
            ('abstract_entity', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('parent', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='children', null=True, to=orm['contacts_and_people.Entity'])),
            ('display_parent', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('building_recapitulates_entity_name', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('building', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contacts_and_people.Building'], null=True, blank=True)),
            ('website', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='entity', unique=True, null=True, to=orm['cms.Page'])),
            ('auto_news_page', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('news_page_menu_title', self.gf('django.db.models.fields.CharField')(default='News & events', max_length=50)),
            ('news_page_intro', self.gf('django.db.models.fields.related.ForeignKey')(related_name='news_page_intro', null=True, to=orm['cms.Placeholder'])),
            ('auto_contacts_page', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('contacts_page_menu_title', self.gf('django.db.models.fields.CharField')(default='Contacts & people', max_length=50)),
            ('contacts_page_intro', self.gf('django.db.models.fields.related.ForeignKey')(related_name='contacts_page_intro', null=True, to=orm['cms.Placeholder'])),
            ('auto_vacancies_page', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('vacancies_page_menu_title', self.gf('django.db.models.fields.CharField')(default='Vacancies & studentships', max_length=50)),
            ('vacancies_page_intro', self.gf('django.db.models.fields.related.ForeignKey')(related_name='vacancies_page_intro', null=True, to=orm['cms.Placeholder'])),
            ('lft', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
            ('rght', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
            ('tree_id', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
            ('level', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
        ))
        db.send_create_signal('contacts_and_people', ['Entity'])
        # Adding model 'Title'
        db.create_table('contacts_and_people_title', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
            ('abbreviation', self.gf('django.db.models.fields.CharField')(unique=True, max_length=20)),
        ))
        db.send_create_signal('contacts_and_people', ['Title'])
        # Adding model 'PersonLite' (base table for Person; title FK targets Title.abbreviation)
        db.create_table('contacts_and_people_personlite', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contacts_and_people.Title'], to_field='abbreviation', null=True, blank=True)),
            ('given_name', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
            ('middle_names', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
            ('surname', self.gf('django.db.models.fields.CharField')(max_length=50)),
        ))
        db.send_create_signal('contacts_and_people', ['PersonLite'])
        # Adding model 'Person' (inherits PersonLite; optional one-to-one link to auth.User)
        db.create_table('contacts_and_people_person', (
            ('personlite_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['contacts_and_people.PersonLite'], unique=True, primary_key=True)),
            ('url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
            ('external_url', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='person_item', null=True, to=orm['links.ExternalLink'])),
            ('slug', self.gf('django.db.models.fields.SlugField')(db_index=True, unique=True, max_length=60, blank=True)),
            ('precise_location', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('access_note', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('email', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True)),
            ('image', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['filer.Image'], null=True, blank=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='person_user', unique=True, null=True, to=orm['auth.User'])),
            ('institutional_username', self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True)),
            ('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('description', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cms.Placeholder'], null=True)),
            ('building', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contacts_and_people.Building'], null=True, blank=True)),
            ('override_entity', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='people_override', null=True, to=orm['contacts_and_people.Entity'])),
            ('please_contact', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='contact_for', null=True, to=orm['contacts_and_people.Person'])),
            ('staff_id', self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True)),
            ('data_feed_locked', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('contacts_and_people', ['Person'])
        # Adding model 'Teacher'
        db.create_table('contacts_and_people_teacher', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('person', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='teacher', unique=True, null=True, to=orm['contacts_and_people.Person'])),
            ('dummy_field_one', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
            ('dummy_field_two', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
        ))
        db.send_create_signal('contacts_and_people', ['Teacher'])
        # Adding model 'Membership' (through-table joining Person and Entity)
        db.create_table('contacts_and_people_membership', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('person', self.gf('django.db.models.fields.related.ForeignKey')(related_name='member_of', to=orm['contacts_and_people.Person'])),
            ('entity', self.gf('django.db.models.fields.related.ForeignKey')(related_name='members', to=orm['contacts_and_people.Entity'])),
            ('display_role', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='display_roles', null=True, to=orm['contacts_and_people.Membership'])),
            ('key_contact', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('role', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
            ('importance_to_person', self.gf('django.db.models.fields.IntegerField')(default=1, null=True, blank=True)),
            ('importance_to_entity', self.gf('django.db.models.fields.IntegerField')(default=1, null=True, blank=True)),
        ))
        db.send_create_signal('contacts_and_people', ['Membership'])
        # Adding CMS plugin model 'EntityAutoPageLinkPluginEditor'
        db.create_table('cmsplugin_entityautopagelinkplugineditor', (
            ('cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
            ('link_to', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('entity', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='auto_page_plugin', null=True, to=orm['contacts_and_people.Entity'])),
            ('text_override', self.gf('django.db.models.fields.CharField')(max_length=256, null=True, blank=True)),
        ))
        db.send_create_signal('contacts_and_people', ['EntityAutoPageLinkPluginEditor'])
        # Adding CMS plugin model 'EntityDirectoryPluginEditor'
        db.create_table('cmsplugin_entitydirectoryplugineditor', (
            ('cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
            ('entity', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='directory_plugin', null=True, to=orm['contacts_and_people.Entity'])),
            ('levels', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True, blank=True)),
            ('display_descriptions_to_level', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0, null=True, blank=True)),
            ('link_icons', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('use_short_names', self.gf('django.db.models.fields.BooleanField')(default=True)),
        ))
        db.send_create_signal('contacts_and_people', ['EntityDirectoryPluginEditor'])
        # Adding CMS plugin model 'EntityMembersPluginEditor'
        db.create_table('cmsplugin_entitymembersplugineditor', (
            ('cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
            ('entity', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='entity_members_plugin', null=True, to=orm['contacts_and_people.Entity'])),
        ))
        db.send_create_signal('contacts_and_people', ['EntityMembersPluginEditor'])
def backwards(self, orm):
    # Undo forwards(): drop every table this migration created, in the
    # same order as the original hand-written delete_table statements.
    for table_name in (
        'contacts_and_people_site',
        'contacts_and_people_building',
        'contacts_and_people_phonecontact',
        'contacts_and_people_entitylite',
        'contacts_and_people_entity',
        'contacts_and_people_title',
        'contacts_and_people_personlite',
        'contacts_and_people_person',
        'contacts_and_people_teacher',
        'contacts_and_people_membership',
        'cmsplugin_entityautopagelinkplugineditor',
        'cmsplugin_entitydirectoryplugineditor',
        'cmsplugin_entitymembersplugineditor',
    ):
        db.delete_table(table_name)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('site', 'tree_id', 'lft')", 'object_name': 'Page'},
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'page_flags': ('django.db.models.fields.TextField', [], {'null': True, 'blank': True}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'contacts_and_people.building': {
'Meta': {'ordering': "('site', 'street', 'number', 'name')", 'object_name': 'Building'},
'access_and_parking': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'building_access_and_parking'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'additional_street_address': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'building_description'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'getting_here': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'getting_here'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'map': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '9', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contacts_and_people.Site']"}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '256'}),
'zoom': ('django.db.models.fields.IntegerField', [], {'default': '17', 'null': 'True', 'blank': 'True'})
},
'contacts_and_people.entity': {
'Meta': {'ordering': "['tree_id', 'lft']", 'object_name': 'Entity', '_ormbases': ['contacts_and_people.EntityLite']},
'abstract_entity': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'access_note': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'auto_contacts_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_news_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_publications_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_vacancies_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'building': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contacts_and_people.Building']", 'null': 'True', 'blank': 'True'}),
'building_recapitulates_entity_name': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'contacts_page_intro': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contacts_page_intro'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'contacts_page_menu_title': ('django.db.models.fields.CharField', [], {'default': "'Contacts & people'", 'max_length': '50'}),
'display_parent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'entitylite_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['contacts_and_people.EntityLite']", 'unique': 'True', 'primary_key': 'True'}),
'external_url': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'entity_item'", 'null': 'True', 'to': "orm['links.ExternalLink']"}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'news_page_intro': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'news_page_intro'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'news_page_menu_title': ('django.db.models.fields.CharField', [], {'default': "'News & events'", 'max_length': '50'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['contacts_and_people.Entity']"}),
'precise_location': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'publications_page_menu_title': ('django.db.models.fields.CharField', [], {'default': "'Publications'", 'max_length': '50'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '60', 'blank': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'vacancies_page_intro': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vacancies_page_intro'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'vacancies_page_menu_title': ('django.db.models.fields.CharField', [], {'default': "'Vacancies & studentships'", 'max_length': '50'}),
'website': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'entity'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"})
},
'contacts_and_people.entityautopagelinkplugineditor': {
'Meta': {'object_name': 'EntityAutoPageLinkPluginEditor', 'db_table': "'cmsplugin_entityautopagelinkplugineditor'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'auto_page_plugin'", 'null': 'True', 'to': "orm['contacts_and_people.Entity']"}),
'link_to': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'text_override': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'})
},
'contacts_and_people.entitydirectoryplugineditor': {
'Meta': {'object_name': 'EntityDirectoryPluginEditor', 'db_table': "'cmsplugin_entitydirectoryplugineditor'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'display_descriptions_to_level': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'directory_plugin'", 'null': 'True', 'to': "orm['contacts_and_people.Entity']"}),
'levels': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'link_icons': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'use_short_names': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'contacts_and_people.entitylite': {
'Meta': {'object_name': 'EntityLite'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'contacts_and_people.entitymembersplugineditor': {
'Meta': {'object_name': 'EntityMembersPluginEditor', 'db_table': "'cmsplugin_entitymembersplugineditor'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'entity_members_plugin'", 'null': 'True', 'to': "orm['contacts_and_people.Entity']"})
},
'contacts_and_people.membership': {
'Meta': {'ordering': "('-importance_to_entity', 'person__surname')", 'object_name': 'Membership'},
'display_role': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'display_roles'", 'null': 'True', 'to': "orm['contacts_and_people.Membership']"}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'to': "orm['contacts_and_people.Entity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance_to_entity': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'importance_to_person': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'key_contact': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_of'", 'to': "orm['contacts_and_people.Person']"}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
'contacts_and_people.person': {
'Meta': {'ordering': "['surname', 'given_name', 'user']", 'object_name': 'Person', '_ormbases': ['contacts_and_people.PersonLite']},
'access_note': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'building': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contacts_and_people.Building']", 'null': 'True', 'blank': 'True'}),
'data_feed_locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'entities': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'people'", 'to': "orm['contacts_and_people.Entity']", 'through': "orm['contacts_and_people.Membership']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'external_url': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'person_item'", 'null': 'True', 'to': "orm['links.ExternalLink']"}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'blank': 'True'}),
'institutional_username': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'override_entity': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'people_override'", 'null': 'True', 'to': "orm['contacts_and_people.Entity']"}),
'personlite_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['contacts_and_people.PersonLite']", 'unique': 'True', 'primary_key': 'True'}),
'please_contact': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'contact_for'", 'null': 'True', 'to': "orm['contacts_and_people.Person']"}),
'precise_location': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '60', 'blank': 'True'}),
'staff_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'person_user'", 'unique': 'True', 'null': 'True', 'to': "orm['auth.User']"})
},
'contacts_and_people.personlite': {
'Meta': {'object_name': 'PersonLite'},
'given_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'middle_names': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contacts_and_people.Title']", 'to_field': "'abbreviation'", 'null': 'True', 'blank': 'True'})
},
'contacts_and_people.phonecontact': {
'Meta': {'ordering': "('label',)", 'object_name': 'PhoneContact'},
'area_code': ('django.db.models.fields.CharField', [], {'default': "'029'", 'max_length': '5'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'country_code': ('django.db.models.fields.CharField', [], {'default': "'44'", 'max_length': '5'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_extension': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'})
},
'contacts_and_people.site': {
'Meta': {'ordering': "('country', 'site_name', 'post_town')", 'object_name': 'Site'},
'country': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post_town': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'site_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
'contacts_and_people.teacher': {
'Meta': {'object_name': 'Teacher'},
'dummy_field_one': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'dummy_field_two': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'teacher'", 'unique': 'True', 'null': 'True', 'to': "orm['contacts_and_people.Person']"})
},
'contacts_and_people.title': {
'Meta': {'ordering': "['title']", 'object_name': 'Title'},
'abbreviation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'links.externallink': {
'Meta': {'ordering': "['title']", 'object_name': 'ExternalLink'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'external_site': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'links'", 'null': 'True', 'to': "orm['links.ExternalSite']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'links'", 'null': 'True', 'to': "orm['links.LinkType']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'links.externalsite': {
'Meta': {'ordering': "['site']", 'object_name': 'ExternalSite'},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['links.ExternalSite']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'links.linktype': {
'Meta': {'object_name': 'LinkType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'scheme': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['contacts_and_people']
| true | true |
f73c6d7d0f9b5d19dbc647d9a5e7641f9176d530 | 3,365 | py | Python | continuous_Dice_coefficient.py | rubyshamir/cDC | c4ebef2ff96e65e197c6c995fb896f72d50de747 | [
"MIT"
] | 2 | 2021-03-24T13:40:52.000Z | 2021-11-16T12:28:58.000Z | continuous_Dice_coefficient.py | rubyshamir/cDC | c4ebef2ff96e65e197c6c995fb896f72d50de747 | [
"MIT"
] | null | null | null | continuous_Dice_coefficient.py | rubyshamir/cDC | c4ebef2ff96e65e197c6c995fb896f72d50de747 | [
"MIT"
] | null | null | null | import numpy as np
'''
Implementation of the continuous Dice Coefficient (https://www.biorxiv.org/content/10.1101/306977v1.full.pdf)
"Continuous Dice Coefficient: a Method for Evaluating Probabilistic Segmentations"
Reuben R Shamir,Yuval Duchin, Jinyoung Kim, Guillermo Sapiro, and Noam Harel
Input:
A - ground-truth or gold-standard binary segmentation (expert labeled data; assumes values are 0 or 1)
B - segmentation probabilistic map (your algorithm's output; assumes values are between 0-1)
Author: Ruby Shamir (feedback is welcome at shamir.ruby at gmail)
'''
def continous_Dice_coefficient(A_binary, B_probability_map):
    """Continuous Dice coefficient (cDC) between a binary ground truth and a
    probabilistic segmentation.

    A_binary: array with values in {0, 1} (expert / gold-standard labels).
    B_probability_map: array of the same shape with values in [0, 1].
    Returns the cDC as a float (equals the classic Dice when B is binary).
    """
    overlap = A_binary * B_probability_map
    overlap_sum = np.sum(overlap)
    # c = mean probability over the strictly-positive overlap voxels;
    # max(..., 1) guards the division when the overlap is empty.
    positive_count = np.size(overlap[overlap > 0])
    c = overlap_sum / max(positive_count, 1)
    denominator = c * np.sum(A_binary) + np.sum(B_probability_map)
    return 2 * overlap_sum / denominator
def Dice_coefficient(A_binary, B_binary):
    """Classic Dice coefficient between two binary segmentations.

    Both inputs are arrays with values in {0, 1}; returns
    2*|A∩B| / (|A| + |B|) as a float.
    """
    intersection = np.sum(A_binary * B_binary)
    return 2 * intersection / (np.sum(A_binary) + np.sum(B_binary))
def simulate_probablistic_segmentation(start, end, n_points=100, mu=0.0,
                                       sigma=2.0, threshold=0.01):
    """Simulate a probabilistic segmentation as a radial Gaussian blob.

    Builds an n_points x n_points grid over [start, end] x [start, end] and
    evaluates exp(-(d - mu)^2 / (2*sigma^2)) at each point, where d is the
    distance from the origin.  Values below ``threshold`` are zeroed so the
    blob has compact support.

    Parameters (defaults reproduce the original hard-coded behaviour):
        start, end: extent of the square grid along each axis.
        n_points:   grid resolution per axis (was fixed at 100).
        mu, sigma:  Gaussian centre/width in distance units (were 0.0 / 2.0).
        threshold:  probabilities below this are clamped to 0 (was 0.01).

    Returns an (n_points, n_points) float array with values in [0, 1].
    """
    x, y = np.meshgrid(np.linspace(start, end, n_points),
                       np.linspace(start, end, n_points))
    d = np.sqrt(x * x + y * y)  # distance of each grid point from the origin
    segmentation_result = np.exp(-((d - mu) * (d - mu)) / (2.0 * sigma * sigma))
    # Zero the far tail so the simulated segmentation has a finite footprint.
    segmentation_result[segmentation_result < threshold] = 0
    return segmentation_result
## compare Dice and continuous Dice under simulated error #########
if __name__ == '__main__':
    # Simulate a ground-truth disc and a Gaussian probabilistic
    # segmentation, then shift the probabilistic map sideways in steps
    # and show that the cDC degrades more gracefully than binary Dice.
    start, end = -10, 10
    step = 2  # pixels shifted per iteration

    segmentation_result = simulate_probablistic_segmentation(start, end)
    ground_truth_simulated = np.ones_like(segmentation_result)
    ground_truth_simulated[segmentation_result < 0.01] = 0

    def _scores(prob_map):
        # Score the current (possibly shifted) probabilistic map: cDC on
        # the probabilities, classic Dice on its binarised version.  When
        # the input is binary, the continuous Dice equals the Dice.
        binary_map = np.zeros_like(prob_map)
        binary_map[prob_map > 0.01] = 1
        return (continous_Dice_coefficient(ground_truth_simulated, prob_map),
                Dice_coefficient(ground_truth_simulated, binary_map))

    all_cDice = []
    all_Dice = []
    cDC, DC = _scores(segmentation_result)
    all_cDice.append(cDC)
    all_Dice.append(DC)

    for _ in range(4):
        # Shift the probabilistic map `step` pixels: pad on the right,
        # then drop the leftmost columns.
        pad = np.zeros((segmentation_result.shape[0], step))
        segmentation_result = np.hstack((segmentation_result, pad))
        segmentation_result = np.delete(segmentation_result, range(step), 1)
        cDC, DC = _scores(segmentation_result)
        all_cDice.append(cDC)
        all_Dice.append(DC)

    all_cDice = [str(round(v, 2)) for v in all_cDice]
    all_Dice = [str(round(v, 2)) for v in all_Dice]
    print('Shift errors of: (mm)')
    print([str(round(i, 2)) for i in range(0, 10, 2)])
    print('Reduced the continues Dice:')
    print(all_cDice)
    print('And the original Dice is:')
    print(all_Dice)
| 39.127907 | 121 | 0.702526 | import numpy as np
def continous_Dice_coefficient(A_binary, B_probability_map):
    """Continuous Dice coefficient between a binary mask and a probability map.

    The binary term in the denominator is rescaled by ``c`` — the mean
    probability over the overlap region — so that a perfectly aligned
    probabilistic segmentation still scores 1.0. ``max(..., 1)`` guards the
    mean against an empty overlap.
    """
    overlap = A_binary * B_probability_map
    overlap_sum = np.sum(overlap)
    c = overlap_sum / max(np.size(overlap[overlap > 0]), 1)
    return 2 * overlap_sum / (c * np.sum(A_binary) + np.sum(B_probability_map))
def Dice_coefficient(A_binary, B_binary):
    """Classic Dice overlap between two binary masks: 2|A∩B| / (|A| + |B|)."""
    intersection = np.sum(A_binary * B_binary)
    return 2 * intersection / (np.sum(A_binary) + np.sum(B_binary))
def simulate_probablistic_segmentation(start, end):
    """Simulate a probabilistic segmentation on a 100x100 grid.

    Produces a radial gaussian (sigma = 2, centred at the origin) over the
    square ``[start, end] x [start, end]``; responses below 0.01 are zeroed
    so the blob has compact support.
    """
    axis = np.linspace(start, end, 100)
    xx, yy = np.meshgrid(axis, axis)
    radius = np.sqrt(xx * xx + yy * yy)
    sigma = 2.0
    blob = np.exp(-(radius * radius) / (2.0 * sigma * sigma))
    blob[blob < 0.01] = 0
    return blob
0
segmentation_result = simulate_probablistic_segmentation (start, end)
ground_truth_simulated = np.ones_like(segmentation_result)
ground_truth_simulated[segmentation_result < 0.01] = 0
cDC = continous_Dice_coefficient(ground_truth_simulated, segmentation_result)
all_cDice.append(cDC)
binary_segmentation_result = np.zeros_like(segmentation_result)
binary_segmentation_result[segmentation_result > 0.01] = 1
DC = Dice_coefficient(ground_truth_simulated, binary_segmentation_result)
all_Dice.append(DC)
step = 2
for shift in range(0, 4):
segmentation_result = np.hstack((segmentation_result, np.zeros((segmentation_result.shape[0],step))))
segmentation_result = np.delete(segmentation_result, range(0, step),1)
cDC = continous_Dice_coefficient(ground_truth_simulated, segmentation_result)
all_cDice.append(cDC)
binary_segmentation_result = np.zeros_like(segmentation_result)
binary_segmentation_result[segmentation_result>0.01] = 1
DC = Dice_coefficient(ground_truth_simulated, binary_segmentation_result)
all_Dice.append(DC)
all_cDice = [str(round(val,2)) for val in all_cDice]
all_Dice = [str(round(val, 2)) for val in all_Dice]
print ('Shift errors of: (mm)')
print ([str(round(i,2)) for i in range(0, 10, 2)])
print('Reduced the continues Dice:')
print (all_cDice)
print('And the original Dice is:')
print (all_Dice)
| true | true |
f73c6e6cea3b1fc5eb5b7a12f1e046830c604785 | 37,837 | py | Python | aggregator.py | Mattlk13/dd-agent | 167d0c0ed8d7b66a531dd0c21097f0fa2fba8960 | [
"BSD-3-Clause"
] | 1,172 | 2015-01-04T21:56:16.000Z | 2022-03-13T00:01:44.000Z | aggregator.py | Mattlk13/dd-agent | 167d0c0ed8d7b66a531dd0c21097f0fa2fba8960 | [
"BSD-3-Clause"
] | 2,086 | 2015-01-02T16:33:21.000Z | 2022-03-15T10:01:47.000Z | aggregator.py | Mattlk13/dd-agent | 167d0c0ed8d7b66a531dd0c21097f0fa2fba8960 | [
"BSD-3-Clause"
] | 972 | 2015-01-02T05:03:46.000Z | 2022-03-23T04:36:19.000Z | # (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
import logging
from time import time
# project
from checks.metric_types import MetricTypes
log = logging.getLogger(__name__)
# Points that carry an explicit timestamp older than this many seconds
# (or the value passed to the aggregator constructors) are discarded rather
# than aggregated: the aggregators cannot submit values for the past, so a
# stale point would otherwise land in the wrong bucket
# (MetricsBucketAggregator) or be stamped with the flush time
# (MetricsAggregator).
RECENT_POINT_THRESHOLD_DEFAULT = 3600
class Infinity(Exception):
    """Raised by Rate._rate when two samples share the same second (zero interval)."""
    pass
class UnknownValue(Exception):
    """Raised by Rate._rate when a counter goes backwards (e.g. after a reset)."""
    pass
class Metric(object):
    """
    Base class for aggregated metrics.

    Concrete subclasses accept data points via ``sample()`` and roll them
    up over a time interval, emitting formatted points from ``flush()``.
    """

    def sample(self, value, sample_rate, timestamp=None):
        """Record a single data point. Must be overridden by subclasses."""
        raise NotImplementedError()

    def flush(self, timestamp, interval):
        """Roll up and return all points up to ``timestamp``. Must be overridden."""
        raise NotImplementedError()
class Gauge(Metric):
    """Tracks the most recent value sampled for a metric.

    Each flush reports at most one point, stamped with the sample's own
    timestamp (falling back to the flush timestamp when none was given).
    """

    def __init__(self, formatter, name, tags, hostname, device_name, extra_config=None):
        self.formatter = formatter
        self.name = name
        self.value = None
        self.tags = tags
        self.hostname = hostname
        self.device_name = device_name
        self.last_sample_time = None
        self.timestamp = time()

    def sample(self, value, sample_rate, timestamp=None):
        # Only the latest sample survives until the next flush.
        self.value = value
        self.last_sample_time = time()
        self.timestamp = timestamp

    def flush(self, timestamp, interval):
        if self.value is None:
            return []
        point = self.formatter(
            metric=self.name,
            timestamp=self.timestamp or timestamp,
            value=self.value,
            tags=self.tags,
            hostname=self.hostname,
            device_name=self.device_name,
            metric_type=MetricTypes.GAUGE,
            interval=interval,
        )
        self.value = None
        return [point]
class BucketGauge(Gauge):
    """Gauge variant that stamps the flushed point with the flush time.

    Unlike Gauge, the reported timestamp is the time of the flush (the
    bucket boundary), not the time the sample was collected.
    """

    def flush(self, timestamp, interval):
        if self.value is None:
            return []
        point = self.formatter(
            metric=self.name,
            timestamp=timestamp,
            value=self.value,
            tags=self.tags,
            hostname=self.hostname,
            device_name=self.device_name,
            metric_type=MetricTypes.GAUGE,
            interval=interval,
        )
        self.value = None
        return [point]
class Count(Metric):
    """Accumulates submitted values and flushes their sum as a COUNT point."""

    def __init__(self, formatter, name, tags, hostname, device_name, extra_config=None):
        self.formatter = formatter
        self.name = name
        self.value = None
        self.tags = tags
        self.hostname = hostname
        self.device_name = device_name
        self.last_sample_time = None

    def sample(self, value, sample_rate, timestamp=None):
        # None doubles as "nothing submitted yet"; treat it as zero here.
        self.value = (self.value or 0) + value
        self.last_sample_time = time()

    def flush(self, timestamp, interval):
        if self.value is None:
            return []
        flushed_value, self.value = self.value, None
        return [self.formatter(
            metric=self.name,
            value=flushed_value,
            timestamp=timestamp,
            tags=self.tags,
            hostname=self.hostname,
            device_name=self.device_name,
            metric_type=MetricTypes.COUNT,
            interval=interval,
        )]
class MonotonicCount(Metric):
    """Derives a count from a monotonically increasing raw counter.

    Each flush reports the sum of positive deltas between consecutive raw
    readings; negative deltas (counter resets) contribute nothing.
    """

    def __init__(self, formatter, name, tags, hostname, device_name, extra_config=None):
        self.formatter = formatter
        self.name = name
        self.tags = tags
        self.hostname = hostname
        self.device_name = device_name
        self.prev_counter = None
        self.curr_counter = None
        self.count = None
        self.last_sample_time = None

    def sample(self, value, sample_rate, timestamp=None):
        if self.curr_counter is None:
            # First reading ever: just establish a baseline.
            self.curr_counter = value
        else:
            self.prev_counter, self.curr_counter = self.curr_counter, value
        if self.prev_counter is not None and self.curr_counter is not None:
            # Clamp at zero so a counter reset does not produce a negative count.
            self.count = (self.count or 0) + max(0, self.curr_counter - self.prev_counter)
        self.last_sample_time = time()

    def flush(self, timestamp, interval):
        if self.count is None:
            return []
        flushed_count = self.count
        # Roll the window: the latest raw reading becomes the next baseline.
        self.prev_counter = self.curr_counter
        self.curr_counter = None
        self.count = None
        return [self.formatter(
            hostname=self.hostname,
            device_name=self.device_name,
            tags=self.tags,
            metric=self.name,
            value=flushed_count,
            timestamp=timestamp,
            metric_type=MetricTypes.COUNT,
            interval=interval
        )]
class Counter(Metric):
    """StatsD counter: accumulates increments, flushed as a per-second rate.

    Note: unlike the other metrics, a Counter always emits a point on
    flush — even when no samples arrived, it reports a zero rate.
    """

    def __init__(self, formatter, name, tags, hostname, device_name, extra_config=None):
        self.formatter = formatter
        self.name = name
        self.value = 0
        self.tags = tags
        self.hostname = hostname
        self.device_name = device_name
        self.last_sample_time = None

    def sample(self, value, sample_rate, timestamp=None):
        # Scale by the inverse sample rate to estimate the unsampled total.
        self.value += value * int(1 / sample_rate)
        self.last_sample_time = time()

    def flush(self, timestamp, interval):
        # Normalize the accumulated count over the flush interval, then reset.
        rate_value = self.value / interval
        self.value = 0
        return [self.formatter(
            metric=self.name,
            value=rate_value,
            timestamp=timestamp,
            tags=self.tags,
            hostname=self.hostname,
            device_name=self.device_name,
            metric_type=MetricTypes.RATE,
            interval=interval,
        )]
# Roll-ups computed for every histogram unless overridden via extra_config['aggregates'].
DEFAULT_HISTOGRAM_AGGREGATES = ['max', 'median', 'avg', 'count']
# Percentile points reported unless overridden via extra_config['percentiles'].
DEFAULT_HISTOGRAM_PERCENTILES = [0.95]
class Histogram(Metric):
    """Tracks the distribution of sampled values over each flush interval.

    Flushes one gauge per configured aggregate (min/max/median/avg/sum),
    a rate for 'count', and one gauge per configured percentile.
    """

    def __init__(self, formatter, name, tags, hostname, device_name, extra_config=None):
        self.formatter = formatter
        self.name = name
        self.count = 0
        self.samples = []
        # extra_config entries, when present and non-None, override the defaults.
        if extra_config is not None and extra_config.get('aggregates') is not None:
            self.aggregates = extra_config['aggregates']
        else:
            self.aggregates = DEFAULT_HISTOGRAM_AGGREGATES
        if extra_config is not None and extra_config.get('percentiles') is not None:
            self.percentiles = extra_config['percentiles']
        else:
            self.percentiles = DEFAULT_HISTOGRAM_PERCENTILES
        self.tags = tags
        self.hostname = hostname
        self.device_name = device_name
        self.last_sample_time = None

    def sample(self, value, sample_rate, timestamp=None):
        # Scale the count by the inverse sample rate; keep the raw value.
        self.count += int(1 / sample_rate)
        self.samples.append(value)
        self.last_sample_time = time()

    def flush(self, ts, interval):
        if not self.count:
            return []

        self.samples.sort()
        num = len(self.samples)
        total = sum(self.samples)

        # Full menu of roll-ups; only those named in self.aggregates are kept.
        candidates = [
            ('min', self.samples[0], MetricTypes.GAUGE),
            ('max', self.samples[-1], MetricTypes.GAUGE),
            ('median', self.samples[int(round(num/2 - 1))], MetricTypes.GAUGE),
            ('avg', total / float(num), MetricTypes.GAUGE),
            ('sum', total, MetricTypes.GAUGE),
            ('count', self.count/interval, MetricTypes.RATE),
        ]
        metrics = [
            self.formatter(
                hostname=self.hostname,
                device_name=self.device_name,
                tags=self.tags,
                metric='%s.%s' % (self.name, suffix),
                value=val,
                timestamp=ts,
                metric_type=mtype,
                interval=interval,
            )
            for suffix, val, mtype in candidates if suffix in self.aggregates
        ]

        # Percentile gauges; note these carry no device_name.
        for pct in self.percentiles:
            metrics.append(self.formatter(
                hostname=self.hostname,
                tags=self.tags,
                metric='%s.%spercentile' % (self.name, int(pct * 100)),
                value=self.samples[int(round(pct * num - 1))],
                timestamp=ts,
                metric_type=MetricTypes.GAUGE,
                interval=interval,
            ))

        # Reset state for the next interval.
        self.samples = []
        self.count = 0
        return metrics
class Set(Metric):
    """Counts the number of distinct values seen during a flush interval."""

    def __init__(self, formatter, name, tags, hostname, device_name, extra_config=None):
        self.formatter = formatter
        self.name = name
        self.tags = tags
        self.hostname = hostname
        self.device_name = device_name
        self.values = set()
        self.last_sample_time = None

    def sample(self, value, sample_rate, timestamp=None):
        # Sample rate is irrelevant for sets: duplicates collapse anyway.
        self.values.add(value)
        self.last_sample_time = time()

    def flush(self, timestamp, interval):
        if not self.values:
            return []
        cardinality = len(self.values)
        self.values = set()
        return [self.formatter(
            hostname=self.hostname,
            device_name=self.device_name,
            tags=self.tags,
            metric=self.name,
            value=cardinality,
            timestamp=timestamp,
            metric_type=MetricTypes.GAUGE,
            interval=interval,
        )]
class Rate(Metric):
    """Tracks the per-second rate of a raw counter over each flush interval.

    Consecutive samples are differenced and divided by the elapsed wall-clock
    seconds; only the newest sample is kept across flushes as the baseline
    for the next delta.
    """

    def __init__(self, formatter, name, tags, hostname, device_name, extra_config=None):
        self.formatter = formatter
        self.name = name
        self.tags = tags
        self.hostname = hostname
        self.device_name = device_name
        self.samples = []
        self.last_sample_time = None

    def sample(self, value, sample_rate, timestamp=None):
        # The sample's own wall-clock second is recorded; the `timestamp`
        # argument is ignored for rates.
        ts = time()
        self.samples.append((int(ts), value))
        self.last_sample_time = ts

    def _rate(self, sample1, sample2):
        """Compute the rate between two (timestamp, value) samples.

        Raises Infinity when both samples fall in the same second, and
        UnknownValue when the counter went backwards (e.g. after a reset).
        """
        interval = sample2[0] - sample1[0]
        if interval == 0:
            # Fixed: log.warn is a deprecated alias of log.warning.
            log.warning('Metric %s has an interval of 0. Not flushing.' % self.name)
            raise Infinity()

        delta = sample2[1] - sample1[1]
        if delta < 0:
            log.info('Metric %s has a rate < 0. Counter may have been Reset.' % self.name)
            raise UnknownValue()

        return (delta / float(interval))

    def flush(self, timestamp, interval):
        if len(self.samples) < 2:
            return []
        try:
            try:
                val = self._rate(self.samples[-2], self.samples[-1])
            except Exception:
                # Infinity / UnknownValue: deliberately skip this flush.
                return []

            return [self.formatter(
                hostname=self.hostname,
                device_name=self.device_name,
                tags=self.tags,
                metric=self.name,
                value=val,
                timestamp=timestamp,
                metric_type=MetricTypes.GAUGE,
                interval=interval
            )]
        finally:
            # Keep only the newest sample as the basis for the next delta.
            self.samples = self.samples[-1:]
class Aggregator(object):
    """
    Abstract metric aggregator class.

    Parses raw dogstatsd packets (metrics, events, service checks), collects
    events and service checks itself, and delegates metric storage to the
    concrete subclasses via submit_metric(). flush() is also left abstract.
    """
    # Types of metrics that allow strings
    ALLOW_STRINGS = ['s', ]
    # Types that are not implemented and ignored
    IGNORE_TYPES = ['d', ]

    def __init__(self, hostname, interval=1.0, expiry_seconds=300,
                 formatter=None, recent_point_threshold=None,
                 histogram_aggregates=None, histogram_percentiles=None,
                 utf8_decoding=False):
        self.events = []
        self.service_checks = []
        self.total_count = 0
        self.count = 0
        self.event_count = 0
        self.service_check_count = 0
        self.hostname = hostname
        self.expiry_seconds = expiry_seconds
        self.formatter = formatter or api_formatter
        self.interval = float(interval)
        recent_point_threshold = recent_point_threshold or RECENT_POINT_THRESHOLD_DEFAULT
        self.recent_point_threshold = int(recent_point_threshold)
        self.num_discarded_old_points = 0
        # Additional config passed when instantiating metric configs
        self.metric_config = {
            Histogram: {
                'aggregates': histogram_aggregates,
                'percentiles': histogram_percentiles
            }
        }
        self.utf8_decoding = utf8_decoding

    def deduplicate_tags(self, tags):
        """Return the tags with duplicates removed, sorted for a stable context key."""
        return sorted(set(tags))

    def packets_per_second(self, interval):
        """Average packet rate over `interval` seconds (0 when interval is 0)."""
        if interval == 0:
            return 0
        return round(float(self.count)/interval, 2)

    def parse_metric_packet(self, packet):
        """
        Parse one metric packet into a list of (name, value, type, tags, sample_rate).

        Schema of a dogstatsd packet:
        <name>:<value>|<metric_type>|@<sample_rate>|#<tag1_name>:<tag1_value>,<tag2_name>:<tag2_value>:<value>|<metric_type>...

        Several value|type groups may follow a single name; tag values may
        themselves contain ':', which is why the split below is repaired.
        """
        parsed_packets = []
        name_and_metadata = packet.split(':', 1)
        if len(name_and_metadata) != 2:
            raise Exception(u'Unparseable metric packet: %s' % packet)
        name = name_and_metadata[0]
        broken_split = name_and_metadata[1].split(':')
        data = []
        partial_datum = None
        for token in broken_split:
            # We need to fix the tag groups that got broken by the : split
            if partial_datum is None:
                partial_datum = token
            elif "|" not in token:
                # Still part of the previous datum (a tag value containing ':').
                partial_datum += ":" + token
            else:
                data.append(partial_datum)
                partial_datum = token
        data.append(partial_datum)
        for datum in data:
            value_and_metadata = datum.split('|')
            if len(value_and_metadata) < 2:
                raise Exception(u'Unparseable metric packet: %s' % packet)
            # Submit the metric
            raw_value = value_and_metadata[0]
            metric_type = value_and_metadata[1]
            if metric_type in self.ALLOW_STRINGS:
                value = raw_value
            elif len(metric_type) > 0 and metric_type[0] in self.IGNORE_TYPES:
                continue
            else:
                # Try to cast as an int first to avoid precision issues, then as a
                # float.
                try:
                    value = int(raw_value)
                except ValueError:
                    try:
                        value = float(raw_value)
                    except ValueError:
                        # Otherwise, raise an error saying it must be a number
                        raise Exception(u'Metric value must be a number: %s, %s' % (name, raw_value))
            # Parse the optional values - sample rate & tags.
            sample_rate = 1
            tags = None
            try:
                for m in value_and_metadata[2:]:
                    # Parse the sample rate
                    if m[0] == '@':
                        sample_rate = float(m[1:])
                        # in case it's in a bad state
                        sample_rate = 1 if sample_rate < 0 or sample_rate > 1 else sample_rate
                    elif m[0] == '#':
                        tags = tuple(sorted(m[1:].split(',')))
            except IndexError:
                log.warning(u'Incorrect metric metadata: metric_name:%s, metadata:%s',
                            name, u' '.join(value_and_metadata[2:]))
            parsed_packets.append((name, value, metric_type, tags, sample_rate))
        return parsed_packets

    def _unescape_sc_content(self, string):
        """Undo the escaping applied to service-check messages on the wire."""
        return string.replace('\\n', '\n').replace('m\:', 'm:')

    def _unescape_event_text(self, string):
        """Undo the newline escaping applied to event bodies on the wire."""
        return string.replace('\\n', '\n')

    def parse_event_packet(self, packet):
        """Parse an ``_e{...}`` packet into an event dict for self.event()."""
        try:
            name_and_metadata = packet.split(':', 1)
            if len(name_and_metadata) != 2:
                raise Exception(u'Unparseable event packet: %s' % packet)
            # Event syntax:
            # _e{5,4}:title|body|meta
            name = name_and_metadata[0]
            metadata = name_and_metadata[1]
            title_length, text_length = name.split(',')
            title_length = int(title_length[3:])
            text_length = int(text_length[:-1])
            event = {
                'title': metadata[:title_length],
                'text': self._unescape_event_text(metadata[title_length+1:title_length+text_length+1])
            }
            meta = metadata[title_length+text_length+1:]
            # Optional metadata fields, each introduced by a one-letter key.
            for m in meta.split('|')[1:]:
                if m[0] == u't':
                    event['alert_type'] = m[2:]
                elif m[0] == u'k':
                    event['aggregation_key'] = m[2:]
                elif m[0] == u's':
                    event['source_type_name'] = m[2:]
                elif m[0] == u'd':
                    event['date_happened'] = int(m[2:])
                elif m[0] == u'p':
                    event['priority'] = m[2:]
                elif m[0] == u'h':
                    event['hostname'] = m[2:]
                elif m[0] == u'#':
                    event['tags'] = self.deduplicate_tags(m[1:].split(u','))
            return event
        except (IndexError, ValueError):
            raise Exception(u'Unparseable event packet: %s' % packet)

    def parse_sc_packet(self, packet):
        """Parse an ``_sc|...`` packet into a dict for self.service_check()."""
        try:
            _, data_and_metadata = packet.split('|', 1)
            # Service check syntax:
            # _sc|check_name|status|meta
            if data_and_metadata.count('|') == 1:
                # Case with no metadata
                check_name, status = data_and_metadata.split('|')
                metadata = ''
            else:
                check_name, status, metadata = data_and_metadata.split('|', 2)
            service_check = {
                'check_name': check_name,
                'status': int(status)
            }
            # The message, when present, is always the last metadata field.
            message_delimiter = 'm:' if metadata.startswith('m:') else '|m:'
            if message_delimiter in metadata:
                meta, message = metadata.rsplit(message_delimiter, 1)
                service_check['message'] = self._unescape_sc_content(message)
            else:
                meta = metadata
            if not meta:
                return service_check
            # NOTE: py2-only `unicode` builtin; this module predates py3.
            meta = unicode(meta)
            for m in meta.split('|'):
                if m[0] == u'd':
                    service_check['timestamp'] = float(m[2:])
                elif m[0] == u'h':
                    service_check['hostname'] = m[2:]
                elif m[0] == u'#':
                    service_check['tags'] = self.deduplicate_tags(m[1:].split(u','))
            return service_check
        except (IndexError, ValueError):
            raise Exception(u'Unparseable service check packet: %s' % packet)

    def submit_packets(self, packets):
        """Parse a newline-separated batch of dogstatsd packets and dispatch
        each one to event(), service_check() or submit_metric()."""
        # Clients MUST always send UTF-8 encoded content; decoding every
        # packet up front costs ~7% though, so it is opt-in via utf8_decoding.
        if self.utf8_decoding:
            packets = unicode(packets, 'utf-8', errors='replace')

        for packet in packets.splitlines():
            if not packet.strip():
                continue

            if packet.startswith('_e'):
                event = self.parse_event_packet(packet)
                self.event(**event)
                self.event_count += 1
            elif packet.startswith('_sc'):
                service_check = self.parse_sc_packet(packet)
                self.service_check(**service_check)
                self.service_check_count += 1
            else:
                parsed_packets = self.parse_metric_packet(packet)
                self.count += 1
                for name, value, mtype, tags, sample_rate in parsed_packets:
                    hostname, device_name, tags = self._extract_magic_tags(tags)
                    self.submit_metric(name, value, mtype, tags=tags, hostname=hostname,
                                       device_name=device_name, sample_rate=sample_rate)

    def _extract_magic_tags(self, tags):
        """Magic tags (host, device) override metric hostname and device_name attributes"""
        hostname = None
        device_name = None
        # This implementation avoid list operations for the common case
        if tags:
            tags_to_remove = []
            for tag in tags:
                if tag.startswith('host:'):
                    hostname = tag[5:]
                    tags_to_remove.append(tag)
                elif tag.startswith('device:'):
                    device_name = tag[7:]
                    tags_to_remove.append(tag)
            if tags_to_remove:
                # tags is a tuple already sorted, we convert it into a list to pop elements
                tags = list(tags)
                for tag in tags_to_remove:
                    tags.remove(tag)
                tags = tuple(tags) or None
        return hostname, device_name, tags

    def submit_metric(self, name, value, mtype, tags=None, hostname=None,
                      device_name=None, timestamp=None, sample_rate=1):
        """ Add a metric to be aggregated. Implemented by subclasses. """
        raise NotImplementedError()

    def event(self, title, text, date_happened=None, alert_type=None, aggregation_key=None, source_type_name=None, priority=None, tags=None, hostname=None):
        """Record an event; timestamp defaults to now, host to the aggregator's hostname."""
        event = {
            'msg_title': title,
            'msg_text': text,
        }
        if date_happened is not None:
            event['timestamp'] = date_happened
        else:
            event['timestamp'] = int(time())
        if alert_type is not None:
            event['alert_type'] = alert_type
        if aggregation_key is not None:
            event['aggregation_key'] = aggregation_key
        if source_type_name is not None:
            event['source_type_name'] = source_type_name
        if priority is not None:
            event['priority'] = priority
        if tags is not None:
            event['tags'] = self.deduplicate_tags(tags)
        if hostname is not None:
            event['host'] = hostname
        else:
            event['host'] = self.hostname

        self.events.append(event)

    def service_check(self, check_name, status, tags=None, timestamp=None,
                      hostname=None, message=None):
        """Record a service-check run; timestamp defaults to now, host_name
        to the aggregator's hostname."""
        service_check = {
            'check': check_name,
            'status': status,
            'timestamp': timestamp or int(time())
        }
        if tags is not None:
            service_check['tags'] = self.deduplicate_tags(tags)
        if hostname is not None:
            service_check['host_name'] = hostname
        else:
            service_check['host_name'] = self.hostname
        if message is not None:
            service_check['message'] = message

        self.service_checks.append(service_check)

    def flush(self):
        """ Flush aggregated metrics. Implemented by subclasses. """
        raise NotImplementedError()

    def flush_events(self):
        """Return and clear all buffered events, updating the packet counters."""
        events = self.events
        self.events = []

        self.total_count += self.event_count
        self.event_count = 0

        log.debug("Received %d events since last flush" % len(events))

        return events

    def flush_service_checks(self):
        """Return and clear all buffered service checks, updating the packet counters."""
        service_checks = self.service_checks
        self.service_checks = []

        self.total_count += self.service_check_count
        self.service_check_count = 0

        log.debug("Received {0} service check runs since last flush".format(len(service_checks)))

        return service_checks

    def send_packet_count(self, metric_name):
        """Submit the number of packets received since the last flush as a gauge."""
        self.submit_metric(metric_name, self.count, 'g')
class MetricsBucketAggregator(Aggregator):
    """
    Aggregator that slots samples into fixed-width time buckets.

    Each bucket is `interval` seconds wide and is flushed once its window
    has fully elapsed; Counters additionally keep reporting zero for
    `expiry_seconds` after their last sample.
    """

    def __init__(self, hostname, interval=1.0, expiry_seconds=300,
                 formatter=None, recent_point_threshold=None,
                 histogram_aggregates=None, histogram_percentiles=None,
                 utf8_decoding=False):
        super(MetricsBucketAggregator, self).__init__(
            hostname,
            interval,
            expiry_seconds,
            formatter,
            recent_point_threshold,
            histogram_aggregates,
            histogram_percentiles,
            utf8_decoding
        )
        self.metric_by_bucket = {}
        self.last_sample_time_by_context = {}
        self.current_bucket = None
        self.current_mbc = {}
        self.last_flush_cutoff_time = 0
        self.metric_type_to_class = {
            'g': BucketGauge,
            'c': Counter,
            'h': Histogram,
            'ms': Histogram,
            's': Set,
        }

    def calculate_bucket_start(self, timestamp):
        """Align `timestamp` down to the start of its bucket."""
        return timestamp - (timestamp % self.interval)

    def submit_metric(self, name, value, mtype, tags=None, hostname=None,
                      device_name=None, timestamp=None, sample_rate=1):
        """Route one sample into the metric instance of the right bucket/context."""
        # Avoid calling extra functions to dedupe tags if there are none
        # Note: if you change the way that context is created, please also change
        # create_empty_metrics, which counts on this order
        # Keep hostname with empty string to unset it
        hostname = hostname if hostname is not None else self.hostname
        if tags is None:
            context = (name, tuple(), hostname, device_name)
        else:
            tags = tuple(self.deduplicate_tags(tags))
            context = (name, tags, hostname, device_name)

        cur_time = time()
        # Points older than recent_point_threshold would land in the wrong
        # bucket, so they are discarded instead.
        if timestamp is not None and cur_time - int(timestamp) > self.recent_point_threshold:
            log.debug("Discarding %s - ts = %s , current ts = %s " % (name, timestamp, cur_time))
            self.num_discarded_old_points += 1
        else:
            timestamp = timestamp or cur_time
            # Keep track of the buckets using the timestamp at the start time of the bucket
            bucket_start_timestamp = self.calculate_bucket_start(timestamp)
            if bucket_start_timestamp == self.current_bucket:
                # Fast path: same bucket as the previous sample.
                metric_by_context = self.current_mbc
            else:
                if bucket_start_timestamp not in self.metric_by_bucket:
                    self.metric_by_bucket[bucket_start_timestamp] = {}
                metric_by_context = self.metric_by_bucket[bucket_start_timestamp]
                self.current_bucket = bucket_start_timestamp
                self.current_mbc = metric_by_context

            if context not in metric_by_context:
                metric_class = self.metric_type_to_class[mtype]
                metric_by_context[context] = metric_class(self.formatter, name, tags,
                                                          hostname, device_name, self.metric_config.get(metric_class))

            metric_by_context[context].sample(value, sample_rate, timestamp)

    def create_empty_metrics(self, sample_time_by_context, expiry_timestamp, flush_timestamp, metrics):
        """Append zero-valued Counter points for contexts that went silent but
        have not yet expired; drop the expired ones."""
        # Even if no data is submitted, Counters keep reporting "0" for expiry_seconds.
        # The other Metrics (Set, Gauge, Histogram) do not report if no data is submitted
        for context, last_sample_time in sample_time_by_context.items():
            if last_sample_time < expiry_timestamp:
                log.debug("%s hasn't been submitted in %ss. Expiring." % (context, self.expiry_seconds))
                self.last_sample_time_by_context.pop(context, None)
            else:
                # The expiration currently only applies to Counters
                # This counts on the ordering of the context created in submit_metric not changing
                metric = Counter(self.formatter, context[0], context[1], context[2], context[3])
                metrics += metric.flush(flush_timestamp, self.interval)

    def flush(self):
        """Flush every bucket whose window has fully elapsed and return the points."""
        cur_time = time()
        flush_cutoff_time = self.calculate_bucket_start(cur_time)
        expiry_timestamp = cur_time - self.expiry_seconds
        metrics = []

        if self.metric_by_bucket:
            # Process buckets in chronological order so expiry checks and the
            # re-creation of non-expired Counters stay consistent. We also
            # mutate self.metric_by_bucket while going through it.
            for bucket_start_timestamp in sorted(self.metric_by_bucket.keys()):
                metric_by_context = self.metric_by_bucket[bucket_start_timestamp]
                if bucket_start_timestamp < flush_cutoff_time:
                    not_sampled_in_this_bucket = self.last_sample_time_by_context.copy()
                    # We mutate this dictionary while iterating so don't use an iterator.
                    for context, metric in metric_by_context.items():
                        if metric.last_sample_time < expiry_timestamp:
                            # This should never happen
                            log.warning("%s hasn't been submitted in %ss. Expiring." % (context, self.expiry_seconds))
                            not_sampled_in_this_bucket.pop(context, None)
                            self.last_sample_time_by_context.pop(context, None)
                        else:
                            metrics += metric.flush(bucket_start_timestamp, self.interval)
                            if isinstance(metric, Counter):
                                self.last_sample_time_by_context[context] = metric.last_sample_time
                            not_sampled_in_this_bucket.pop(context, None)
                    # We need to account for Metrics that have not expired and were
                    # not flushed for this bucket
                    self.create_empty_metrics(not_sampled_in_this_bucket, expiry_timestamp, bucket_start_timestamp, metrics)

                    del self.metric_by_bucket[bucket_start_timestamp]
        else:
            # Even if there are no metrics in this flush, there may be some non-expired
            # counters. We should only create these non-expired metrics if we've passed
            # an interval since the last flush
            if flush_cutoff_time >= self.last_flush_cutoff_time + self.interval:
                self.create_empty_metrics(self.last_sample_time_by_context.copy(), expiry_timestamp,
                                          flush_cutoff_time-self.interval, metrics)

        # Log a warning regarding metrics with old timestamps being submitted.
        # Fixed: log.warn is a deprecated alias of log.warning.
        if self.num_discarded_old_points > 0:
            log.warning('%s points were discarded as a result of having an old timestamp' % self.num_discarded_old_points)
            self.num_discarded_old_points = 0

        # Save some stats.
        log.debug("received %s payloads since last flush" % self.count)
        self.total_count += self.count
        self.count = 0
        self.current_bucket = None
        self.current_mbc = {}
        self.last_flush_cutoff_time = flush_cutoff_time
        return metrics
class MetricsAggregator(Aggregator):
    """
    Aggregator that keeps one metric instance per context and flushes all of
    them at once, stamping points with the flush time.
    """

    def __init__(self, hostname, interval=1.0, expiry_seconds=300,
                 formatter=None, recent_point_threshold=None,
                 histogram_aggregates=None, histogram_percentiles=None,
                 utf8_decoding=False):
        super(MetricsAggregator, self).__init__(
            hostname,
            interval,
            expiry_seconds,
            formatter,
            recent_point_threshold,
            histogram_aggregates,
            histogram_percentiles,
            utf8_decoding
        )
        self.metrics = {}
        self.metric_type_to_class = {
            'g': Gauge,
            'ct': Count,
            'ct-c': MonotonicCount,
            'c': Counter,
            'h': Histogram,
            'ms': Histogram,
            's': Set,
            '_dd-r': Rate,
        }

    def submit_metric(self, name, value, mtype, tags=None, hostname=None,
                      device_name=None, timestamp=None, sample_rate=1):
        """Route one sample into the metric instance for its context,
        discarding points older than recent_point_threshold."""
        # Avoid calling extra functions to dedupe tags if there are none
        # Keep hostname with empty string to unset it
        hostname = hostname if hostname is not None else self.hostname
        if tags is None:
            context = (name, tuple(), hostname, device_name)
        else:
            tags = tuple(self.deduplicate_tags(tags))
            context = (name, tags, hostname, device_name)

        if context not in self.metrics:
            metric_class = self.metric_type_to_class[mtype]
            self.metrics[context] = metric_class(self.formatter, name, tags,
                                                 hostname, device_name, self.metric_config.get(metric_class))
        cur_time = time()
        if timestamp is not None and cur_time - int(timestamp) > self.recent_point_threshold:
            log.debug("Discarding %s - ts = %s , current ts = %s " % (name, timestamp, cur_time))
            self.num_discarded_old_points += 1
        else:
            self.metrics[context].sample(value, sample_rate, timestamp)

    def gauge(self, name, value, tags=None, hostname=None, device_name=None, timestamp=None):
        """Submit a gauge sample."""
        self.submit_metric(name, value, 'g', tags, hostname, device_name, timestamp)

    def increment(self, name, value=1, tags=None, hostname=None, device_name=None):
        """Increment a counter."""
        self.submit_metric(name, value, 'c', tags, hostname, device_name)

    def decrement(self, name, value=-1, tags=None, hostname=None, device_name=None):
        """Decrement a counter."""
        self.submit_metric(name, value, 'c', tags, hostname, device_name)

    def rate(self, name, value, tags=None, hostname=None, device_name=None):
        """Submit a raw counter reading to be differenced into a rate."""
        self.submit_metric(name, value, '_dd-r', tags, hostname, device_name)

    def submit_count(self, name, value, tags=None, hostname=None, device_name=None):
        """Submit a value to be summed into a count."""
        self.submit_metric(name, value, 'ct', tags, hostname, device_name)

    def count_from_counter(self, name, value, tags=None,
                           hostname=None, device_name=None):
        """Submit a monotonic raw counter reading to be turned into a count."""
        self.submit_metric(name, value, 'ct-c', tags,
                           hostname, device_name)

    def histogram(self, name, value, tags=None, hostname=None, device_name=None):
        """Submit a histogram sample."""
        self.submit_metric(name, value, 'h', tags, hostname, device_name)

    def set(self, name, value, tags=None, hostname=None, device_name=None):
        """Submit a value to a distinct-count set."""
        self.submit_metric(name, value, 's', tags, hostname, device_name)

    def flush(self):
        """Flush every live metric, expire silent ones, and return the points."""
        timestamp = time()
        expiry_timestamp = timestamp - self.expiry_seconds

        # Flush points and remove expired metrics. The loop deletes entries,
        # so snapshot the items first (also makes this safe on py3, where
        # .items() is a live view).
        metrics = []
        for context, metric in list(self.metrics.items()):
            if metric.last_sample_time < expiry_timestamp:
                log.debug("%s hasn't been submitted in %ss. Expiring." % (context, self.expiry_seconds))
                del self.metrics[context]
            else:
                metrics += metric.flush(timestamp, self.interval)

        # Log a warning regarding metrics with old timestamps being submitted.
        # Fixed: log.warn is a deprecated alias of log.warning.
        if self.num_discarded_old_points > 0:
            log.warning('%s points were discarded as a result of having an old timestamp' % self.num_discarded_old_points)
            self.num_discarded_old_points = 0

        # Save some stats.
        log.debug("received %s payloads since last flush" % self.count)
        self.total_count += self.count
        self.count = 0
        return metrics
def get_formatter(config):
    """Return the point formatter to use, honoring `statsd_metric_namespace`.

    With no namespace configured, the plain api_formatter is returned.
    Otherwise a wrapper is returned that prefixes every metric name with the
    namespace (appending a trailing dot when it is missing). The namespace is
    re-read from `config` on every call, as before.
    """
    if not config['statsd_metric_namespace']:
        return api_formatter

    def metric_namespace_formatter_wrapper(metric, value, timestamp, tags,
                                           hostname=None, device_name=None,
                                           metric_type=None, interval=None):
        metric_prefix = config['statsd_metric_namespace']
        if metric_prefix[-1] != '.':
            metric_prefix += '.'
        return api_formatter(metric_prefix + metric, value, timestamp, tags, hostname,
                             device_name, metric_type, interval)

    return metric_namespace_formatter_wrapper
def api_formatter(metric, value, timestamp, tags, hostname=None, device_name=None,
                  metric_type=None, interval=None):
    """Format a single point as the dict shape expected by the API payload."""
    point = {
        'metric': metric,
        'points': [(timestamp, value)],
        'tags': tags,
        'host': hostname,
        'device_name': device_name,
        'interval': interval,
    }
    # Fall back to gauge when no explicit type was provided.
    point['type'] = metric_type or MetricTypes.GAUGE
    return point
| 37.648756 | 156 | 0.583318 |
import logging
from time import time
from checks.metric_types import MetricTypes
log = logging.getLogger(__name__)
RECENT_POINT_THRESHOLD_DEFAULT = 3600
class Infinity(Exception):
pass
class UnknownValue(Exception):
pass
class Metric(object):
def sample(self, value, sample_rate, timestamp=None):
raise NotImplementedError()
def flush(self, timestamp, interval):
raise NotImplementedError()
class Gauge(Metric):
    """Report the most recently observed value ("last write wins")."""

    def __init__(self, formatter, name, tags, hostname, device_name, extra_config=None):
        self.formatter = formatter
        self.name = name
        self.tags = tags
        self.hostname = hostname
        self.device_name = device_name
        self.value = None
        self.timestamp = time()
        self.last_sample_time = None

    def sample(self, value, sample_rate, timestamp=None):
        # Gauges ignore the sample rate; only the latest value matters.
        self.value = value
        self.timestamp = timestamp
        self.last_sample_time = time()

    def flush(self, timestamp, interval):
        if self.value is None:
            return []
        point = self.formatter(
            metric=self.name,
            timestamp=self.timestamp or timestamp,
            value=self.value,
            tags=self.tags,
            hostname=self.hostname,
            device_name=self.device_name,
            metric_type=MetricTypes.GAUGE,
            interval=interval,
        )
        self.value = None
        return [point]
class BucketGauge(Gauge):
    """Gauge variant for bucketed aggregation.

    The flushed point is stamped with the bucket's start time (the
    *timestamp* argument) instead of the sample's own timestamp.
    """

    def flush(self, timestamp, interval):
        if self.value is None:
            return []
        point = self.formatter(
            metric=self.name,
            timestamp=timestamp,
            value=self.value,
            tags=self.tags,
            hostname=self.hostname,
            device_name=self.device_name,
            metric_type=MetricTypes.GAUGE,
            interval=interval,
        )
        self.value = None
        return [point]
class Count(Metric):
    """Sum every submitted value over the flush interval."""

    def __init__(self, formatter, name, tags, hostname, device_name, extra_config=None):
        self.formatter = formatter
        self.name = name
        self.tags = tags
        self.hostname = hostname
        self.device_name = device_name
        self.value = None
        self.last_sample_time = None

    def sample(self, value, sample_rate, timestamp=None):
        # First sample turns the accumulator on (None -> 0).
        if self.value is None:
            self.value = 0
        self.value += value
        self.last_sample_time = time()

    def flush(self, timestamp, interval):
        if self.value is None:
            return []
        try:
            return [self.formatter(
                metric=self.name,
                value=self.value,
                timestamp=timestamp,
                tags=self.tags,
                hostname=self.hostname,
                device_name=self.device_name,
                metric_type=MetricTypes.COUNT,
                interval=interval,
            )]
        finally:
            # Reset so the next interval starts from scratch.
            self.value = None
class MonotonicCount(Metric):
    """Track a monotonically increasing counter.

    Reports the total delta observed since the previous flush; negative
    deltas (counter resets) contribute zero.
    """

    def __init__(self, formatter, name, tags, hostname, device_name, extra_config=None):
        self.formatter = formatter
        self.name = name
        self.tags = tags
        self.hostname = hostname
        self.device_name = device_name
        self.prev_counter = None
        self.curr_counter = None
        self.count = None
        self.last_sample_time = None

    def sample(self, value, sample_rate, timestamp=None):
        if self.curr_counter is not None:
            self.prev_counter = self.curr_counter
        self.curr_counter = value
        if self.prev_counter is not None and self.curr_counter is not None:
            # Clamp at zero so a counter reset never produces a negative count.
            delta = self.curr_counter - self.prev_counter
            self.count = (self.count or 0) + max(0, delta)
        self.last_sample_time = time()

    def flush(self, timestamp, interval):
        if self.count is None:
            return []
        try:
            return [self.formatter(
                hostname=self.hostname,
                device_name=self.device_name,
                tags=self.tags,
                metric=self.name,
                value=self.count,
                timestamp=timestamp,
                metric_type=MetricTypes.COUNT,
                interval=interval
            )]
        finally:
            # Carry the last raw reading forward for the next delta and
            # reset the accumulated count.
            self.prev_counter = self.curr_counter
            self.curr_counter = None
            self.count = None
class Counter(Metric):
    """Classic statsd counter.

    Increments are scaled up by the inverse sample rate and flushed as a
    per-second rate over the interval.
    """

    def __init__(self, formatter, name, tags, hostname, device_name, extra_config=None):
        self.formatter = formatter
        self.name = name
        self.tags = tags
        self.hostname = hostname
        self.device_name = device_name
        self.value = 0
        self.last_sample_time = None

    def sample(self, value, sample_rate, timestamp=None):
        # Scale by the inverse sample rate to estimate the true count.
        self.value += value * int(1 / sample_rate)
        self.last_sample_time = time()

    def flush(self, timestamp, interval):
        try:
            rate = self.value / interval
            return [self.formatter(
                metric=self.name,
                value=rate,
                timestamp=timestamp,
                tags=self.tags,
                hostname=self.hostname,
                device_name=self.device_name,
                metric_type=MetricTypes.RATE,
                interval=interval,
            )]
        finally:
            self.value = 0
# Histogram outputs emitted when the user configures none: which aggregate
# series to produce, and which percentiles.
DEFAULT_HISTOGRAM_AGGREGATES = ['max', 'median', 'avg', 'count']
DEFAULT_HISTOGRAM_PERCENTILES = [0.95]
class Histogram(Metric):
    """Aggregate sampled values into summary statistics.

    ``flush`` emits one point per configured aggregate
    (min/max/median/avg/sum/count) plus one point per configured
    percentile. Aggregates/percentiles come from ``extra_config`` when
    provided, otherwise the module-level defaults.
    """
    def __init__(self, formatter, name, tags, hostname, device_name, extra_config=None):
        self.formatter = formatter
        self.name = name
        self.count = 0
        self.samples = []
        self.aggregates = extra_config['aggregates'] if\
            extra_config is not None and extra_config.get('aggregates') is not None\
            else DEFAULT_HISTOGRAM_AGGREGATES
        self.percentiles = extra_config['percentiles'] if\
            extra_config is not None and extra_config.get('percentiles') is not None\
            else DEFAULT_HISTOGRAM_PERCENTILES
        self.tags = tags
        self.hostname = hostname
        self.device_name = device_name
        self.last_sample_time = None
    def sample(self, value, sample_rate, timestamp=None):
        # The sample count is scaled by the inverse sample rate; the raw
        # value itself is stored unscaled.
        self.count += int(1 / sample_rate)
        self.samples.append(value)
        self.last_sample_time = time()
    def flush(self, ts, interval):
        if not self.count:
            return []
        self.samples.sort()
        length = len(self.samples)
        min_ = self.samples[0]
        max_ = self.samples[-1]
        med = self.samples[int(round(length/2 - 1))]
        sum_ = sum(self.samples)
        avg = sum_ / float(length)
        aggregators = [
            ('min', min_, MetricTypes.GAUGE),
            ('max', max_, MetricTypes.GAUGE),
            ('median', med, MetricTypes.GAUGE),
            ('avg', avg, MetricTypes.GAUGE),
            ('sum', sum_, MetricTypes.GAUGE),
            ('count', self.count/interval, MetricTypes.RATE),
        ]
        # Keep only the aggregates the user asked for.
        metric_aggrs = [
            (agg_name, agg_func, m_type)
            for agg_name, agg_func, m_type in aggregators
            if agg_name in self.aggregates
        ]
        metrics = [self.formatter(
            hostname=self.hostname,
            device_name=self.device_name,
            tags=self.tags,
            metric='%s.%s' % (self.name, suffix),
            value=value,
            timestamp=ts,
            metric_type=metric_type,
            interval=interval) for suffix, value, metric_type in metric_aggrs
        ]
        for p in self.percentiles:
            # Nearest-rank percentile over the sorted samples.
            val = self.samples[int(round(p * length - 1))]
            name = '%s.%spercentile' % (self.name, int(p * 100))
            # NOTE(review): percentile points do not carry device_name,
            # unlike the aggregate points above — confirm this is intended.
            metrics.append(self.formatter(
                hostname=self.hostname,
                tags=self.tags,
                metric=name,
                value=val,
                timestamp=ts,
                metric_type=MetricTypes.GAUGE,
                interval=interval,
            ))
        self.samples = []
        self.count = 0
        return metrics
class Set(Metric):
    """Count the number of distinct values observed during the interval."""

    def __init__(self, formatter, name, tags, hostname, device_name, extra_config=None):
        self.formatter = formatter
        self.name = name
        self.tags = tags
        self.hostname = hostname
        self.device_name = device_name
        self.values = set()
        self.last_sample_time = None

    def sample(self, value, sample_rate, timestamp=None):
        self.values.add(value)
        self.last_sample_time = time()

    def flush(self, timestamp, interval):
        if not self.values:
            return []
        cardinality = len(self.values)
        self.values = set()
        return [self.formatter(
            hostname=self.hostname,
            device_name=self.device_name,
            tags=self.tags,
            metric=self.name,
            value=cardinality,
            timestamp=timestamp,
            metric_type=MetricTypes.GAUGE,
            interval=interval,
        )]
class Rate(Metric):
    """Track the rate of change between consecutive samples of a counter."""

    def __init__(self, formatter, name, tags, hostname, device_name, extra_config=None):
        self.formatter = formatter
        self.name = name
        self.tags = tags
        self.hostname = hostname
        self.device_name = device_name
        self.samples = []
        self.last_sample_time = None

    def sample(self, value, sample_rate, timestamp=None):
        now = time()
        self.samples.append((int(now), value))
        self.last_sample_time = now

    def _rate(self, sample1, sample2):
        # Each sample is an (int_timestamp, value) pair.
        delta_t = sample2[0] - sample1[0]
        if delta_t == 0:
            log.warn('Metric %s has an interval of 0. Not flushing.' % self.name)
            raise Infinity()
        delta_v = sample2[1] - sample1[1]
        if delta_v < 0:
            log.info('Metric %s has a rate < 0. Counter may have been Reset.' % self.name)
            raise UnknownValue()
        return delta_v / float(delta_t)

    def flush(self, timestamp, interval):
        if len(self.samples) < 2:
            return []
        try:
            try:
                val = self._rate(self.samples[-2], self.samples[-1])
            except Exception:
                # Infinity / UnknownValue: skip this flush cycle.
                return []
            return [self.formatter(
                hostname=self.hostname,
                device_name=self.device_name,
                tags=self.tags,
                metric=self.name,
                value=val,
                timestamp=timestamp,
                metric_type=MetricTypes.GAUGE,
                interval=interval
            )]
        finally:
            # Keep only the newest sample as the basis for the next delta.
            self.samples = self.samples[-1:]
class Aggregator(object):
    """
    Abstract metric aggregator.

    Parses dogstatsd-style packets (metrics, events, service checks),
    keeps per-context state and hands formatted points back on flush.
    Subclasses (MetricsAggregator, MetricsBucketAggregator) decide how
    samples are stored and must implement submit_metric() and flush().

    NOTE: three tag-parsing comparisons below had been corrupted to
    unterminated string literals; they are restored to the dogstatsd
    '#' tag-section marker.
    """
    # Metric types whose raw value may remain a string (sets).
    ALLOW_STRINGS = ['s', ]
    # Metric types that are parsed but silently dropped (e.g. distributions).
    IGNORE_TYPES = ['d', ]

    def __init__(self, hostname, interval=1.0, expiry_seconds=300,
                 formatter=None, recent_point_threshold=None,
                 histogram_aggregates=None, histogram_percentiles=None,
                 utf8_decoding=False):
        self.events = []
        self.service_checks = []
        self.total_count = 0
        self.count = 0
        self.event_count = 0
        self.service_check_count = 0
        self.hostname = hostname
        self.expiry_seconds = expiry_seconds
        self.formatter = formatter or api_formatter
        self.interval = float(interval)
        recent_point_threshold = recent_point_threshold or RECENT_POINT_THRESHOLD_DEFAULT
        self.recent_point_threshold = int(recent_point_threshold)
        self.num_discarded_old_points = 0
        # Additional config forwarded to the metric class constructors.
        self.metric_config = {
            Histogram: {
                'aggregates': histogram_aggregates,
                'percentiles': histogram_percentiles
            }
        }
        self.utf8_decoding = utf8_decoding

    def deduplicate_tags(self, tags):
        """Return *tags* without duplicates, in deterministic (sorted) order."""
        return sorted(set(tags))

    def packets_per_second(self, interval):
        """Average packets/second received over *interval* (0 if interval is 0)."""
        if interval == 0:
            return 0
        return round(float(self.count) / interval, 2)

    def parse_metric_packet(self, packet):
        """Parse one metric line into (name, value, type, tags, sample_rate) tuples.

        A single packet may carry several data points separated by ':'
        (e.g. ``name:1|c:2|c``). Raises Exception on malformed input.
        """
        parsed_packets = []
        name_and_metadata = packet.split(':', 1)
        if len(name_and_metadata) != 2:
            raise Exception(u'Unparseable metric packet: %s' % packet)
        name = name_and_metadata[0]
        # Values may themselves contain ':' (e.g. string sets), so re-join
        # tokens until one carrying a '|' terminates the previous datum.
        broken_split = name_and_metadata[1].split(':')
        data = []
        partial_datum = None
        for token in broken_split:
            if partial_datum is None:
                partial_datum = token
            elif "|" not in token:
                partial_datum += ":" + token
            else:
                data.append(partial_datum)
                partial_datum = token
        data.append(partial_datum)
        for datum in data:
            value_and_metadata = datum.split('|')
            if len(value_and_metadata) < 2:
                raise Exception(u'Unparseable metric packet: %s' % packet)
            raw_value = value_and_metadata[0]
            metric_type = value_and_metadata[1]
            if metric_type in self.ALLOW_STRINGS:
                value = raw_value
            elif len(metric_type) > 0 and metric_type[0] in self.IGNORE_TYPES:
                continue
            else:
                # Prefer int to avoid float precision issues; fall back to float.
                try:
                    value = int(raw_value)
                except ValueError:
                    try:
                        value = float(raw_value)
                    except ValueError:
                        raise Exception(u'Metric value must be a number: %s, %s' % (name, raw_value))
            # Optional metadata: '@' sample rate and '#' tag list.
            sample_rate = 1
            tags = None
            try:
                for m in value_and_metadata[2:]:
                    if m[0] == '@':
                        sample_rate = float(m[1:])
                        # Out-of-range sample rates are treated as 1.
                        sample_rate = 1 if sample_rate < 0 or sample_rate > 1 else sample_rate
                    elif m[0] == '#':
                        # '#' introduces the comma-separated tag list.
                        tags = tuple(sorted(m[1:].split(',')))
            except IndexError:
                log.warning(u'Incorrect metric metadata: metric_name:%s, metadata:%s',
                            name, u' '.join(value_and_metadata[2:]))
            parsed_packets.append((name, value, metric_type, tags, sample_rate))
        return parsed_packets

    def _unescape_sc_content(self, string):
        # Undo the escaping applied to service-check messages.
        return string.replace('\\n', '\n').replace('m\\:', 'm:')

    def _unescape_event_text(self, string):
        return string.replace('\\n', '\n')

    def parse_event_packet(self, packet):
        """Parse an ``_e{title_len,text_len}:title|text|meta`` event packet."""
        try:
            name_and_metadata = packet.split(':', 1)
            if len(name_and_metadata) != 2:
                raise Exception(u'Unparseable event packet: %s' % packet)
            # Event syntax:
            # _e{5,4}:title|body|meta
            name = name_and_metadata[0]
            metadata = name_and_metadata[1]
            title_length, text_length = name.split(',')
            title_length = int(title_length[3:])
            text_length = int(text_length[:-1])
            event = {
                'title': metadata[:title_length],
                'text': self._unescape_event_text(metadata[title_length+1:title_length+text_length+1])
            }
            meta = metadata[title_length+text_length+1:]
            for m in meta.split('|')[1:]:
                if m[0] == u't':
                    event['alert_type'] = m[2:]
                elif m[0] == u'k':
                    event['aggregation_key'] = m[2:]
                elif m[0] == u's':
                    event['source_type_name'] = m[2:]
                elif m[0] == u'd':
                    event['date_happened'] = int(m[2:])
                elif m[0] == u'p':
                    event['priority'] = m[2:]
                elif m[0] == u'h':
                    event['hostname'] = m[2:]
                elif m[0] == u'#':
                    # '#' introduces the comma-separated tag list.
                    event['tags'] = self.deduplicate_tags(m[1:].split(u','))
            return event
        except (IndexError, ValueError):
            raise Exception(u'Unparseable event packet: %s' % packet)

    def parse_sc_packet(self, packet):
        """Parse an ``_sc|check_name|status|meta`` service-check packet."""
        try:
            _, data_and_metadata = packet.split('|', 1)
            # Service check syntax:
            # _sc|check_name|status|meta
            if data_and_metadata.count('|') == 1:
                # Case with no metadata
                check_name, status = data_and_metadata.split('|')
                metadata = ''
            else:
                check_name, status, metadata = data_and_metadata.split('|', 2)
            service_check = {
                'check_name': check_name,
                'status': int(status)
            }
            # 'm:' marks the free-form message; it is always last.
            message_delimiter = 'm:' if metadata.startswith('m:') else '|m:'
            if message_delimiter in metadata:
                meta, message = metadata.rsplit(message_delimiter, 1)
                service_check['message'] = self._unescape_sc_content(message)
            else:
                meta = metadata
            if not meta:
                return service_check
            # NOTE(review): ``unicode`` is a Python 2 builtin — this module
            # appears py2-only; confirm before running on py3.
            meta = unicode(meta)
            for m in meta.split('|'):
                if m[0] == u'd':
                    service_check['timestamp'] = float(m[2:])
                elif m[0] == u'h':
                    service_check['hostname'] = m[2:]
                elif m[0] == u'#':
                    # '#' introduces the comma-separated tag list.
                    service_check['tags'] = self.deduplicate_tags(m[1:].split(u','))
            return service_check
        except (IndexError, ValueError):
            raise Exception(u'Unparseable service check packet: %s' % packet)

    def submit_packets(self, packets):
        """Split a datagram into lines and dispatch each to the right parser."""
        # We should probably consider that packets are always encoded
        # in utf8, but decoding all packets has an perf overhead of 7%
        # So we let the user decide if we wants utf8 by default
        # Keep a very conservative approach anyhow
        # Clients MUST always send UTF-8 encoded content
        if self.utf8_decoding:
            # NOTE(review): py2-only ``unicode`` builtin — see parse_sc_packet.
            packets = unicode(packets, 'utf-8', errors='replace')

        for packet in packets.splitlines():
            if not packet.strip():
                continue

            if packet.startswith('_e'):
                event = self.parse_event_packet(packet)
                self.event(**event)
                self.event_count += 1
            elif packet.startswith('_sc'):
                service_check = self.parse_sc_packet(packet)
                self.service_check(**service_check)
                self.service_check_count += 1
            else:
                parsed_packets = self.parse_metric_packet(packet)
                self.count += 1
                for name, value, mtype, tags, sample_rate in parsed_packets:
                    hostname, device_name, tags = self._extract_magic_tags(tags)
                    self.submit_metric(name, value, mtype, tags=tags, hostname=hostname,
                                       device_name=device_name, sample_rate=sample_rate)

    def _extract_magic_tags(self, tags):
        """Pull 'host:' and 'device:' out of *tags* into dedicated fields."""
        hostname = None
        device_name = None
        # This implementation avoid list operations for the common case
        if tags:
            tags_to_remove = []
            for tag in tags:
                if tag.startswith('host:'):
                    hostname = tag[5:]
                    tags_to_remove.append(tag)
                elif tag.startswith('device:'):
                    device_name = tag[7:]
                    tags_to_remove.append(tag)
            if tags_to_remove:
                # tags is a tuple already sorted, we convert it into a list to pop elements
                tags = list(tags)
                for tag in tags_to_remove:
                    tags.remove(tag)
                tags = tuple(tags) or None
        return hostname, device_name, tags

    def submit_metric(self, name, value, mtype, tags=None, hostname=None,
                      device_name=None, timestamp=None, sample_rate=1):
        """Record one metric sample. Subclasses must override."""
        raise NotImplementedError()

    def event(self, title, text, date_happened=None, alert_type=None, aggregation_key=None, source_type_name=None, priority=None, tags=None, hostname=None):
        """Queue an event for the next flush_events() call."""
        event = {
            'msg_title': title,
            'msg_text': text,
        }
        if date_happened is not None:
            event['timestamp'] = date_happened
        else:
            event['timestamp'] = int(time())
        if alert_type is not None:
            event['alert_type'] = alert_type
        if aggregation_key is not None:
            event['aggregation_key'] = aggregation_key
        if source_type_name is not None:
            event['source_type_name'] = source_type_name
        if priority is not None:
            event['priority'] = priority
        if tags is not None:
            event['tags'] = self.deduplicate_tags(tags)
        if hostname is not None:
            event['host'] = hostname
        else:
            event['host'] = self.hostname
        self.events.append(event)

    def service_check(self, check_name, status, tags=None, timestamp=None,
                      hostname=None, message=None):
        """Queue a service-check run for the next flush_service_checks() call."""
        service_check = {
            'check': check_name,
            'status': status,
            'timestamp': timestamp or int(time())
        }
        if tags is not None:
            service_check['tags'] = self.deduplicate_tags(tags)
        if hostname is not None:
            service_check['host_name'] = hostname
        else:
            service_check['host_name'] = self.hostname
        if message is not None:
            service_check['message'] = message
        self.service_checks.append(service_check)

    def flush(self):
        """Flush accumulated metric points. Subclasses must override."""
        raise NotImplementedError()

    def flush_events(self):
        """Return queued events and reset the event buffer/counters."""
        events = self.events
        self.events = []
        self.total_count += self.event_count
        self.event_count = 0
        log.debug("Received %d events since last flush" % len(events))
        return events

    def flush_service_checks(self):
        """Return queued service checks and reset the buffer/counters."""
        service_checks = self.service_checks
        self.service_checks = []
        self.total_count += self.service_check_count
        self.service_check_count = 0
        log.debug("Received {0} service check runs since last flush".format(len(service_checks)))
        return service_checks

    def send_packet_count(self, metric_name):
        """Report the number of packets received this interval as a gauge."""
        self.submit_metric(metric_name, self.count, 'g')
class MetricsBucketAggregator(Aggregator):
    """Aggregator that groups samples into fixed-width time buckets.

    Each sample lands in the bucket covering its timestamp; flush() emits
    every bucket that ended before the current one and synthesizes zero
    Counters for contexts that went quiet (until they expire).
    """
    def __init__(self, hostname, interval=1.0, expiry_seconds=300,
                 formatter=None, recent_point_threshold=None,
                 histogram_aggregates=None, histogram_percentiles=None,
                 utf8_decoding=False):
        super(MetricsBucketAggregator, self).__init__(
            hostname,
            interval,
            expiry_seconds,
            formatter,
            recent_point_threshold,
            histogram_aggregates,
            histogram_percentiles,
            utf8_decoding
        )
        # bucket start ts -> {context tuple -> Metric instance}
        self.metric_by_bucket = {}
        # context -> last sample time, used to emit zero Counters.
        self.last_sample_time_by_context = {}
        # Cache of the most recently touched bucket to skip dict lookups.
        self.current_bucket = None
        self.current_mbc = {}
        self.last_flush_cutoff_time = 0
        self.metric_type_to_class = {
            'g': BucketGauge,
            'c': Counter,
            'h': Histogram,
            'ms': Histogram,
            's': Set,
        }
    def calculate_bucket_start(self, timestamp):
        # Align the timestamp down to the start of its bucket.
        return timestamp - (timestamp % self.interval)
    def submit_metric(self, name, value, mtype, tags=None, hostname=None,
                      device_name=None, timestamp=None, sample_rate=1):
        """Route one sample into the Metric instance of its time bucket."""
        # Avoid calling extra functions to dedupe tags if there are none
        # Note: if you change the way that context is created, please also change create_empty_metrics,
        # which counts on this order
        # Keep hostname with empty string to unset it
        hostname = hostname if hostname is not None else self.hostname
        if tags is None:
            context = (name, tuple(), hostname, device_name)
        else:
            tags = tuple(self.deduplicate_tags(tags))
            context = (name, tags, hostname, device_name)
        cur_time = time()
        # Check to make sure that the timestamp that is passed in (if any) is not older than
        # recent_point_threshold. If so, discard the point.
        if timestamp is not None and cur_time - int(timestamp) > self.recent_point_threshold:
            log.debug("Discarding %s - ts = %s , current ts = %s " % (name, timestamp, cur_time))
            self.num_discarded_old_points += 1
        else:
            timestamp = timestamp or cur_time
            # Keep track of the buckets using the timestamp at the start time of the bucket
            bucket_start_timestamp = self.calculate_bucket_start(timestamp)
            if bucket_start_timestamp == self.current_bucket:
                metric_by_context = self.current_mbc
            else:
                if bucket_start_timestamp not in self.metric_by_bucket:
                    self.metric_by_bucket[bucket_start_timestamp] = {}
                metric_by_context = self.metric_by_bucket[bucket_start_timestamp]
                self.current_bucket = bucket_start_timestamp
                self.current_mbc = metric_by_context
            if context not in metric_by_context:
                metric_class = self.metric_type_to_class[mtype]
                metric_by_context[context] = metric_class(self.formatter, name, tags,
                    hostname, device_name, self.metric_config.get(metric_class))
            metric_by_context[context].sample(value, sample_rate, timestamp)
    def create_empty_metrics(self, sample_time_by_context, expiry_timestamp, flush_timestamp, metrics):
        """Append zero-valued Counter points for contexts with no samples."""
        # Even if no data is submitted, Counters keep reporting "0" for expiry_seconds. The other Metrics
        # (Set, Gauge, Histogram) do not report if no data is submitted
        for context, last_sample_time in sample_time_by_context.items():
            if last_sample_time < expiry_timestamp:
                log.debug("%s hasn't been submitted in %ss. Expiring." % (context, self.expiry_seconds))
                self.last_sample_time_by_context.pop(context, None)
            else:
                # context is (name, tags, hostname, device_name) — see submit_metric.
                metric = Counter(self.formatter, context[0], context[1], context[2], context[3])
                metrics += metric.flush(flush_timestamp, self.interval)
    def flush(self):
        """Emit points for every bucket that closed before the current one."""
        cur_time = time()
        flush_cutoff_time = self.calculate_bucket_start(cur_time)
        expiry_timestamp = cur_time - self.expiry_seconds
        metrics = []
        if self.metric_by_bucket:
            # Flush buckets oldest-first so points come out in time order.
            for bucket_start_timestamp in sorted(self.metric_by_bucket.keys()):
                metric_by_context = self.metric_by_bucket[bucket_start_timestamp]
                if bucket_start_timestamp < flush_cutoff_time:
                    not_sampled_in_this_bucket = self.last_sample_time_by_context.copy()
                    for context, metric in metric_by_context.items():
                        if metric.last_sample_time < expiry_timestamp:
                            # This should never happen
                            log.warning("%s hasn't been submitted in %ss. Expiring." % (context, self.expiry_seconds))
                            not_sampled_in_this_bucket.pop(context, None)
                            self.last_sample_time_by_context.pop(context, None)
                        else:
                            metrics += metric.flush(bucket_start_timestamp, self.interval)
                            if isinstance(metric, Counter):
                                self.last_sample_time_by_context[context] = metric.last_sample_time
                            not_sampled_in_this_bucket.pop(context, None)
                    # Counters that saw no samples this bucket still emit 0.
                    self.create_empty_metrics(not_sampled_in_this_bucket, expiry_timestamp, bucket_start_timestamp, metrics)
                    del self.metric_by_bucket[bucket_start_timestamp]
        else:
            # No live buckets: still emit zero Counters once per interval.
            if flush_cutoff_time >= self.last_flush_cutoff_time + self.interval:
                self.create_empty_metrics(self.last_sample_time_by_context.copy(), expiry_timestamp,
                                          flush_cutoff_time-self.interval, metrics)
        # Log a warning regarding metrics with old timestamps being submitted
        if self.num_discarded_old_points > 0:
            log.warn('%s points were discarded as a result of having an old timestamp' % self.num_discarded_old_points)
            self.num_discarded_old_points = 0
        # Save some stats.
        log.debug("received %s payloads since last flush" % self.count)
        self.total_count += self.count
        self.count = 0
        # Reset the current-bucket cache; its bucket may just have been flushed.
        self.current_bucket = None
        self.current_mbc = {}
        self.last_flush_cutoff_time = flush_cutoff_time
        return metrics
class MetricsAggregator(Aggregator):
    """
    Aggregator that keeps one live Metric instance per context
    (name/tags/hostname/device) and flushes them all with one timestamp.
    """

    def __init__(self, hostname, interval=1.0, expiry_seconds=300,
                 formatter=None, recent_point_threshold=None,
                 histogram_aggregates=None, histogram_percentiles=None,
                 utf8_decoding=False):
        super(MetricsAggregator, self).__init__(
            hostname,
            interval,
            expiry_seconds,
            formatter,
            recent_point_threshold,
            histogram_aggregates,
            histogram_percentiles,
            utf8_decoding
        )
        # context tuple -> live Metric instance
        self.metrics = {}
        # statsd type code -> Metric subclass
        self.metric_type_to_class = {
            'g': Gauge,
            'ct': Count,
            'ct-c': MonotonicCount,
            'c': Counter,
            'h': Histogram,
            'ms': Histogram,
            's': Set,
            '_dd-r': Rate,
        }

    def submit_metric(self, name, value, mtype, tags=None, hostname=None,
                      device_name=None, timestamp=None, sample_rate=1):
        """Record one sample in the Metric instance for its context."""
        # Avoid calling extra functions to dedupe tags if there are none
        # Keep hostname with empty string to unset it
        hostname = hostname if hostname is not None else self.hostname
        if tags is None:
            context = (name, tuple(), hostname, device_name)
        else:
            tags = tuple(self.deduplicate_tags(tags))
            context = (name, tags, hostname, device_name)
        if context not in self.metrics:
            metric_class = self.metric_type_to_class[mtype]
            self.metrics[context] = metric_class(self.formatter, name, tags,
                hostname, device_name, self.metric_config.get(metric_class))
        cur_time = time()
        # Discard points older than recent_point_threshold.
        if timestamp is not None and cur_time - int(timestamp) > self.recent_point_threshold:
            log.debug("Discarding %s - ts = %s , current ts = %s " % (name, timestamp, cur_time))
            self.num_discarded_old_points += 1
        else:
            self.metrics[context].sample(value, sample_rate, timestamp)

    # Convenience wrappers mapping the public API onto statsd type codes.
    def gauge(self, name, value, tags=None, hostname=None, device_name=None, timestamp=None):
        self.submit_metric(name, value, 'g', tags, hostname, device_name, timestamp)

    def increment(self, name, value=1, tags=None, hostname=None, device_name=None):
        self.submit_metric(name, value, 'c', tags, hostname, device_name)

    def decrement(self, name, value=-1, tags=None, hostname=None, device_name=None):
        self.submit_metric(name, value, 'c', tags, hostname, device_name)

    def rate(self, name, value, tags=None, hostname=None, device_name=None):
        self.submit_metric(name, value, '_dd-r', tags, hostname, device_name)

    def submit_count(self, name, value, tags=None, hostname=None, device_name=None):
        self.submit_metric(name, value, 'ct', tags, hostname, device_name)

    def count_from_counter(self, name, value, tags=None,
                           hostname=None, device_name=None):
        self.submit_metric(name, value, 'ct-c', tags,
                           hostname, device_name)

    def histogram(self, name, value, tags=None, hostname=None, device_name=None):
        self.submit_metric(name, value, 'h', tags, hostname, device_name)

    def set(self, name, value, tags=None, hostname=None, device_name=None):
        self.submit_metric(name, value, 's', tags, hostname, device_name)

    def flush(self):
        """Flush all live metrics; expire contexts not sampled recently."""
        timestamp = time()
        expiry_timestamp = timestamp - self.expiry_seconds
        # Flush points and remove expired metrics. The dict is mutated while
        # we walk it, so snapshot the items first: list() is a cheap copy on
        # py2 (where .items() is already a list) and makes the deletion safe
        # on py3 (where .items() is a live view).
        metrics = []
        for context, metric in list(self.metrics.items()):
            if metric.last_sample_time < expiry_timestamp:
                log.debug("%s hasn't been submitted in %ss. Expiring." % (context, self.expiry_seconds))
                del self.metrics[context]
            else:
                metrics += metric.flush(timestamp, self.interval)
        # Log a warning regarding metrics with old timestamps being submitted
        if self.num_discarded_old_points > 0:
            log.warning('%s points were discarded as a result of having an old timestamp' % self.num_discarded_old_points)
            self.num_discarded_old_points = 0
        # Save some stats.
        log.debug("received %s payloads since last flush" % self.count)
        self.total_count += self.count
        self.count = 0
        return metrics
def get_formatter(config):
    """Return the point formatter, honoring any configured metric namespace.

    With ``statsd_metric_namespace`` set, every metric name is prefixed
    with the namespace (a trailing dot is appended if missing); otherwise
    the plain :func:`api_formatter` is returned.
    """
    formatter = api_formatter
    if config['statsd_metric_namespace']:
        def metric_namespace_formatter_wrapper(metric, value, timestamp, tags,
                                               hostname=None, device_name=None,
                                               metric_type=None, interval=None):
            # Namespace is re-read per call so config changes are honored.
            metric_prefix = config['statsd_metric_namespace']
            if metric_prefix[-1] != '.':
                metric_prefix += '.'
            return api_formatter(metric_prefix + metric, value, timestamp, tags, hostname,
                                 device_name, metric_type, interval)
        formatter = metric_namespace_formatter_wrapper
    return formatter
def api_formatter(metric, value, timestamp, tags, hostname=None, device_name=None,
                  metric_type=None, interval=None):
    """Format one sample as the payload dict expected by the intake API.

    ``type`` falls back to gauge when *metric_type* is falsy.
    """
    return {
        'metric': metric,
        'points': [(timestamp, value)],
        'tags': tags,
        'host': hostname,
        'device_name': device_name,
        'type': metric_type or MetricTypes.GAUGE,
        'interval':interval,
    }
| true | true |
f73c7091314ed182cced7aed7727e8fa7b87a2d1 | 696 | py | Python | imdbpy-master/tests/test_http_movie_taglines.py | camillesanchez/history-movie-index2 | 20f053700121f147b315645eed45c58f5014d2c0 | [
"MIT"
] | null | null | null | imdbpy-master/tests/test_http_movie_taglines.py | camillesanchez/history-movie-index2 | 20f053700121f147b315645eed45c58f5014d2c0 | [
"MIT"
] | 7 | 2021-02-02T22:59:25.000Z | 2022-03-12T00:46:05.000Z | imdbpy-master/tests/test_http_movie_taglines.py | camillesanchez/history-movie-index2 | 20f053700121f147b315645eed45c58f5014d2c0 | [
"MIT"
] | null | null | null | def test_movie_taglines_if_single_should_be_a_list_of_phrases(ia):
movie = ia.get_movie('0109151', info=['taglines']) # Matrix (V)
taglines = movie.get('taglines', [])
assert taglines == ["If humans don't want me... why'd they create me?"]
def test_movie_taglines_if_multiple_should_be_a_list_of_phrases(ia):
    # Manos carries three taglines.
    manos = ia.get_movie('0060666', info=['taglines'])
    taglines = manos.get('taglines', [])
    assert len(taglines) == 3
    assert taglines[0] == "It's Shocking! It's Beyond Your Imagination!"
def test_movie_taglines_if_none_should_be_excluded(ia):
    # Ates Parcasi has no taglines, so the key must be absent entirely.
    ates_parcasi = ia.get_movie('1863157', info=['taglines'])
    assert 'taglines' not in ates_parcasi
def test_movie_taglines_if_single_should_be_a_list_of_phrases(ia):
    # (restored: the def line had dataset-statistics residue fused onto it)
    movie = ia.get_movie('0109151', info=['taglines'])
    taglines = movie.get('taglines', [])
    assert taglines == ["If humans don't want me... why'd they create me?"]
def test_movie_taglines_if_multiple_should_be_a_list_of_phrases(ia):
    # Several taglines should come back as an ordered list of phrases.
    movie = ia.get_movie('0060666', info=['taglines'])
    taglines = movie.get('taglines', [])
    assert len(taglines) == 3
    assert taglines[0] == "It's Shocking! It's Beyond Your Imagination!"
def test_movie_taglines_if_none_should_be_excluded(ia):
    # A title without taglines must not expose the 'taglines' key at all.
    movie = ia.get_movie('1863157', info=['taglines'])
    assert 'taglines' not in movie
| true | true |
f73c725d0cdd3666e8953a2d622f89e473ce65e4 | 1,166 | py | Python | ms_deisotope/spectrum_graph.py | mstim/ms_deisotope | 29f4f466e92e66b65a2d21eca714aa627caa21db | [
"Apache-2.0"
] | 18 | 2017-09-01T12:26:12.000Z | 2022-02-23T02:31:29.000Z | ms_deisotope/spectrum_graph.py | mstim/ms_deisotope | 29f4f466e92e66b65a2d21eca714aa627caa21db | [
"Apache-2.0"
] | 19 | 2017-03-12T20:40:36.000Z | 2022-03-31T22:50:47.000Z | ms_deisotope/spectrum_graph.py | mstim/ms_deisotope | 29f4f466e92e66b65a2d21eca714aa627caa21db | [
"Apache-2.0"
] | 14 | 2016-05-06T02:25:30.000Z | 2022-03-31T14:40:06.000Z | from ms_deisotope._c.spectrum_graph import (
PathFinder,
MassWrapper,
PeakGroupNode,
PeakNode,
NodeBase,
Path,
SpectrumGraph)
# Monoisotopic residue masses used as default graph-edge steps.
# Note there are no separate L/I entries — 'J' (113.084...) matches the
# isobaric Leu/Ile pair.
amino_acids = [
    MassWrapper('G', 57.02146372057),
    MassWrapper('A', 71.03711378471),
    MassWrapper('S', 87.03202840427),
    MassWrapper('P', 97.05276384884999),
    MassWrapper('V', 99.06841391299),
    MassWrapper('T', 101.04767846841),
    MassWrapper('C', 103.00918478471),
    MassWrapper('J', 113.08406397713),
    MassWrapper('N', 114.04292744114),
    MassWrapper('D', 115.02694302383),
    MassWrapper('Q', 128.05857750528),
    MassWrapper('K', 128.094963014),
    MassWrapper('E', 129.04259308797),
    MassWrapper('M', 131.04048491299),
    MassWrapper('H', 137.05891185845),
    MassWrapper('F', 147.06841391299),
    MassWrapper('R', 156.1011110236),
    MassWrapper('Y', 163.06332853255),
    MassWrapper('W', 186.07931294986),
]
def find_paths(peaks, components=None, error_tolerance=1e-5, merge=False):
    """Enumerate paths through *peaks* whose gaps match *components* masses.

    *components* defaults to the standard amino-acid residue table;
    *error_tolerance* is forwarded to :class:`PathFinder`, as is *merge*.
    """
    if components is None:
        components = amino_acids
    finder = PathFinder(components, error_tolerance)
    return finder.paths(peaks, merge=merge)
| 30.684211 | 74 | 0.680103 | from ms_deisotope._c.spectrum_graph import (
PathFinder,
MassWrapper,
PeakGroupNode,
PeakNode,
NodeBase,
Path,
SpectrumGraph)
# Monoisotopic residue masses (edge steps); 'J' covers the isobaric
# Leu/Ile pair, hence no separate 'L'/'I' entries.
amino_acids = [
    MassWrapper('G', 57.02146372057),
    MassWrapper('A', 71.03711378471),
    MassWrapper('S', 87.03202840427),
    MassWrapper('P', 97.05276384884999),
    MassWrapper('V', 99.06841391299),
    MassWrapper('T', 101.04767846841),
    MassWrapper('C', 103.00918478471),
    MassWrapper('J', 113.08406397713),
    MassWrapper('N', 114.04292744114),
    MassWrapper('D', 115.02694302383),
    MassWrapper('Q', 128.05857750528),
    MassWrapper('K', 128.094963014),
    MassWrapper('E', 129.04259308797),
    MassWrapper('M', 131.04048491299),
    MassWrapper('H', 137.05891185845),
    MassWrapper('F', 147.06841391299),
    MassWrapper('R', 156.1011110236),
    MassWrapper('Y', 163.06332853255),
    MassWrapper('W', 186.07931294986),
]
def find_paths(peaks, components=None, error_tolerance=1e-5, merge=False):
    """Find paths through *peaks* stepping by *components* masses
    (defaults to the amino-acid residue table) within *error_tolerance*;
    *merge* is forwarded to :meth:`PathFinder.paths`."""
    if components is None:
        components = amino_acids
    sequencer = PathFinder(components, error_tolerance)
    paths = sequencer.paths(peaks, merge=merge)
    return paths
| true | true |
f73c73363c2d28a7a8085fccd409bb81966e8d51 | 892 | py | Python | i3pystatus/updates/aptget.py | juliushaertl/i3pystatus | e2a0097316734b253af841b6cd6dbf01cc3e647c | [
"MIT"
] | null | null | null | i3pystatus/updates/aptget.py | juliushaertl/i3pystatus | e2a0097316734b253af841b6cd6dbf01cc3e647c | [
"MIT"
] | null | null | null | i3pystatus/updates/aptget.py | juliushaertl/i3pystatus | e2a0097316734b253af841b6cd6dbf01cc3e647c | [
"MIT"
] | null | null | null | import os
from i3pystatus.core.command import run_through_shell
from i3pystatus.updates import Backend
class AptGet(Backend):
    """
    Gets update count for Debian based distributions.

    This mimics the Arch Linux `checkupdates` script
    but with apt-get and written in python.
    """

    @property
    def updates(self):
        # A private per-user list cache means no root access is required
        # and the system's real apt lists are never touched.
        # os.environ.get with a default avoids a TypeError when $USER is
        # unset (os.getenv would return None).
        user = os.environ.get("USER", "unknown")
        cache_dir = "/tmp/update-cache-" + user
        # exist_ok avoids the check-then-create race of exists()/mkdir().
        os.makedirs(cache_dir, exist_ok=True)

        # Refresh package lists into the cache...
        command = "apt-get update -o Dir::State::Lists=" + cache_dir
        run_through_shell(command.split())
        # ...then dry-run an upgrade against it; each pending upgrade is
        # reported on a line starting with "Inst".
        command = "apt-get upgrade -s -o Dir::State::Lists=" + cache_dir
        apt = run_through_shell(command.split())

        return sum(1 for line in apt.out.split("\n") if line.startswith("Inst"))


Backend = AptGet
| 27.030303 | 72 | 0.633408 | import os
from i3pystatus.core.command import run_through_shell
from i3pystatus.updates import Backend
class AptGet(Backend):
    """Update-count backend for Debian-based distributions: refreshes
    package lists into a private cache and counts simulated upgrades
    (an apt-get take on Arch's `checkupdates`)."""
    @property
    def updates(self):
        # Private per-user list cache so no root access is needed.
        # NOTE(review): os.getenv("USER") is None when $USER is unset,
        # which would make the concatenation raise — confirm environment.
        cache_dir = "/tmp/update-cache-" + os.getenv("USER")
        if not os.path.exists(cache_dir):
            os.mkdir(cache_dir)
        # Refresh package lists into the cache...
        command = "apt-get update -o Dir::State::Lists=" + cache_dir
        run_through_shell(command.split())
        # ...then dry-run an upgrade; each "Inst" line is a pending upgrade.
        command = "apt-get upgrade -s -o Dir::State::Lists=" + cache_dir
        apt = run_through_shell(command.split())
        update_count = 0
        for line in apt.out.split("\n"):
            if line.startswith("Inst"):
                update_count += 1
        return update_count
# Re-exported under the name the updates machinery presumably looks up.
Backend = AptGet
| true | true |
f73c73d078a843fc4a35ff98f38b2d3b2a31285a | 8,305 | py | Python | a2ml/tasks_queue/tasks_api.py | gitter-badger/a2ml | 1d9ef6657645b61c64090284ed8fadb1a68b932c | [
"Apache-2.0"
] | null | null | null | a2ml/tasks_queue/tasks_api.py | gitter-badger/a2ml | 1d9ef6657645b61c64090284ed8fadb1a68b932c | [
"Apache-2.0"
] | null | null | null | a2ml/tasks_queue/tasks_api.py | gitter-badger/a2ml | 1d9ef6657645b61c64090284ed8fadb1a68b932c | [
"Apache-2.0"
] | null | null | null | from .celery_app import celeryApp
import logging
import copy
import os
import json
import jsonpickle
from a2ml.api.utils.context import Context
from a2ml.api.a2ml import A2ML
from a2ml.api.a2ml_dataset import A2MLDataset
from a2ml.api.a2ml_experiment import A2MLExperiment
from a2ml.api.a2ml_model import A2MLModel
from a2ml.api.a2ml_project import A2MLProject
from a2ml.server.notification import SyncSender
notificator = SyncSender()
def create_context(params, new_project=False):
    """Build an a2ml Context for a server-side task run.

    If *params* carries a jsonpickle'd 'context', revive it and rebind the
    server-side plumbing (notificator, request id, logger). Otherwise build
    a fresh Context rooted at A2ML_PROJECT_PATH/<project_name> (test-only path).

    Side effect: chdir()s into a `tmp` dir next to this module, because the
    Azure provider packages the current working directory.
    """
    if params.get('context'):
        ctx = jsonpickle.decode(params['context'])
        # Mark the context as running server-side and stop it proxying to a server.
        ctx.set_runs_on_server(True)
        ctx.config.set('config', 'use_server', False)
        ctx.notificator = notificator
        ctx.request_id = params['_request_id']
        ctx.setup_logger(format='')
    else:
        # For Tasks Test Only!
        project_path = os.path.join(
            os.environ.get('A2ML_PROJECT_PATH', ''), params.get('project_name')
        )
        ctx = Context(path=project_path, debug = params.get("debug_log", False))
        if not new_project:
            # Optional per-call overrides of the project configuration.
            if params.get("provider"):
                ctx.config.set('config', 'providers', [params.get("provider")])
            if params.get("source_path"):
                ctx.config.set('config', 'source', params.get("source_path"))
    tmp_dir = os.path.join(os.path.dirname(__file__), 'tmp')
    if not os.path.exists(tmp_dir):
        os.makedirs(tmp_dir)
    # For Azure, since it package current directory
    os.chdir(tmp_dir)
    return ctx
def __handle_task_result(self, status, retval, task_id, args, kwargs, einfo):
    """Celery after_return hook: push the task outcome to the notification channel.

    `args[0]` is the task's `params` dict, which always carries '_request_id'.
    On failure the exception (and traceback info) is rendered to text first.
    """
    request_id = args[0]['_request_id']
    payload = retval if status == 'SUCCESS' else __error_to_result(retval, einfo)
    notificator.publish_result(request_id, status, payload)
def execute_tasks(tasks_func, params):
    """Run a celery task inline (test mode) or dispatch it and wait for the result.

    TEST_CALL_CELERY_TASKS short-circuits the broker so tests run synchronously.
    """
    if not os.environ.get('TEST_CALL_CELERY_TASKS'):
        async_result = tasks_func.delay(params)
        return async_result.get()
    return tasks_func(params)
# Projects
@celeryApp.task(after_return=__handle_task_result)
def new_project_task(params):
    """Celery wrapper around A2MLProject.create."""
    def run(ctx):
        return A2MLProject(ctx, None).create(*params['args'], **params['kwargs'])
    return with_context(params, run)
@celeryApp.task(after_return=__handle_task_result)
def list_projects_task(params):
    """Celery wrapper around A2MLProject.list; reduces project dicts to names."""
    def run(ctx):
        listing = A2MLProject(ctx, None).list(*params['args'], **params['kwargs'])
        return __map_collection_to_name(listing, 'projects')
    return with_context(params, run)
@celeryApp.task(after_return=__handle_task_result)
def delete_project_task(params):
return with_context(
params,
lambda ctx: A2MLProject(ctx, None).delete(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def select_project_task(params):
return with_context(
params,
lambda ctx: A2MLProject(ctx, None).select(*params['args'], **params['kwargs'])
)
# Datasets
@celeryApp.task(after_return=__handle_task_result)
def new_dataset_task(params):
return with_context(
params,
lambda ctx: A2MLDataset(ctx, None).create(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def list_datasets_task(params):
def func(ctx):
res = A2MLDataset(ctx, None).list(*params['args'], **params['kwargs'])
return __map_collection_to_name(res, 'datasets')
return with_context(params, func)
@celeryApp.task(after_return=__handle_task_result)
def delete_dataset_task(params):
return with_context(
params,
lambda ctx: A2MLDataset(ctx, None).delete(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def select_dataset_task(params):
return with_context(
params,
lambda ctx: A2MLDataset(ctx, None).select(*params['args'], **params['kwargs'])
)
# Experiment
@celeryApp.task(after_return=__handle_task_result)
def list_experiments_task(params):
def func(ctx):
res = A2MLExperiment(ctx, None).list(*params['args'], **params['kwargs'])
return __map_collection_to_name(res, 'experiments')
return with_context(params, func)
@celeryApp.task(after_return=__handle_task_result)
def leaderboard_experiment_task(params):
return with_context(
params,
lambda ctx: A2MLExperiment(ctx, None).leaderboard(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def history_experiment_task(params):
return with_context(
params,
lambda ctx: A2MLExperiment(ctx, None).history(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def start_experiment_task(params):
return with_context(
params,
lambda ctx: A2MLExperiment(ctx, None).start(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def stop_experiment_task(params):
return with_context(
params,
lambda ctx: A2MLExperiment(ctx, None).stop(*params['args'], **params['kwargs'])
)
# Models
@celeryApp.task(after_return=__handle_task_result)
def actual_model_task(params):
return with_context(
params,
lambda ctx: A2MLModel(ctx, None).actual(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def deploy_model_task(params):
return with_context(
params,
lambda ctx: A2MLModel(ctx, None).deploy(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def predict_model_task(params):
return with_context(
params,
lambda ctx: A2MLModel(ctx, None).predict(*params['args'], **params['kwargs'])
)
# Complex tasks
@celeryApp.task(after_return=__handle_task_result)
def import_data_task(params):
return with_context(
params,
lambda ctx: A2ML(ctx).import_data(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def train_task(params):
return with_context(
params,
lambda ctx: A2ML(ctx).train(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def evaluate_task(params):
return with_context(
params,
lambda ctx: A2ML(ctx).evaluate(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def deploy_task(params):
return with_context(
params,
lambda ctx: A2ML(ctx).deploy(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def predict_task(params):
return with_context(
params,
lambda ctx: A2ML(ctx).predict(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def review_task(params):
    """Celery wrapper for model review — not implemented yet."""
    # TODO: implement review support. NotImplementedError subclasses Exception,
    # so __handle_task_result and existing callers still catch it; the old
    # message also had a typo ("inplemented").
    raise NotImplementedError('not implemented yet')
@celeryApp.task(after_return=__handle_task_result)
def demo_task(params):
    """Demo task: stream ten log lines, two seconds apart, then report success."""
    import time
    request_id = params['_request_id']
    for step in range(10):
        notificator.publish_log(request_id, 'info', 'log ' + str(step))
        time.sleep(2)
    notificator.publish_result(request_id, 'SUCCESS', 'done')
def with_context(params, proc):
    """Build a Context from *params*, run *proc* with it, and package the result.

    Returns {'response': <proc result>, 'config': <jsonpickle'd ctx.config>} so
    the caller gets the (possibly mutated) configuration back with the payload.
    """
    ctx = create_context(params)
    # Normalise optional call arguments so every task can splat them safely.
    if not 'args' in params:
        params['args'] = []
    if not 'kwargs' in params:
        params['kwargs'] = {}
    res = proc(ctx)
    # Flip the context back to client mode before serialising it for the response.
    ctx.set_runs_on_server(False)
    ctx.config.set('config', 'use_server', True)
    return {'response': res, 'config': jsonpickle.encode(ctx.config)}
def __exception_message_with_all_causes(e):
    """Join the messages of an exception and its entire __cause__ chain."""
    parts = [str(e)]
    cause = e.__cause__ if isinstance(e, Exception) else None
    while cause is not None:
        parts.append(str(cause))
        cause = cause.__cause__ if isinstance(cause, Exception) else None
    return ' caused by '.join(parts)
def __error_to_result(retval, einfo):
    """Render a failed task's exception (and traceback info, if any) as text."""
    message = __exception_message_with_all_causes(retval)
    if einfo:
        return message + '\n' + str(einfo)
    return message
def __map_collection_to_name(res, collection_name):
    """Replace each provider's item dicts with just their names.

    Mutates *res* in place and returns it. Fix: the original had no return
    statement, so `return __map_collection_to_name(res, ...)` in the
    list_*_task wrappers handed None back to with_context, losing the data.
    """
    for provder in res.keys():
        if collection_name in res[provder]['data']:
            res[provder]['data'][collection_name] = list(
                map(lambda x: x.get('name'), res[provder]['data'][collection_name])
            )
    return res
| 29.981949 | 94 | 0.681638 | from .celery_app import celeryApp
import logging
import copy
import os
import json
import jsonpickle
from a2ml.api.utils.context import Context
from a2ml.api.a2ml import A2ML
from a2ml.api.a2ml_dataset import A2MLDataset
from a2ml.api.a2ml_experiment import A2MLExperiment
from a2ml.api.a2ml_model import A2MLModel
from a2ml.api.a2ml_project import A2MLProject
from a2ml.server.notification import SyncSender
notificator = SyncSender()
def create_context(params, new_project=False):
if params.get('context'):
ctx = jsonpickle.decode(params['context'])
ctx.set_runs_on_server(True)
ctx.config.set('config', 'use_server', False)
ctx.notificator = notificator
ctx.request_id = params['_request_id']
ctx.setup_logger(format='')
else:
project_path = os.path.join(
os.environ.get('A2ML_PROJECT_PATH', ''), params.get('project_name')
)
ctx = Context(path=project_path, debug = params.get("debug_log", False))
if not new_project:
if params.get("provider"):
ctx.config.set('config', 'providers', [params.get("provider")])
if params.get("source_path"):
ctx.config.set('config', 'source', params.get("source_path"))
tmp_dir = os.path.join(os.path.dirname(__file__), 'tmp')
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
os.chdir(tmp_dir)
return ctx
def __handle_task_result(self, status, retval, task_id, args, kwargs, einfo):
request_id = args[0]['_request_id']
if status == 'SUCCESS':
notificator.publish_result(request_id, status, retval)
else:
notificator.publish_result(
request_id,
status,
__error_to_result(retval, einfo)
)
def execute_tasks(tasks_func, params):
if os.environ.get('TEST_CALL_CELERY_TASKS'):
return tasks_func(params)
else:
ar = tasks_func.delay(params)
return ar.get()
@celeryApp.task(after_return=__handle_task_result)
def new_project_task(params):
return with_context(
params,
lambda ctx: A2MLProject(ctx, None).create(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def list_projects_task(params):
def func(ctx):
res = A2MLProject(ctx, None).list(*params['args'], **params['kwargs'])
return __map_collection_to_name(res, 'projects')
return with_context(params, func)
@celeryApp.task(after_return=__handle_task_result)
def delete_project_task(params):
return with_context(
params,
lambda ctx: A2MLProject(ctx, None).delete(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def select_project_task(params):
return with_context(
params,
lambda ctx: A2MLProject(ctx, None).select(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def new_dataset_task(params):
return with_context(
params,
lambda ctx: A2MLDataset(ctx, None).create(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def list_datasets_task(params):
def func(ctx):
res = A2MLDataset(ctx, None).list(*params['args'], **params['kwargs'])
return __map_collection_to_name(res, 'datasets')
return with_context(params, func)
@celeryApp.task(after_return=__handle_task_result)
def delete_dataset_task(params):
return with_context(
params,
lambda ctx: A2MLDataset(ctx, None).delete(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def select_dataset_task(params):
return with_context(
params,
lambda ctx: A2MLDataset(ctx, None).select(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def list_experiments_task(params):
def func(ctx):
res = A2MLExperiment(ctx, None).list(*params['args'], **params['kwargs'])
return __map_collection_to_name(res, 'experiments')
return with_context(params, func)
@celeryApp.task(after_return=__handle_task_result)
def leaderboard_experiment_task(params):
return with_context(
params,
lambda ctx: A2MLExperiment(ctx, None).leaderboard(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def history_experiment_task(params):
return with_context(
params,
lambda ctx: A2MLExperiment(ctx, None).history(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def start_experiment_task(params):
return with_context(
params,
lambda ctx: A2MLExperiment(ctx, None).start(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def stop_experiment_task(params):
return with_context(
params,
lambda ctx: A2MLExperiment(ctx, None).stop(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def actual_model_task(params):
return with_context(
params,
lambda ctx: A2MLModel(ctx, None).actual(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def deploy_model_task(params):
return with_context(
params,
lambda ctx: A2MLModel(ctx, None).deploy(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def predict_model_task(params):
return with_context(
params,
lambda ctx: A2MLModel(ctx, None).predict(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def import_data_task(params):
return with_context(
params,
lambda ctx: A2ML(ctx).import_data(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def train_task(params):
return with_context(
params,
lambda ctx: A2ML(ctx).train(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def evaluate_task(params):
return with_context(
params,
lambda ctx: A2ML(ctx).evaluate(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def deploy_task(params):
return with_context(
params,
lambda ctx: A2ML(ctx).deploy(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def predict_task(params):
return with_context(
params,
lambda ctx: A2ML(ctx).predict(*params['args'], **params['kwargs'])
)
@celeryApp.task(after_return=__handle_task_result)
def review_task(params):
raise Exception('not inplemented yet')
@celeryApp.task(after_return=__handle_task_result)
def demo_task(params):
import time
request_id = params['_request_id']
for i in range(0, 10):
notificator.publish_log(request_id, 'info', 'log ' + str(i))
time.sleep(2)
notificator.publish_result(request_id, 'SUCCESS', 'done')
def with_context(params, proc):
ctx = create_context(params)
if not 'args' in params:
params['args'] = []
if not 'kwargs' in params:
params['kwargs'] = {}
res = proc(ctx)
ctx.set_runs_on_server(False)
ctx.config.set('config', 'use_server', True)
return {'response': res, 'config': jsonpickle.encode(ctx.config)}
def __exception_message_with_all_causes(e):
if isinstance(e, Exception) and e.__cause__:
return str(e) + ' caused by ' + __exception_message_with_all_causes(e.__cause__)
else:
return str(e)
def __error_to_result(retval, einfo):
res = __exception_message_with_all_causes(retval)
if einfo:
res += '\n' + str(einfo)
return res
def __map_collection_to_name(res, collection_name):
for provder in res.keys():
if collection_name in res[provder]['data']:
res[provder]['data'][collection_name] = list(
map(lambda x: x.get('name'), res[provder]['data'][collection_name])
)
| true | true |
f73c75189ffae3013e9c04bbd3dce41c2d10c33e | 263 | py | Python | deliver/ia369/iareadurl.py | mariecpereira/Extracao-de-Caracteristicas-Corpo-Caloso | f094c706db815f91cf61d1d501c2a9030b9b54d3 | [
"MIT"
] | 7 | 2015-02-18T17:21:20.000Z | 2016-10-04T19:14:16.000Z | deliver/ia369/iareadurl.py | mariecpereira/Extracao-de-Caracteristicas-Corpo-Caloso | f094c706db815f91cf61d1d501c2a9030b9b54d3 | [
"MIT"
] | null | null | null | deliver/ia369/iareadurl.py | mariecpereira/Extracao-de-Caracteristicas-Corpo-Caloso | f094c706db815f91cf61d1d501c2a9030b9b54d3 | [
"MIT"
] | 20 | 2017-06-26T17:40:28.000Z | 2021-09-15T13:47:19.000Z | # -*- encoding: utf-8 -*-
# Module iareadurl
def iareadurl(url):
    """Download the image at *url* and return it as an array via adpil.

    NOTE(review): StringIO/urllib.urlopen are Python 2 interfaces — this
    function only runs on Python 2; confirm before porting.
    """
    from StringIO import StringIO
    import urllib
    import PIL
    import adpil
    raw = StringIO(urllib.urlopen(url).read())   # avoid shadowing builtin `file`
    picture = PIL.Image.open(raw)
    return adpil.pil2array(picture)
| 18.785714 | 47 | 0.661597 |
def iareadurl(url):
from StringIO import StringIO
import urllib
import PIL
import adpil
file = StringIO(urllib.urlopen(url).read())
img = PIL.Image.open(file)
return adpil.pil2array(img)
| true | true |
f73c7584ac37c9c7667191eb3a7b8d3eab5bc8eb | 6,528 | py | Python | predict.py | DilipJainDj/Flower-Image-Classifier | ab7c47d176b12bae51ee33e427f3d95c57d07416 | [
"MIT"
] | 1 | 2020-05-23T11:09:09.000Z | 2020-05-23T11:09:09.000Z | predict.py | DilipJainDj/Flower-Image-Classifier | ab7c47d176b12bae51ee33e427f3d95c57d07416 | [
"MIT"
] | null | null | null | predict.py | DilipJainDj/Flower-Image-Classifier | ab7c47d176b12bae51ee33e427f3d95c57d07416 | [
"MIT"
] | null | null | null | """
@author: Dilip Jain
@title: Image Classifier training file
"""
import argparse
import json
import PIL
import torch
import numpy as np
from math import ceil
from train import check_gpu
from torchvision import models
# ------------------------------------------------------------------------------- #
# Function Definitions
# ------------------------------------------------------------------------------- #
# Function arg_parser() parses keyword arguments from the command line
def arg_parser():
    """Build and parse the prediction CLI arguments."""
    parser = argparse.ArgumentParser(description="Neural Network Settings")
    # Required inputs: the image to classify and the trained checkpoint.
    parser.add_argument('--image', type=str, help='Point to impage file for prediction.', required=True)
    parser.add_argument('--checkpoint', type=str, help='Point to checkpoint file as str.', required=True)
    # Optional behaviour tweaks.
    parser.add_argument('--top_k', type=int, help='Choose top K matches as int.')
    parser.add_argument('--category_names', type=str, help='Mapping from categories to real names.')
    parser.add_argument('--gpu', action="store_true", help='Use GPU + Cuda for calculations')
    return parser.parse_args()
# Function load_checkpoint(checkpoint_path) loads our saved deep learning model from checkpoint
def load_checkpoint(checkpoint_path):
    """Rebuild the classifier network saved by train.py.

    checkpoint_path: path to the checkpoint file to load.

    Fixes two defects:
    * the ``checkpoint_path`` argument was ignored in favour of a
      hard-coded "my_checkpoint.pth";
    * the non-vgg16 branch ran ``exec`` on a malformed format call
      (``"...".checkpoint[...]`` — attribute access on a str raises
      AttributeError); replaced with a ``getattr`` lookup on
      torchvision.models.
    """
    checkpoint = torch.load(checkpoint_path)
    architecture = checkpoint['architecture']
    if architecture == 'vgg16':
        model = models.vgg16(pretrained=True)
    else:
        # Look the constructor up by name instead of exec-ing a string.
        model = getattr(models, architecture)(pretrained=True)
    model.name = architecture
    # Freeze the pretrained feature extractor; only the stored classifier head is custom.
    for param in model.parameters():
        param.requires_grad = False
    model.class_to_idx = checkpoint['class_to_idx']
    model.classifier = checkpoint['classifier']
    model.load_state_dict(checkpoint['state_dict'])
    return model
# Function process_image(image_path) performs cropping, scaling of image for our model
def process_image(image_path):
    """Load an image and preprocess it for the network.

    Returns a (3, 224, 224) float array, scaled to [0, 1], normalised with
    the ImageNet channel statistics, and transposed to channels-first order.

    Fixes:
    * the crop was centred on (width/4, height/4) of the *pre-resize*
      dimensions instead of the centre of the resized image;
    * it cropped 244 px instead of the 224 px VGG-style models expect;
    * the aspect-ratio bound used the absurd constant 256**600.
    """
    from PIL import Image  # explicit submodule import; `import PIL` alone may not expose Image
    image = Image.open(image_path)
    # Shrink so the shorter side becomes 256 px, preserving aspect ratio.
    width, height = image.size
    if width < height:
        image.thumbnail(size=(256, 10 ** 6))
    else:
        image.thumbnail(size=(10 ** 6, 256))
    # Centre-crop the *resized* image to 224x224.
    new_width, new_height = image.size
    left = (new_width - 224) / 2
    top = (new_height - 224) / 2
    image = image.crop((left, top, left + 224, top + 224))
    # Scale to [0, 1]; imshow()/the network expect floats in that range.
    np_image = np.array(image) / 255
    # Normalise each colour channel with the ImageNet statistics.
    normalise_means = [0.485, 0.456, 0.406]
    normalise_std = [0.229, 0.224, 0.225]
    np_image = (np_image - normalise_means) / normalise_std
    # Channels-first (C, H, W) for torch.
    return np_image.transpose(2, 0, 1)
def predict(image_tensor, model, device, cat_to_name, top_k):
    '''Predict the class (or classes) of an image using a trained deep learning model.

    image_tensor: numpy array as produced by process_image (channels-first).
    model: pytorch network with a ``class_to_idx`` mapping attached.
    device: unused here — inference is forced onto the CPU below.
    cat_to_name: dict mapping class label -> human-readable flower name.
    top_k: number of top classes to report; defaults to 5 when None.

    Returns (top_probabilities, top_labels, top_flowers), each of length top_k.
    '''
    # Default to the 5 most likely classes when the caller gave no K.
    if type(top_k) == type(None):
        top_k = 5
        print("Top K not specified, assuming K=5.")
    # Inference mode: disables dropout / batch-norm updates.
    model.eval();
    # Add a batch dimension and convert to a float tensor.
    torch_image = torch.from_numpy(np.expand_dims(image_tensor,
                                                  axis=0)).type(torch.FloatTensor)
    # NOTE(review): the `device` argument is ignored and the model is moved to CPU — confirm intent.
    model=model.cpu()
    # Forward pass; the exp() below implies the model ends in LogSoftmax — presumably; verify.
    log_probs = model.forward(torch_image)
    # Convert log-probabilities back to a linear scale.
    linear_probs = torch.exp(log_probs)
    # Top-K probabilities and class indices.
    top_probs, top_labels = linear_probs.topk(top_k)
    # Detach from the autograd graph and drop the batch dimension.
    top_probs = np.array(top_probs.detach())[0] # This is not the correct way to do it but the correct way isnt working thanks to cpu/gpu issues so I don't care.
    top_labels = np.array(top_labels.detach())[0]
    # Invert class_to_idx so network output indices map back to dataset labels.
    idx_to_class = {val: key for key, val in
                    model.class_to_idx.items()}
    top_labels = [idx_to_class[lab] for lab in top_labels]
    top_flowers = [cat_to_name[lab] for lab in top_labels]
    return top_probs, top_labels, top_flowers
def print_probability(probs, flowers):
    """Print ranked flower/likelihood pairs, one per line.

    NOTE(review): the parameter names are swapped relative to how main()
    calls this (flowers first, probs second); the body compensates by
    formatting the second zip element as the flower name — confirm before
    renaming either side.
    """
    rank = 1
    for first, second in zip(flowers, probs):
        print("Rank {}:".format(rank),
              "Flower: {}, liklihood: {}%".format(second, ceil(first * 100)))
        rank += 1
# =============================================================================
# Main Function
# =============================================================================
def main():
    """Parse CLI args, load the checkpoint, and print top-K predictions for one image."""
    # Get Keyword Args for Prediction
    args = arg_parser()
    # Map class labels to flower names.
    # NOTE(review): --category_names has no default; open(None) raises TypeError if omitted — confirm.
    with open(args.category_names, 'r') as f:
        cat_to_name = json.load(f)
    # Rebuild the trained network from the checkpoint written by train.py.
    model = load_checkpoint(args.checkpoint)
    # Resize/crop/normalise the input image into the network's expected format.
    image_tensor = process_image(args.image)
    # Pick CPU or CUDA depending on --gpu and availability.
    device = check_gpu(gpu_arg=args.gpu);
    # Use `processed_image` to predict the top K most likely classes
    top_probs, top_labels, top_flowers = predict(image_tensor, model,
                                                 device, cat_to_name,
                                                 args.top_k)
    # NOTE(review): arguments here are (flowers, probs) though the callee declares
    # (probs, flowers); the callee's body compensates — see print_probability.
    print_probability(top_flowers, top_probs)
# =============================================================================
# Run Program
# =============================================================================
if __name__ == '__main__': main()
| 34 | 161 | 0.613358 |
import argparse
import json
import PIL
import torch
import numpy as np
from math import ceil
from train import check_gpu
from torchvision import models
def arg_parser():
parser = argparse.ArgumentParser(description="Neural Network Settings")
parser.add_argument('--image', type=str, help='Point to impage file for prediction.',required=True)
parser.add_argument('--checkpoint', type=str, help='Point to checkpoint file as str.',required=True)
parser.add_argument('--top_k', type=int, help='Choose top K matches as int.')
parser.add_argument('--category_names', type=str, help='Mapping from categories to real names.')
parser.add_argument('--gpu', action="store_true", help='Use GPU + Cuda for calculations')
args = parser.parse_args()
return args
def load_checkpoint(checkpoint_path):
checkpoint = torch.load("my_checkpoint.pth")
if checkpoint['architecture'] == 'vgg16':
model = models.vgg16(pretrained=True)
model.name = "vgg16"
else:
exec("model = models.{}(pretrained=True)".checkpoint['architecture'])
model.name = checkpoint['architecture']
for param in model.parameters(): param.requires_grad = False
# Load stuff from checkpoint
model.class_to_idx = checkpoint['class_to_idx']
model.classifier = checkpoint['classifier']
model.load_state_dict(checkpoint['state_dict'])
return model
# Function process_image(image_path) performs cropping, scaling of image for our model
def process_image(image_path):
test_image = PIL.Image.open(image_path)
# Get original dimensions
orig_width, orig_height = test_image.size
# Find shorter size and create settings to crop shortest side to 256
if orig_width < orig_height: resize_size=[256, 256**600]
else: resize_size=[256**600, 256]
test_image.thumbnail(size=resize_size)
# Find pixels to crop on to create 224x224 image
center = orig_width/4, orig_height/4
left, top, right, bottom = center[0]-(244/2), center[1]-(244/2), center[0]+(244/2), center[1]+(244/2)
test_image = test_image.crop((left, top, right, bottom))
# Converrt to numpy - 244x244 image w/ 3 channels (RGB)
np_image = np.array(test_image)/255 # Divided by 255 because imshow() expects integers (0:1)!!
# Normalize each color channel
normalise_means = [0.485, 0.456, 0.406]
normalise_std = [0.229, 0.224, 0.225]
np_image = (np_image-normalise_means)/normalise_std
# Set the color to the first channel
np_image = np_image.transpose(2, 0, 1)
return np_image
def predict(image_tensor, model, device, cat_to_name, top_k):
# check top_k
if type(top_k) == type(None):
top_k = 5
print("Top K not specified, assuming K=5.")
# Set model to evaluate
model.eval();
# Convert image from numpy to torch
torch_image = torch.from_numpy(np.expand_dims(image_tensor,
axis=0)).type(torch.FloatTensor)
model=model.cpu()
# Find probabilities (results) by passing through the function (note the log softmax means that its on a log scale)
log_probs = model.forward(torch_image)
# Convert to linear scale
linear_probs = torch.exp(log_probs)
# Find the top 5 results
top_probs, top_labels = linear_probs.topk(top_k)
# Detatch all of the details
top_probs = np.array(top_probs.detach())[0] # This is not the correct way to do it but the correct way isnt working thanks to cpu/gpu issues so I don't care.
top_labels = np.array(top_labels.detach())[0]
idx_to_class = {val: key for key, val in
model.class_to_idx.items()}
top_labels = [idx_to_class[lab] for lab in top_labels]
top_flowers = [cat_to_name[lab] for lab in top_labels]
return top_probs, top_labels, top_flowers
def print_probability(probs, flowers):
for i, j in enumerate(zip(flowers, probs)):
print ("Rank {}:".format(i+1),
"Flower: {}, liklihood: {}%".format(j[1], ceil(j[0]*100)))
def main():
args = arg_parser()
with open(args.category_names, 'r') as f:
cat_to_name = json.load(f)
model = load_checkpoint(args.checkpoint)
image_tensor = process_image(args.image)
device = check_gpu(gpu_arg=args.gpu);
top_probs, top_labels, top_flowers = predict(image_tensor, model,
device, cat_to_name,
args.top_k)
print_probability(top_flowers, top_probs)
if __name__ == '__main__': main()
| true | true |
f73c75f5a3a22f57f8ba11dd5183981fb50a6ac7 | 995 | py | Python | problems/CR/auto/problem380_CR.py | sunandita/ICAPS_Summer_School_RAE_2020 | a496b62185bcfdd2c76eb7986ae99cfa85708d28 | [
"BSD-3-Clause"
] | 5 | 2020-10-15T14:40:03.000Z | 2021-08-20T17:45:41.000Z | problems/CR/auto/problem380_CR.py | sunandita/ICAPS_Summer_School_RAE_2020 | a496b62185bcfdd2c76eb7986ae99cfa85708d28 | [
"BSD-3-Clause"
] | null | null | null | problems/CR/auto/problem380_CR.py | sunandita/ICAPS_Summer_School_RAE_2020 | a496b62185bcfdd2c76eb7986ae99cfa85708d28 | [
"BSD-3-Clause"
] | 2 | 2020-10-15T07:06:14.000Z | 2020-10-15T17:33:01.000Z | __author__ = 'patras'
from domain_chargeableRobot import *
from timer import DURATION
from state import state
DURATION.TIME = {
'put': 2,
'take': 2,
'perceive': 2,
'charge': 2,
'move': 2,
'moveToEmergency': 2,
'moveCharger': 2,
'addressEmergency': 2,
'wait': 2,
}
DURATION.COUNTER = {
'put': 2,
'take': 2,
'perceive': 2,
'charge': 2,
'move': 2,
'moveToEmergency': 2,
'moveCharger': 2,
'addressEmergency': 2,
'wait': 2,
}
rv.LOCATIONS = [1, 2, 3, 4]
rv.EDGES = {1: [3], 2: [3], 3: [1, 2, 4], 4: [3]}
rv.OBJECTS=['o1']
rv.ROBOTS=['r1']
def ResetState():
    """Reset the shared simulation state for this chargeable-robot problem instance."""
    # NIL/UNK presumably come from the domain_chargeableRobot star-import — confirm.
    state.loc = {'r1': 3}              # robot r1 starts at location 3
    state.charge = {'r1': 3}           # initial battery level
    state.load = {'r1': NIL}           # carrying nothing
    state.pos = {'c1': 1, 'o1': UNK}   # charger at loc 1; o1's position unknown
    state.containers = { 1:[],2:[],3:[],4:['o1'],}  # o1 is actually in container 4
    state.emergencyHandling = {'r1': False, 'r2': False}
    state.view = {}
    for l in rv.LOCATIONS:
        state.view[l] = False          # no location has been perceived yet
tasks = {
1: [['fetch', 'r1', 'o1']],
}
eventsEnv = {
} | 19.509804 | 56 | 0.523618 | __author__ = 'patras'
from domain_chargeableRobot import *
from timer import DURATION
from state import state
DURATION.TIME = {
'put': 2,
'take': 2,
'perceive': 2,
'charge': 2,
'move': 2,
'moveToEmergency': 2,
'moveCharger': 2,
'addressEmergency': 2,
'wait': 2,
}
DURATION.COUNTER = {
'put': 2,
'take': 2,
'perceive': 2,
'charge': 2,
'move': 2,
'moveToEmergency': 2,
'moveCharger': 2,
'addressEmergency': 2,
'wait': 2,
}
rv.LOCATIONS = [1, 2, 3, 4]
rv.EDGES = {1: [3], 2: [3], 3: [1, 2, 4], 4: [3]}
rv.OBJECTS=['o1']
rv.ROBOTS=['r1']
def ResetState():
state.loc = {'r1': 3}
state.charge = {'r1': 3}
state.load = {'r1': NIL}
state.pos = {'c1': 1, 'o1': UNK}
state.containers = { 1:[],2:[],3:[],4:['o1'],}
state.emergencyHandling = {'r1': False, 'r2': False}
state.view = {}
for l in rv.LOCATIONS:
state.view[l] = False
tasks = {
1: [['fetch', 'r1', 'o1']],
}
eventsEnv = {
} | true | true |
f73c77c4bf18eec76e0cb3c91065da3c33bb40a0 | 4,231 | py | Python | library/inky/eeprom.py | fsargent/inky | 54684464b2f35bfd52208cdfb922c09685644181 | [
"MIT"
] | 1 | 2021-07-21T16:38:38.000Z | 2021-07-21T16:38:38.000Z | library/inky/eeprom.py | fsargent/inky | 54684464b2f35bfd52208cdfb922c09685644181 | [
"MIT"
] | null | null | null | library/inky/eeprom.py | fsargent/inky | 54684464b2f35bfd52208cdfb922c09685644181 | [
"MIT"
] | 1 | 2021-12-11T18:37:13.000Z | 2021-12-11T18:37:13.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Inky display-type EEPROM tools."""
import datetime
import struct
EEP_ADDRESS = 0x50
EEP_WP = 12
DISPLAY_VARIANT = [
None,
'Red pHAT (High-Temp)',
'Yellow wHAT',
'Black wHAT',
'Black pHAT',
'Yellow pHAT',
'Red wHAT',
'Red wHAT (High-Temp)',
'Red wHAT',
None,
'Black pHAT (SSD1608)',
'Red pHAT (SSD1608)',
'Yellow pHAT (SSD1608)',
None,
'7-Colour (UC8159)'
]
class EPDType:
    """Class to represent EPD EEPROM structure.

    Binary layout is struct format '<HHBBB22p' (29 bytes): width, height,
    colour code, PCB variant, display variant, then a Pascal-style
    write-time string (truncated to 21 chars).

    Fixes in this revision:
    * ``set_color``: ``list.index`` raises ValueError, not IndexError, so
      the old handler was dead and the documented message never appeared;
    * ``to_list``: iterating bytes yields ints on Python 3, so ``ord(c)``
      raised TypeError.
    """

    # Index in this list == colour code stored in the EEPROM byte.
    valid_colors = [None, 'black', 'red', 'yellow', None, '7colour']

    def __init__(self, width, height, color, pcb_variant, display_variant, write_time=None):
        """Initialise new EEPROM data structure."""
        self.width = width
        self.height = height
        self.color = color
        if isinstance(color, str):
            self.set_color(color)
        self.pcb_variant = pcb_variant
        self.display_variant = display_variant
        self.eeprom_write_time = str(datetime.datetime.now()) if write_time is None else write_time

    def __repr__(self):
        """Return string representation of EEPROM data structure."""
        return """Display: {}x{}
Color: {}
PCB Variant: {}
Display Variant: {}
Time: {}""".format(self.width,
                   self.height,
                   self.get_color(),
                   self.pcb_variant / 10.0,
                   self.display_variant,
                   self.eeprom_write_time)

    @classmethod
    def from_bytes(class_object, data):
        """Initialise new EEPROM data structure from a bytes-like object or list."""
        data = bytearray(data)
        data = struct.unpack('<HHBBB22p', data)
        return class_object(*data)

    def update_eeprom_write_time(self):
        """Update the stored write time."""
        self.eeprom_write_time = str(datetime.datetime.now())

    def encode(self):
        """Return a bytes object representing the EEPROM data structure."""
        return struct.pack('<HHBBB22p',
                           self.width,
                           self.height,
                           self.color,
                           self.pcb_variant,
                           self.display_variant,
                           str(datetime.datetime.now()).encode("ASCII"))

    def to_list(self):
        """Return a list of ints (byte values) representing the EEPROM data structure."""
        # Iterating bytes already yields ints in Python 3; no ord() needed.
        return list(self.encode())

    def set_color(self, color):
        """Set the stored colour value, raising ValueError for unknown names."""
        try:
            self.color = self.valid_colors.index(color)
        except ValueError:
            raise ValueError('Invalid colour: {}'.format(color)) from None

    def get_color(self):
        """Get the stored colour name, or None if the code is unknown."""
        try:
            return self.valid_colors[self.color]
        except IndexError:
            return None

    def get_variant(self):
        """Return text name of the display variant, or None if unknown."""
        try:
            return DISPLAY_VARIANT[self.display_variant]
        except IndexError:
            return None
# Normal Yellow wHAT
yellow_what_1_E = EPDType(400, 300, color='yellow', pcb_variant=12, display_variant=2)
# Normal Black wHAT
black_what_1_E = EPDType(400, 300, color='black', pcb_variant=12, display_variant=3)
# Normal Black pHAT
black_phat_1_E = EPDType(212, 104, color='black', pcb_variant=12, display_variant=4)
# Hightemp Red pHAT
red_small_1_E = EPDType(212, 104, color='red', pcb_variant=12, display_variant=1)
def read_eeprom(i2c_bus=None):
    """Return a class representing EEPROM contents, or none.

    i2c_bus: optional SMBus-compatible object; when omitted, bus 1 is opened
    via smbus2 (raising ImportError with install instructions if missing).
    Any IOError while talking to the device yields None instead of raising.
    """
    try:
        if i2c_bus is None:
            try:
                from smbus2 import SMBus
            except ImportError:
                raise ImportError('This library requires the smbus2 module\nInstall with: sudo pip install smbus2')
            i2c_bus = SMBus(1)
        # Presumably resets the EEPROM's internal read pointer to 0 — confirm against datasheet.
        i2c_bus.write_i2c_block_data(EEP_ADDRESS, 0x00, [0x00])
        # 29 bytes == the '<HHBBB22p' layout written by EPDType.encode().
        return EPDType.from_bytes(i2c_bus.read_i2c_block_data(EEP_ADDRESS, 0, 29))
    except IOError:
        return None
def main(args):
    """EEPROM Test Function: print whatever EEPROM (if any) is detected."""
    detected = read_eeprom()
    print(detected)
    return 0
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
| 29.17931 | 115 | 0.601513 |
import datetime
import struct
EEP_ADDRESS = 0x50
EEP_WP = 12
DISPLAY_VARIANT = [
None,
'Red pHAT (High-Temp)',
'Yellow wHAT',
'Black wHAT',
'Black pHAT',
'Yellow pHAT',
'Red wHAT',
'Red wHAT (High-Temp)',
'Red wHAT',
None,
'Black pHAT (SSD1608)',
'Red pHAT (SSD1608)',
'Yellow pHAT (SSD1608)',
None,
'7-Colour (UC8159)'
]
class EPDType:
valid_colors = [None, 'black', 'red', 'yellow', None, '7colour']
def __init__(self, width, height, color, pcb_variant, display_variant, write_time=None):
self.width = width
self.height = height
self.color = color
if type(color) == str:
self.set_color(color)
self.pcb_variant = pcb_variant
self.display_variant = display_variant
self.eeprom_write_time = str(datetime.datetime.now()) if write_time is None else write_time
def __repr__(self):
return """Display: {}x{}
Color: {}
PCB Variant: {}
Display Variant: {}
Time: {}""".format(self.width,
self.height,
self.get_color(),
self.pcb_variant / 10.0,
self.display_variant,
self.eeprom_write_time)
@classmethod
def from_bytes(class_object, data):
data = bytearray(data)
data = struct.unpack('<HHBBB22p', data)
return class_object(*data)
def update_eeprom_write_time(self):
self.eeprom_write_time = str(datetime.datetime.now())
def encode(self):
return struct.pack('<HHBBB22p',
self.width,
self.height,
self.color,
self.pcb_variant,
self.display_variant,
str(datetime.datetime.now()).encode("ASCII"))
def to_list(self):
return [ord(c) for c in self.encode()]
def set_color(self, color):
try:
self.color = self.valid_colors.index(color)
except IndexError:
raise ValueError('Invalid colour: {}'.format(color))
def get_color(self):
try:
return self.valid_colors[self.color]
except IndexError:
return None
def get_variant(self):
try:
return DISPLAY_VARIANT[self.display_variant]
except IndexError:
return None
yellow_what_1_E = EPDType(400, 300, color='yellow', pcb_variant=12, display_variant=2)
black_what_1_E = EPDType(400, 300, color='black', pcb_variant=12, display_variant=3)
black_phat_1_E = EPDType(212, 104, color='black', pcb_variant=12, display_variant=4)
red_small_1_E = EPDType(212, 104, color='red', pcb_variant=12, display_variant=1)
def read_eeprom(i2c_bus=None):
try:
if i2c_bus is None:
try:
from smbus2 import SMBus
except ImportError:
raise ImportError('This library requires the smbus2 module\nInstall with: sudo pip install smbus2')
i2c_bus = SMBus(1)
i2c_bus.write_i2c_block_data(EEP_ADDRESS, 0x00, [0x00])
return EPDType.from_bytes(i2c_bus.read_i2c_block_data(EEP_ADDRESS, 0, 29))
except IOError:
return None
def main(args):
print(read_eeprom())
return 0
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
| true | true |
f73c78beccf0f6e7740d9c8958e84123b0179fe5 | 596 | py | Python | pattern/tests/tc6.py | ocirne/puzzle-solver | 17b4bbb1be267d60977b7baae1b0ec8265ad7576 | [
"Unlicense"
] | null | null | null | pattern/tests/tc6.py | ocirne/puzzle-solver | 17b4bbb1be267d60977b7baae1b0ec8265ad7576 | [
"Unlicense"
] | null | null | null | pattern/tests/tc6.py | ocirne/puzzle-solver | 17b4bbb1be267d60977b7baae1b0ec8265ad7576 | [
"Unlicense"
] | null | null | null |
class TestCase(object):
    """Nonogram (picross) puzzle fixture: clue lists for a 10x10 grid
    plus the expected solved grid ('#' = filled, '_' = empty)."""
    M = 10  # grid dimension (M rows x M columns)
    # Run-length clues per row, top to bottom.
    rows = [
        (3,2),
        (1,1,3),
        (1,4),
        (2,),
        (3,),
        (1,3),
        (4,),
        (8,),
        (8,),
        (6,),
    ]
    # Run-length clues per column, left to right.
    cols = [
        (3,),
        (3,3),
        (1,1,3),
        (2,3),
        (3,),
        (3,),
        (1,3),
        (8,),
        (7,),
        (3,3),
    ]
    # Expected solution used by the solver's assertions.
    solution = """
_###____##
_#_#___###
_#____####
_______##_
_______###
__#____###
______####
########__
########__
######____
"""
| 13.860465 | 23 | 0.199664 |
class TestCase(object):
M = 10
rows = [
(3,2),
(1,1,3),
(1,4),
(2,),
(3,),
(1,3),
(4,),
(8,),
(8,),
(6,),
]
cols = [
(3,),
(3,3),
(1,1,3),
(2,3),
(3,),
(3,),
(1,3),
(8,),
(7,),
(3,3),
]
solution = """
_###____##
_#_#___###
_#____####
_______##_
_______###
__#____###
______####
########__
########__
######____
"""
| true | true |
f73c7b2a1eb0c6ac220d81bcf41e82aee1c33b8f | 936 | py | Python | human_evaluation/computer.py | Guaguago/PPLM | c03b184803c3d57851016c788b41f54153547cc4 | [
"Apache-2.0"
] | null | null | null | human_evaluation/computer.py | Guaguago/PPLM | c03b184803c3d57851016c788b41f54153547cc4 | [
"Apache-2.0"
] | null | null | null | human_evaluation/computer.py | Guaguago/PPLM | c03b184803c3d57851016c788b41f54153547cc4 | [
"Apache-2.0"
] | null | null | null | from run_pplm import run_pplm_example
if __name__ == '__main__':
    # Prompt prefixes to condition the PPLM text generation on.
    prefix = ['The orange', 'The spider man', 'my father']
    for p in prefix:
        # Append each run's output to the demo transcript file.
        with open('demos/computer', 'a') as file:
            file.write(
                '========================================================================================================================================================\n')
            file.write('【{}】\n'.format(p))
            # Steer generation toward the 'technology' bag of words;
            # hyper-parameters follow the PPLM paper's BoW defaults.
            run_pplm_example(
                cond_text=p,
                num_samples=1,
                bag_of_words='technology',
                length=50,
                stepsize=0.03,
                sample=True,
                num_iterations=3,
                window_length=5,
                gamma=1.5,
                gm_scale=0.95,
                kl_scale=0.01,
                verbosity='regular',
                file=file,
                generation_method='vad_abs'
            )
| 34.666667 | 173 | 0.372863 | from run_pplm import run_pplm_example
if __name__ == '__main__':
prefix = ['The orange', 'The spider man', 'my father']
for p in prefix:
with open('demos/computer', 'a') as file:
file.write(
'========================================================================================================================================================\n')
file.write('【{}】\n'.format(p))
run_pplm_example(
cond_text=p,
num_samples=1,
bag_of_words='technology',
length=50,
stepsize=0.03,
sample=True,
num_iterations=3,
window_length=5,
gamma=1.5,
gm_scale=0.95,
kl_scale=0.01,
verbosity='regular',
file=file,
generation_method='vad_abs'
)
| true | true |
f73c7b56b51b4c3ca1667432fe6a75ca5922402c | 552 | py | Python | attendance/migrations/0002_auto_20200419_1255.py | AhteshamSid/College_school_management_system | a8504708ea2f347d18d4ac59198f29d05c0374d2 | [
"MIT"
] | null | null | null | attendance/migrations/0002_auto_20200419_1255.py | AhteshamSid/College_school_management_system | a8504708ea2f347d18d4ac59198f29d05c0374d2 | [
"MIT"
] | null | null | null | attendance/migrations/0002_auto_20200419_1255.py | AhteshamSid/College_school_management_system | a8504708ea2f347d18d4ac59198f29d05c0374d2 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.3 on 2020-04-19 12:55
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('student', '0012_auto_20200419_1255'),
('attendance', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='studentattendance',
name='student',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='student.EnrolledStudent'),
),
]
| 26.285714 | 112 | 0.623188 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('student', '0012_auto_20200419_1255'),
('attendance', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='studentattendance',
name='student',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='student.EnrolledStudent'),
),
]
| true | true |
f73c7b644d4faef4d0a37680525e5d728c4ad3d3 | 4,651 | py | Python | tests/v2/test_delete_business.py | miritih/WeConnect | c839f8504f8f2c922b0828c1ada2863fbaa15ee1 | [
"MIT"
] | null | null | null | tests/v2/test_delete_business.py | miritih/WeConnect | c839f8504f8f2c922b0828c1ada2863fbaa15ee1 | [
"MIT"
] | 4 | 2018-02-28T15:17:26.000Z | 2018-05-20T06:59:31.000Z | tests/v2/test_delete_business.py | miritih/WeConnect | c839f8504f8f2c922b0828c1ada2863fbaa15ee1 | [
"MIT"
] | 1 | 2018-03-08T17:50:30.000Z | 2018-03-08T17:50:30.000Z | import unittest
import json
from app import create_app
from app.models.v2 import Business
class DeleteBusinessTestCase(unittest.TestCase):
"""This class represents the api test case"""
def setUp(self):
"""
Will be called before every test
"""
self.app = create_app('testing')
self.app.app_context().push()
self.client = self.app.test_client
self.user = {
"username": "mwenda",
"email": "ericmwenda5@gmail.com",
"password": "qwerty123!@#",
"first_name": "eric",
"last_name": "Miriti"
}
self.logins = {
"username": "mwenda",
"password": "qwerty123!@#"
}
self.business = {
"name": "Andela",
"location": "Nairobi,Kenya",
"category": "Tech",
"description": "Epic"
}
self.client().post(
'/api/v2/auth/register',
data=json.dumps(self.user),
content_type='application/json'
)
self.login = self.client().post(
'/api/v2/auth/login',
data=json.dumps(self.logins),
content_type='application/json'
)
self.data = json.loads(self.login.get_data(as_text=True))
# get the token to be used by tests
self.token = self.data['auth_token']
def tearDown(self):
""" clear data after every test"""
Business.query.delete()
def test_can_delete_successfully(self):
"""Tests that a business can be Deleted successfully"""
self.client().post(
'/api/v2/businesses',
data=json.dumps(self.business),
headers={
"content-type": "application/json",
"access-token": self.token
})
bsid = Business.query.first() # Get the last created Record
res2 = self.client().delete(
'/api/v2/businesses/' + str(bsid.id),
headers={
"content-type": "application/json",
"access-token": self.token
})
self.assertEqual(res2.status_code, 201)
self.assertIn("Business Deleted", str(res2.data))
def test_cannot_delete_empty(self):
"""Tests that cannot delete a business that doesn't exist"""
res2 = self.client().delete(
'/api/v2/businesses/1',
headers={
"content-type": "application/json",
"access-token": self.token
}
)
self.assertEqual(res2.status_code, 401)
self.assertIn("Business not found", str(res2.data))
def can_only_delete_own_business(self):
"""test that one can only delete a business they created """
res2 = self.client().delete(
'/api/v2/businesses/1',
headers={
"content-type": "application/json",
"access-token": self.token
}
)
self.assertEqual(res2.status_code, 401)
self.assertIn(
"Sorry! You can only delete your business!!", str(res2.data))
def test_can_only_delete_own_business(self):
"""Tests that users cannot delete other users businesses"""
self.client().post(
'/api/v2/auth/register',
data=json.dumps({
"username": "Miritim",
"email": "ericmwenda552@gmail.com",
"password": "qwerty123!@#",
"first_name": "eric",
"last_name": "Miriti"
}),
content_type='application/json'
)
login = self.client().post(
'/api/v2/auth/login',
data=json.dumps({
"username": "Miritim",
"password": "qwerty123!@#"
}),
content_type='application/json'
)
token = json.loads(login.data.decode("utf-8"))
bs = self.client().post(
'/api/v2/businesses',
data=json.dumps(self.business),
headers={
"content-type": "application/json",
"access-token": token['auth_token']
}
)
response = json.loads(bs.data.decode('utf-8'))
res2 = self.client().delete(
'/api/v2/businesses/' + str(response['Business']['id']),
headers={
"content-type": "application/json",
"access-token": self.token
}
)
self.assertEqual(res2.status_code, 401)
self.assertIn("Sorry! You can only delete your business",
str(res2.data))
| 32.985816 | 73 | 0.517523 | import unittest
import json
from app import create_app
from app.models.v2 import Business
class DeleteBusinessTestCase(unittest.TestCase):
def setUp(self):
self.app = create_app('testing')
self.app.app_context().push()
self.client = self.app.test_client
self.user = {
"username": "mwenda",
"email": "ericmwenda5@gmail.com",
"password": "qwerty123!@#",
"first_name": "eric",
"last_name": "Miriti"
}
self.logins = {
"username": "mwenda",
"password": "qwerty123!@#"
}
self.business = {
"name": "Andela",
"location": "Nairobi,Kenya",
"category": "Tech",
"description": "Epic"
}
self.client().post(
'/api/v2/auth/register',
data=json.dumps(self.user),
content_type='application/json'
)
self.login = self.client().post(
'/api/v2/auth/login',
data=json.dumps(self.logins),
content_type='application/json'
)
self.data = json.loads(self.login.get_data(as_text=True))
self.token = self.data['auth_token']
def tearDown(self):
Business.query.delete()
def test_can_delete_successfully(self):
self.client().post(
'/api/v2/businesses',
data=json.dumps(self.business),
headers={
"content-type": "application/json",
"access-token": self.token
})
bsid = Business.query.first()
res2 = self.client().delete(
'/api/v2/businesses/' + str(bsid.id),
headers={
"content-type": "application/json",
"access-token": self.token
})
self.assertEqual(res2.status_code, 201)
self.assertIn("Business Deleted", str(res2.data))
def test_cannot_delete_empty(self):
res2 = self.client().delete(
'/api/v2/businesses/1',
headers={
"content-type": "application/json",
"access-token": self.token
}
)
self.assertEqual(res2.status_code, 401)
self.assertIn("Business not found", str(res2.data))
def can_only_delete_own_business(self):
res2 = self.client().delete(
'/api/v2/businesses/1',
headers={
"content-type": "application/json",
"access-token": self.token
}
)
self.assertEqual(res2.status_code, 401)
self.assertIn(
"Sorry! You can only delete your business!!", str(res2.data))
def test_can_only_delete_own_business(self):
self.client().post(
'/api/v2/auth/register',
data=json.dumps({
"username": "Miritim",
"email": "ericmwenda552@gmail.com",
"password": "qwerty123!@#",
"first_name": "eric",
"last_name": "Miriti"
}),
content_type='application/json'
)
login = self.client().post(
'/api/v2/auth/login',
data=json.dumps({
"username": "Miritim",
"password": "qwerty123!@#"
}),
content_type='application/json'
)
token = json.loads(login.data.decode("utf-8"))
bs = self.client().post(
'/api/v2/businesses',
data=json.dumps(self.business),
headers={
"content-type": "application/json",
"access-token": token['auth_token']
}
)
response = json.loads(bs.data.decode('utf-8'))
res2 = self.client().delete(
'/api/v2/businesses/' + str(response['Business']['id']),
headers={
"content-type": "application/json",
"access-token": self.token
}
)
self.assertEqual(res2.status_code, 401)
self.assertIn("Sorry! You can only delete your business",
str(res2.data))
| true | true |
f73c7bbc273be583cd008a58512d9af43393a5b6 | 4,790 | py | Python | scenesim/display/geometry.py | pbattaglia/scenesim | 2633c63bc5cb97ea99017b2e25fc9b4f66d72605 | [
"MIT"
] | 9 | 2015-03-10T16:00:51.000Z | 2020-10-14T17:18:25.000Z | scenesim/display/geometry.py | jhamrick/scenesim | 6ff41c1428a32c078104332431906f4faa0990db | [
"MIT"
] | 1 | 2015-02-13T01:39:46.000Z | 2015-04-28T02:54:50.000Z | scenesim/display/geometry.py | jhamrick/scenesim | 6ff41c1428a32c078104332431906f4faa0990db | [
"MIT"
] | null | null | null | """
``scenesim.display.geometry``
=============================
Functions for manipulating graphics geometry.
"""
import numpy as np
def zbuffer_to_z(zb, near, far):
    """Linearize Z-buffer values into metric Z-distance from the camera.

    Args:
        zb (numpy.ndarray, 2D): Z-buffer image (0 at near plane, 1 at far).
        near (float): Distance of near camera plane.
        far (float): Distance of far camera plane.

    Return:
        (numpy.ndarray, 2D): Z-distance of each pixel.
    """
    # Standard perspective depth linearization.
    denominator = far - zb * (far - near)
    return far * near / denominator
def img_to_d(xi, yi, zb, near, far):
    """Convert image coordinates plus Z-buffer into Euclidean distance
    from the camera position to each 3D point.

    Args:
        xi, yi (numpy.ndarray, 2D): X-, Y-coordinates of each pixel.
        zb (numpy.ndarray, 2D): Z-buffer image.
        near (float): Distance of near camera plane.
        far (float): Distance of far camera plane.

    Return:
        (numpy.ndarray, 2D): Euclidean distance of each pixel.
    """
    depth = zbuffer_to_z(zb, near, far)
    # Angle between the optical axis and the ray through pixel (xi, yi).
    phi = np.arctan2(np.sqrt(xi ** 2 + yi ** 2), near)
    # Distance along the viewing ray = axial depth / cos(angle).
    return depth / np.cos(phi)
def img_to_xyz(xi, yi, zb, near, far):
    """Back-project pixels into 3D camera-space (X, Y, Z) coordinates.

    Args:
        xi, yi (numpy.ndarray, 2D): X-, Y-coordinates of each pixel.
        zb (numpy.ndarray, 2D): Z-buffer image.
        near (float): Distance of near camera plane.
        far (float): Distance of far camera plane.

    Return:
        (numpy.ndarray, 3x2D): X-, Y-, Z-coordinates of each pixel.
    """
    depth = zbuffer_to_z(zb, near, far)
    # Similar triangles: image-plane coords scale by depth / near.
    return np.array((xi * depth / near, yi * depth / near, depth))
def get_projection_mat(camera):
    """Projection matrix of camera.

    Args:
        camera (panda3d.core.NodePath): Camera NodePath.

    Return:
        (numpy.matrix, 4x4): Projection matrix (homogeneous).
    """
    # Lens frustum (camera -> clip space) and camera's world transform.
    lens = camera.node().getLens()
    frust_mat = np.matrix(lens.getProjectionMat())
    cam_mat = np.matrix(camera.getNetTransform().getMat())
    # World -> camera (inverse of camera pose) composed with the frustum;
    # row-vector convention, matching np.dot(points, proj_mat) elsewhere.
    proj_mat = cam_mat.I * frust_mat
    return proj_mat
def extrude(point2d, proj_mat):
    """Compute the 3D inverse perspective projection of a 2D point.

    Args:
        point2d (numpy.ndarray, Nx2): Array of 2D points.
        proj_mat (numpy.matrix, 4x4): Projection matrix (homogeneous).

    Return:
        (numpy.ndarray, 2x3): 3D points on the near and far clip planes
        that project onto ``point2d``.
    """
    inv_proj = np.linalg.inv(proj_mat)
    x, y = point2d[0], point2d[1]
    # Homogeneous points at the near (z=-1) and far (z=1) clip planes.
    clip_pts = np.array(((x, y, -1., 1.), (x, y, 1., 1.)))
    world_h = np.dot(clip_pts, inv_proj)
    # Clamp tiny homogeneous scales to avoid division blow-up near zero.
    scale = world_h[:, [3]].copy()
    thresh = 0.00001
    scale[(scale > 0) & (scale < thresh)] = thresh
    scale[(scale < 0) & (scale > -thresh)] = -thresh
    return np.array(world_h[:, :3] / scale)
def project(point3d, proj_mat):
    """Compute the 2D perspective projection of 3D point(s).

    Args:
        point3d (numpy.ndarray, Nx{3,4}): 3D (or homogeneous 4D) points.
        proj_mat (numpy.matrix, 4x4): Projection matrix (homogeneous).

    Return:
        (numpy.ndarray, Nx2): Projected 2D points.
        (numpy.ndarray, N): Boolean mask of points behind the camera.
    """
    pts = np.array(point3d)
    if pts.ndim == 1:
        # Promote a single point to a 1-row array.
        pts = pts[None, :]
    ncols = pts.shape[1]
    if ncols == 3:
        # Append w=1 to make the coordinates homogeneous.
        hp3 = np.hstack((pts, np.ones((pts.shape[0], 1))))
    elif ncols == 4:
        hp3 = pts
    else:
        raise ValueError("point3d must be either Nx{3,4}, but it is %i" %
                         ncols)
    # Linear projection, then perspective divide by w.
    hp2 = np.dot(hp3, proj_mat)
    point2d = np.array(hp2[:, :2] / hp2[:, [3]])
    f_behind = hp2[:, 2] < 0
    return point2d, f_behind
def plane_intersection(line3, point3, normal3):
    """Compute the point where a line crosses a plane.

    Args:
        line3 (numpy.ndarray, 2x3): 3D line defined by its endpoints.
        point3 (numpy.ndarray, 3): A 3D point on the plane.
        normal3 (numpy.ndarray, 3): The plane's 3D normal vector.

    Return:
        (numpy.ndarray, 3): 3D intersection point.
    """
    start = line3[0]
    direction = np.diff(line3, axis=0).ravel()
    # Parametric distance along the line to the plane; see
    # https://en.wikipedia.org/wiki/Line-plane_intersection
    t = np.dot(point3 - start, normal3) / np.dot(direction, normal3)
    return t * direction + start
| 29.030303 | 85 | 0.607724 | import numpy as np
def zbuffer_to_z(zb, near, far):
z = far * near / (far - zb * (far - near))
return z
def img_to_d(xi, yi, zb, near, far):
z = zbuffer_to_z(zb, near, far)
phi = np.arctan2(np.sqrt(xi ** 2 + yi ** 2), near)
d = z / np.cos(phi)
return d
def img_to_xyz(xi, yi, zb, near, far):
z = zbuffer_to_z(zb, near, far)
x = xi * z / near
y = yi * z / near
xyz = np.array((x, y, z))
return xyz
def get_projection_mat(camera):
lens = camera.node().getLens()
frust_mat = np.matrix(lens.getProjectionMat())
cam_mat = np.matrix(camera.getNetTransform().getMat())
proj_mat = cam_mat.I * frust_mat
return proj_mat
def extrude(point2d, proj_mat):
proj_mat_inv = np.linalg.inv(proj_mat)
hp2 = np.array(((point2d[0], point2d[1], -1., 1.),
(point2d[0], point2d[1], 1., 1.)))
hp3 = np.dot(hp2, proj_mat_inv)
scale = hp3[:, [3]].copy()
thresh = 0.00001
scale[(scale > 0) & (scale < thresh)] = thresh
scale[(scale < 0) & (scale > -thresh)] = -thresh
point3d = np.array(hp3[:, :3] / scale)
return point3d
def project(point3d, proj_mat):
point3d = np.array(point3d)
if point3d.ndim == 1:
point3d = point3d[None, :]
d1len = point3d.shape[1]
if d1len == 3:
hp3 = np.hstack((point3d, np.ones(point3d.shape[0])[:, None]))
elif d1len == 4:
hp3 = point3d
else:
raise ValueError("point3d must be either Nx{3,4}, but it is %i" %
d1len)
hp2 = np.dot(hp3, proj_mat)
point2d = np.array(hp2[:, :2] / hp2[:, [3]])
f_behind = hp2[:, 2] < 0
return point2d, f_behind
def plane_intersection(line3, point3, normal3):
ray = np.diff(line3, axis=0).ravel()
d = np.dot(point3 - line3[0], normal3) / np.dot(ray, normal3)
p3 = d * ray + line3[0]
return p3
| true | true |
f73c7bd9924bbcc84301f73cd0e72ec6d43c1c68 | 2,811 | py | Python | face_web/face_service/service.py | Face-Recognition-Learning-Group/face_service | c23a8519cbf0f0f6297d7b43a5db8077438c58dd | [
"Apache-2.0"
] | 6 | 2021-05-19T06:48:35.000Z | 2021-11-09T11:52:11.000Z | face_web/face_service/service.py | VSOURCE-Platform/VSOURCE_FACE_PLATFORM | c23a8519cbf0f0f6297d7b43a5db8077438c58dd | [
"Apache-2.0"
] | 1 | 2021-05-09T08:29:39.000Z | 2021-05-09T08:29:39.000Z | face_web/face_service/service.py | VSOURCE-Platform/VSOURCE_FACE_PLATFORM | c23a8519cbf0f0f6297d7b43a5db8077438c58dd | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# @Author : Ecohnoch(xcy)
# @File : service.py
# @Function : TODO
import copy
import flask_login
from app import app, db
import configs
def get_data_from_page_limit(page, limit):
    """Return one page of face-comparison tasks visible to the current user.

    Joins pending requests with finished results, keeps rows owned by the
    logged-in user (or the shared 'debug' owner for visitor accounts),
    sorts by creation date (newest first) and slices out the page.

    :param page: 1-based page number
    :param limit: number of rows per page
    :return: (rows_for_this_page, total_row_count_for_user)
    """
    all_requests = db[configs.app_database_request_table].find()
    ans_data = []
    for each_request in all_requests:
        _message = {}
        _id = each_request['id']
        this_result = db[configs.app_database_table].find_one({'id': str(_id)})
        if this_result:
            # Task finished: take all fields from the result record.
            each_result = copy.deepcopy(this_result)
            _message['id'] = each_result['id']
            _message['status'] = each_result['status']
            _message['createDate'] = each_result['create_date']
            _message['collectedDate'] = each_result['collected_date']
            _message['face_name1'] = "/get_image_file/" + each_result['face_name1']
            _message['face_name2'] = "/get_image_file/" + each_result['face_name2']
            _message['score'] = each_result['score']
            _message['owner'] = each_result.get('owner', 'debug')
        else:
            # Task still pending: only request-side fields are available.
            # BUG FIX: the original read the face names from `each_result`,
            # which is either undefined (first iteration) or stale data
            # from a previous loop iteration; use `each_request` instead.
            _message['id'] = each_request['id']
            _message['status'] = each_request['status']
            _message['createDate'] = each_request['create_date']
            _message['face_name1'] = "/get_image_file/" + each_request['face_name1']
            _message['face_name2'] = "/get_image_file/" + each_request['face_name2']
            _message['owner'] = each_request.get('owner', 'debug')
        if _message['owner'] == flask_login.current_user.id:
            # TODO: ownership filtering belongs in the DB query itself.
            ans_data.append(_message)
            continue
        if _message['owner'] == 'debug' and flask_login.current_user.is_visitor():
            # Visitors see the shared demo ('debug') data.
            ans_data.append(_message)
    start_ind = (page - 1) * limit
    end_ind = page * limit
    if start_ind >= len(ans_data):
        final_data = []
    else:
        sorted_data = sorted(ans_data, key=lambda x: x['createDate'], reverse=True)
        final_data = sorted_data[start_ind: end_ind]
    return final_data, len(ans_data)
return final_data, len(ans_data) | 41.955224 | 112 | 0.588047 |
import copy
import flask_login
from app import app, db
import configs
def get_data_from_page_limit(page, limit):
all_requests = db[configs.app_database_request_table].find()
results = db[configs.app_database_table].find()
ans_data = []
for each_request in all_requests:
_message = {}
_id = each_request['id']
this_result = db[configs.app_database_table].find_one({'id': str(_id)})
if this_result:
each_result = copy.deepcopy(this_result)
_message['id'] = each_result['id']
_message['status'] = each_result['status']
_message['createDate'] = each_result['create_date']
_message['collectedDate'] = each_result['collected_date']
_message['face_name1'] = "/get_image_file/" + each_result['face_name1']
_message['face_name2'] = "/get_image_file/" + each_result['face_name2']
_message['score'] = each_result['score']
if 'owner' not in dict(each_result).keys():
_message['owner'] = 'debug'
else:
_message['owner'] = each_result['owner']
else:
_message['id'] = each_request['id']
_message['status'] = each_request['status']
_message['createDate'] = each_request['create_date']
_message['face_name1'] = "/get_image_file/" + each_result['face_name1']
_message['face_name2'] = "/get_image_file/" + each_result['face_name2']
if 'owner' not in dict(each_request).keys():
_message['owner'] = 'debug'
else:
_message['owner'] = each_request['owner']
if _message['owner'] == flask_login.current_user.id:
ans_data.append(_message)
continue
if _message['owner'] == 'debug' and flask_login.current_user.is_visitor():
ans_data.append(_message)
continue
final_data = []
start_ind = (page - 1) * limit
end_ind = page * limit
if start_ind >= len(ans_data):
final_data = []
else:
sorted_data = sorted(ans_data, key=lambda x: x['createDate'], reverse=True)
final_data = sorted_data[start_ind: end_ind]
return final_data, len(ans_data) | true | true |
f73c7c83759f8b64a6ceeb1b97ca71e844c84163 | 11,414 | py | Python | esphome/components/time/__init__.py | psbaltar/esphome | a8d87a1fee5d9ec7bea81cd6e65d8ec7babfbde4 | [
"MIT"
] | 2 | 2020-05-11T09:26:16.000Z | 2021-07-20T03:06:37.000Z | esphome/components/time/__init__.py | frenck/esphome | 92f8b043ce940fd565b3d79562ba415de60f5e34 | [
"MIT"
] | null | null | null | esphome/components/time/__init__.py | frenck/esphome | 92f8b043ce940fd565b3d79562ba415de60f5e34 | [
"MIT"
] | null | null | null | import datetime
import logging
import math
import voluptuous as vol
from esphome import automation
import esphome.config_validation as cv
from esphome.const import CONF_CRON, CONF_DAYS_OF_MONTH, CONF_DAYS_OF_WEEK, CONF_HOURS, \
CONF_MINUTES, CONF_MONTHS, CONF_ON_TIME, CONF_SECONDS, CONF_TIMEZONE, CONF_TRIGGER_ID
from esphome.core import CORE
from esphome.cpp_generator import Pvariable, add
from esphome.cpp_types import App, Component, NoArg, Trigger, esphome_ns
from esphome.py_compat import string_types
_LOGGER = logging.getLogger(__name__)
# Base schema shared by time platforms (no extra options at this level).
PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend({
})
# Handles to the generated C++ namespace and types for code generation.
time_ns = esphome_ns.namespace('time')
RealTimeClockComponent = time_ns.class_('RealTimeClockComponent', Component)
CronTrigger = time_ns.class_('CronTrigger', Trigger.template(NoArg), Component)
ESPTime = time_ns.struct('ESPTime')
def _tz_timedelta(td):
offset_hour = int(td.total_seconds() / (60 * 60))
offset_minute = int(abs(td.total_seconds() / 60)) % 60
offset_second = int(abs(td.total_seconds())) % 60
if offset_hour == 0 and offset_minute == 0 and offset_second == 0:
return '0'
if offset_minute == 0 and offset_second == 0:
return '{}'.format(offset_hour)
if offset_second == 0:
return '{}:{}'.format(offset_hour, offset_minute)
return '{}:{}:{}'.format(offset_hour, offset_minute, offset_second)
# https://stackoverflow.com/a/16804556/8924614
def _week_of_month(dt):
first_day = dt.replace(day=1)
dom = dt.day
adjusted_dom = dom + first_day.weekday()
return int(math.ceil(adjusted_dom / 7.0))
def _tz_dst_str(dt):
    """Encode a DST transition datetime in POSIX TZ 'M<m>.<w>.<d>/<time>'
    form (month, week-of-month, 0-based weekday, local transition time)."""
    transition_time = datetime.timedelta(hours=dt.hour, minutes=dt.minute,
                                         seconds=dt.second)
    return 'M{}.{}.{}/{}'.format(dt.month, _week_of_month(dt),
                                 dt.isoweekday() % 7,
                                 _tz_timedelta(transition_time))
def convert_tz(pytz_obj):
    """Convert a pytz timezone object into a POSIX TZ string (e.g.
    'CET-1CEST-2,M3.5.0/2,M10.5.0/3') by scanning the current year hour
    by hour for DST transitions."""
    tz = pytz_obj
    # Wrappers tolerating both old and new pytz signatures (older pytz
    # methods do not accept the is_dst keyword).
    def _dst(dt, is_dst):
        try:
            return tz.dst(dt, is_dst=is_dst)
        except TypeError:  # older pytz without is_dst support
            return tz.dst(dt)
    def _tzname(dt, is_dst):
        try:
            return tz.tzname(dt, is_dst=is_dst)
        except TypeError:  # older pytz without is_dst support
            return tz.tzname(dt)
    def _utcoffset(dt, is_dst):
        try:
            return tz.utcoffset(dt, is_dst=is_dst)
        except TypeError:  # older pytz without is_dst support
            return tz.utcoffset(dt)
    # Transition data discovered during the scan below.
    dst_begins = None
    dst_tzname = None
    dst_utcoffset = None
    dst_ends = None
    norm_tzname = None
    norm_utcoffset = None
    hour = datetime.timedelta(hours=1)
    this_year = datetime.datetime.now().year
    dt = datetime.datetime(year=this_year, month=1, day=1)
    last_dst = None
    # Walk the whole year in 1-hour steps, recording where the DST flag
    # flips on (dst_begins) and off (dst_ends).
    while dt.year == this_year:
        current_dst = _dst(dt, not last_dst)
        is_dst = bool(current_dst)
        if is_dst != last_dst:
            if is_dst:
                dst_begins = dt
                dst_tzname = _tzname(dt, True)
                dst_utcoffset = _utcoffset(dt, True)
            else:
                dst_ends = dt + hour
                norm_tzname = _tzname(dt, False)
                norm_utcoffset = _utcoffset(dt, False)
            last_dst = is_dst
        dt += hour
    # POSIX TZ uses the opposite sign convention, hence the negation.
    tzbase = '{}{}'.format(norm_tzname, _tz_timedelta(-1 * norm_utcoffset))
    if dst_begins is None:
        # No DST in this timezone
        _LOGGER.info("Detected timezone '%s' with UTC offset %s",
                     norm_tzname, _tz_timedelta(norm_utcoffset))
        return tzbase
    tzext = '{}{},{},{}'.format(dst_tzname, _tz_timedelta(-1 * dst_utcoffset),
                                _tz_dst_str(dst_begins), _tz_dst_str(dst_ends))
    _LOGGER.info("Detected timezone '%s' with UTC offset %s and daylight savings time from "
                 "%s to %s",
                 norm_tzname, _tz_timedelta(norm_utcoffset), dst_begins.strftime("%x %X"),
                 dst_ends.strftime("%x %X"))
    return tzbase + tzext
def detect_tz():
    """Auto-detect the host's timezone and return it as a POSIX TZ string.

    Falls back to 'UTC' when the local zone cannot be determined, and
    raises a validation error if the optional tzlocal dependency is
    missing (it is only needed when no timezone is configured).
    """
    try:
        import tzlocal
        import pytz
    except ImportError:
        raise vol.Invalid("No timezone specified and 'tzlocal' not installed. To automatically "
                          "detect the timezone please install tzlocal (pip install tzlocal)")
    try:
        tz = tzlocal.get_localzone()
    except pytz.exceptions.UnknownTimeZoneError:
        _LOGGER.warning("Could not auto-detect timezone. Using UTC...")
        return 'UTC'
    return convert_tz(tz)
def _parse_cron_int(value, special_mapping, message):
    """Parse a single cron token into an int, honouring named aliases.

    :param value: raw token (int, or a string such as '5' or 'SUN')
    :param special_mapping: optional dict mapping names to ints
    :param message: error format string receiving the offending value
    :raises vol.Invalid: if the token is neither an alias nor an integer
    """
    mapping = special_mapping or {}
    if isinstance(value, string_types) and value in mapping:
        return mapping[value]
    try:
        return int(value)
    except ValueError:
        raise vol.Invalid(message.format(value))
def _parse_cron_part(part, min_value, max_value, special_mapping):
if part in ('*', '?'):
return set(x for x in range(min_value, max_value + 1))
if '/' in part:
data = part.split('/')
if len(data) > 2:
raise vol.Invalid(u"Can't have more than two '/' in one time expression, got {}"
.format(part))
offset, repeat = data
offset_n = 0
if offset:
offset_n = _parse_cron_int(offset, special_mapping,
u"Offset for '/' time expression must be an integer, got {}")
try:
repeat_n = int(repeat)
except ValueError:
raise vol.Invalid(u"Repeat for '/' time expression must be an integer, got {}"
.format(repeat))
return set(x for x in range(offset_n, max_value + 1, repeat_n))
if '-' in part:
data = part.split('-')
if len(data) > 2:
raise vol.Invalid(u"Can't have more than two '-' in range time expression '{}'"
.format(part))
begin, end = data
begin_n = _parse_cron_int(begin, special_mapping, u"Number for time range must be integer, "
u"got {}")
end_n = _parse_cron_int(end, special_mapping, u"Number for time range must be integer, "
u"got {}")
if end_n < begin_n:
return set(x for x in range(end_n, max_value + 1)) | \
set(x for x in range(min_value, begin_n + 1))
return set(x for x in range(begin_n, end_n + 1))
return {_parse_cron_int(part, special_mapping, u"Number for time expression must be an "
u"integer, got {}")}
def cron_expression_validator(name, min_value, max_value, special_mapping=None):
    """Build a validator for one crontab field.

    The returned validator accepts either a pre-expanded list of ints
    (checked for type and range) or a crontab expression string, which is
    expanded via _parse_cron_part and re-validated as a list.
    """
    def validator(value):
        if isinstance(value, list):
            for v in value:
                if not isinstance(v, int):
                    raise vol.Invalid(
                        "Expected integer for {} '{}', got {}".format(v, name, type(v)))
                if v < min_value or v > max_value:
                    raise vol.Invalid(
                        "{} {} is out of range (min={} max={}).".format(name, v, min_value,
                                                                        max_value))
            return list(sorted(value))
        value = cv.string(value)
        values = set()
        # Comma-separated sub-expressions are unioned together.
        for part in value.split(','):
            values |= _parse_cron_part(part, min_value, max_value, special_mapping)
        # Recurse with the expanded list to run the range checks above.
        return validator(list(values))
    return validator
# Per-field cron validators.  Ranges follow crontab conventions; weekdays
# are 1-7 with Sunday == 1, and months/weekdays accept 3-letter English
# names.  NOTE(review): seconds allow 60, presumably for leap seconds.
validate_cron_seconds = cron_expression_validator('seconds', 0, 60)
validate_cron_minutes = cron_expression_validator('minutes', 0, 59)
validate_cron_hours = cron_expression_validator('hours', 0, 23)
validate_cron_days_of_month = cron_expression_validator('days of month', 1, 31)
validate_cron_months = cron_expression_validator('months', 1, 12, {
    'JAN': 1, 'FEB': 2, 'MAR': 3, 'APR': 4, 'MAY': 5, 'JUN': 6, 'JUL': 7, 'AUG': 8,
    'SEP': 9, 'OCT': 10, 'NOV': 11, 'DEC': 12
})
validate_cron_days_of_week = cron_expression_validator('days of week', 1, 7, {
    'SUN': 1, 'MON': 2, 'TUE': 3, 'WED': 4, 'THU': 5, 'FRI': 6, 'SAT': 7
})
# Config keys that make up a full cron specification.
CRON_KEYS = [CONF_SECONDS, CONF_MINUTES, CONF_HOURS, CONF_DAYS_OF_MONTH, CONF_MONTHS,
             CONF_DAYS_OF_WEEK]
def validate_cron_raw(value):
    """Validate a classic 6-field crontab string and return a dict of
    per-field expanded value lists."""
    fields = cv.string(value).split(' ')
    if len(fields) != 6:
        raise vol.Invalid("Cron expression must consist of exactly 6 space-separated parts, "
                          "not {}".format(len(fields)))
    seconds, minutes, hours, days_of_month, months, days_of_week = fields
    return {
        CONF_SECONDS: validate_cron_seconds(seconds),
        CONF_MINUTES: validate_cron_minutes(minutes),
        CONF_HOURS: validate_cron_hours(hours),
        CONF_DAYS_OF_MONTH: validate_cron_days_of_month(days_of_month),
        CONF_MONTHS: validate_cron_months(months),
        CONF_DAYS_OF_WEEK: validate_cron_days_of_week(days_of_week),
    }
def validate_cron_keys(value):
    """Expand a cron: option into the individual field keys, or otherwise
    require at least one individual cron field to be present."""
    if CONF_CRON not in value:
        return cv.has_at_least_one_key(*CRON_KEYS)(value)
    for key in value.keys():
        if key in CRON_KEYS:
            raise vol.Invalid("Cannot use option {} when cron: is specified.".format(key))
    expanded = value[CONF_CRON]
    merged = {key: value[key] for key in value if key != CONF_CRON}
    merged.update(expanded)
    return merged
def validate_tz(value):
    """Validate a timezone option; known pytz names are converted to a POSIX
    TZ string, anything else is passed through untouched."""
    tz_name = cv.string_strict(value)
    try:
        import pytz
        return convert_tz(pytz.timezone(tz_name))
    except Exception:  # pylint: disable=broad-except
        # pytz missing, name not in its database, or conversion failed:
        # assume the user supplied a raw TZ string.
        return tz_name
# Common schema for time platforms: a timezone (auto-detected when omitted)
# plus optional on_time cron triggers; validate_cron_keys merges a raw cron:
# string into the individual field options and rejects mixing the two forms.
TIME_PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_TIMEZONE, default=detect_tz): validate_tz,
    vol.Optional(CONF_ON_TIME): automation.validate_automation({
        cv.GenerateID(CONF_TRIGGER_ID): cv.declare_variable_id(CronTrigger),
        vol.Optional(CONF_SECONDS): validate_cron_seconds,
        vol.Optional(CONF_MINUTES): validate_cron_minutes,
        vol.Optional(CONF_HOURS): validate_cron_hours,
        vol.Optional(CONF_DAYS_OF_MONTH): validate_cron_days_of_month,
        vol.Optional(CONF_MONTHS): validate_cron_months,
        vol.Optional(CONF_DAYS_OF_WEEK): validate_cron_days_of_week,
        vol.Optional(CONF_CRON): validate_cron_raw,
    }, validate_cron_keys),
})
def setup_time_core_(time_var, config):
    """Emit code for a time component and register its on_time cron triggers."""
    add(time_var.set_timezone(config[CONF_TIMEZONE]))
    for conf in config.get(CONF_ON_TIME, []):
        rhs = App.register_component(time_var.Pmake_cron_trigger())
        trigger = Pvariable(conf[CONF_TRIGGER_ID], rhs)
        # Each omitted field defaults to every value of its domain;
        # list(range(...)) replaces the needless identity comprehensions.
        seconds = conf.get(CONF_SECONDS, list(range(0, 61)))
        add(trigger.add_seconds(seconds))
        minutes = conf.get(CONF_MINUTES, list(range(0, 60)))
        add(trigger.add_minutes(minutes))
        hours = conf.get(CONF_HOURS, list(range(0, 24)))
        add(trigger.add_hours(hours))
        days_of_month = conf.get(CONF_DAYS_OF_MONTH, list(range(1, 32)))
        add(trigger.add_days_of_month(days_of_month))
        months = conf.get(CONF_MONTHS, list(range(1, 13)))
        add(trigger.add_months(months))
        days_of_week = conf.get(CONF_DAYS_OF_WEEK, list(range(1, 8)))
        add(trigger.add_days_of_week(days_of_week))
        automation.build_automation(trigger, NoArg, conf)
def setup_time(time_var, config):
    """Queue setup_time_core_ as a code-generation job on the core."""
    CORE.add_job(setup_time_core_, time_var, config)
# Compile-time define passed to the C++ build; presumably gates the firmware's
# time support — confirm against the C++ sources.
BUILD_FLAGS = '-DUSE_TIME'
| 37.300654 | 100 | 0.623708 | import datetime
import logging
import math
import voluptuous as vol
from esphome import automation
import esphome.config_validation as cv
from esphome.const import CONF_CRON, CONF_DAYS_OF_MONTH, CONF_DAYS_OF_WEEK, CONF_HOURS, \
CONF_MINUTES, CONF_MONTHS, CONF_ON_TIME, CONF_SECONDS, CONF_TIMEZONE, CONF_TRIGGER_ID
from esphome.core import CORE
from esphome.cpp_generator import Pvariable, add
from esphome.cpp_types import App, Component, NoArg, Trigger, esphome_ns
from esphome.py_compat import string_types
_LOGGER = logging.getLogger(__name__)
# Base schema for time platforms (adds nothing beyond the core platform schema).
PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend({
})
# Bindings to the generated C++ `time` namespace and its classes.
time_ns = esphome_ns.namespace('time')
RealTimeClockComponent = time_ns.class_('RealTimeClockComponent', Component)
CronTrigger = time_ns.class_('CronTrigger', Trigger.template(NoArg), Component)
ESPTime = time_ns.struct('ESPTime')
def _tz_timedelta(td):
offset_hour = int(td.total_seconds() / (60 * 60))
offset_minute = int(abs(td.total_seconds() / 60)) % 60
offset_second = int(abs(td.total_seconds())) % 60
if offset_hour == 0 and offset_minute == 0 and offset_second == 0:
return '0'
if offset_minute == 0 and offset_second == 0:
return '{}'.format(offset_hour)
if offset_second == 0:
return '{}:{}'.format(offset_hour, offset_minute)
return '{}:{}:{}'.format(offset_hour, offset_minute, offset_second)
def _week_of_month(dt):
first_day = dt.replace(day=1)
dom = dt.day
adjusted_dom = dom + first_day.weekday()
return int(math.ceil(adjusted_dom / 7.0))
def _tz_dst_str(dt):
    """Format *dt* as a POSIX-TZ style DST transition rule 'Mm.w.d/time'."""
    # isoweekday() % 7 maps Sunday to 0, as the TZ rule format expects.
    time_of_day = datetime.timedelta(hours=dt.hour, minutes=dt.minute,
                                     seconds=dt.second)
    return 'M{}.{}.{}/{}'.format(dt.month, _week_of_month(dt),
                                 dt.isoweekday() % 7, _tz_timedelta(time_of_day))
def convert_tz(pytz_obj):
    """Convert a pytz timezone object into a POSIX TZ string.

    Scans the current year hour-by-hour to locate the DST transitions and
    derives the standard/DST names and UTC offsets from them.
    """
    tz = pytz_obj
    # pytz APIs differ: some accept an is_dst keyword, some do not; fall back
    # on TypeError.
    def _dst(dt, is_dst):
        try:
            return tz.dst(dt, is_dst=is_dst)
        except TypeError:
            return tz.dst(dt)
    def _tzname(dt, is_dst):
        try:
            return tz.tzname(dt, is_dst=is_dst)
        except TypeError:
            return tz.tzname(dt)
    def _utcoffset(dt, is_dst):
        try:
            return tz.utcoffset(dt, is_dst=is_dst)
        except TypeError:
            return tz.utcoffset(dt)
    dst_begins = None
    dst_tzname = None
    dst_utcoffset = None
    dst_ends = None
    norm_tzname = None
    norm_utcoffset = None
    hour = datetime.timedelta(hours=1)
    this_year = datetime.datetime.now().year
    dt = datetime.datetime(year=this_year, month=1, day=1)
    last_dst = None
    # Walk the whole year in one-hour steps and record every DST flip.
    while dt.year == this_year:
        current_dst = _dst(dt, not last_dst)
        is_dst = bool(current_dst)
        if is_dst != last_dst:
            if is_dst:
                dst_begins = dt
                dst_tzname = _tzname(dt, True)
                dst_utcoffset = _utcoffset(dt, True)
            else:
                # NOTE(review): the end is bumped by one hour — presumably to
                # land on the wall-clock transition instant; confirm.
                dst_ends = dt + hour
                norm_tzname = _tzname(dt, False)
                norm_utcoffset = _utcoffset(dt, False)
            last_dst = is_dst
        dt += hour
    # TZ strings encode the offset with the opposite sign, hence -1 *.
    tzbase = '{}{}'.format(norm_tzname, _tz_timedelta(-1 * norm_utcoffset))
    if dst_begins is None:
        _LOGGER.info("Detected timezone '%s' with UTC offset %s",
                     norm_tzname, _tz_timedelta(norm_utcoffset))
        return tzbase
    tzext = '{}{},{},{}'.format(dst_tzname, _tz_timedelta(-1 * dst_utcoffset),
                                _tz_dst_str(dst_begins), _tz_dst_str(dst_ends))
    _LOGGER.info("Detected timezone '%s' with UTC offset %s and daylight savings time from "
                 "%s to %s",
                 norm_tzname, _tz_timedelta(norm_utcoffset), dst_begins.strftime("%x %X"),
                 dst_ends.strftime("%x %X"))
    return tzbase + tzext
def detect_tz():
    """Auto-detect the host timezone and return it as a POSIX TZ string."""
    try:
        import tzlocal
        import pytz
    except ImportError:
        raise vol.Invalid("No timezone specified and 'tzlocal' not installed. To automatically "
                          "detect the timezone please install tzlocal (pip install tzlocal)")
    try:
        local_zone = tzlocal.get_localzone()
    except pytz.exceptions.UnknownTimeZoneError:
        # Fall back to UTC when the local zone cannot be resolved.
        _LOGGER.warning("Could not auto-detect timezone. Using UTC...")
        return 'UTC'
    return convert_tz(local_zone)
def _parse_cron_int(value, special_mapping, message):
    """Coerce a cron token to an int, resolving named aliases (e.g. 'JAN').

    Raises vol.Invalid with *message* formatted with the offending value.
    """
    aliases = special_mapping or {}
    if isinstance(value, string_types) and value in aliases:
        return aliases[value]
    try:
        return int(value)
    except ValueError:
        raise vol.Invalid(message.format(value))
def _parse_cron_part(part, min_value, max_value, special_mapping):
if part in ('*', '?'):
return set(x for x in range(min_value, max_value + 1))
if '/' in part:
data = part.split('/')
if len(data) > 2:
raise vol.Invalid(u"Can't have more than two '/' in one time expression, got {}"
.format(part))
offset, repeat = data
offset_n = 0
if offset:
offset_n = _parse_cron_int(offset, special_mapping,
u"Offset for '/' time expression must be an integer, got {}")
try:
repeat_n = int(repeat)
except ValueError:
raise vol.Invalid(u"Repeat for '/' time expression must be an integer, got {}"
.format(repeat))
return set(x for x in range(offset_n, max_value + 1, repeat_n))
if '-' in part:
data = part.split('-')
if len(data) > 2:
raise vol.Invalid(u"Can't have more than two '-' in range time expression '{}'"
.format(part))
begin, end = data
begin_n = _parse_cron_int(begin, special_mapping, u"Number for time range must be integer, "
u"got {}")
end_n = _parse_cron_int(end, special_mapping, u"Number for time range must be integer, "
u"got {}")
if end_n < begin_n:
return set(x for x in range(end_n, max_value + 1)) | \
set(x for x in range(min_value, begin_n + 1))
return set(x for x in range(begin_n, end_n + 1))
return {_parse_cron_int(part, special_mapping, u"Number for time expression must be an "
u"integer, got {}")}
def cron_expression_validator(name, min_value, max_value, special_mapping=None):
    """Build a voluptuous validator for a single cron field.

    The returned validator accepts either a list of ints (checked to lie in
    [min_value, max_value] and returned sorted) or a cron field string,
    which is expanded via _parse_cron_part and re-validated as a list.

    :param name: Human-readable field name used in error messages.
    :param min_value: Smallest allowed value (inclusive).
    :param max_value: Largest allowed value (inclusive).
    :param special_mapping: Optional alias->int mapping (e.g. 'JAN' -> 1).
    """
    def validator(value):
        if isinstance(value, list):
            for v in value:
                if not isinstance(v, int):
                    # Fix: format args were previously (v, name, ...), which
                    # produced a garbled message such as
                    # "Expected integer for 5.5 'seconds'".
                    raise vol.Invalid(
                        "Expected integer for {} '{}', got {}".format(name, v, type(v)))
                if v < min_value or v > max_value:
                    raise vol.Invalid(
                        "{} {} is out of range (min={} max={}).".format(name, v, min_value,
                                                                        max_value))
            return list(sorted(value))
        value = cv.string(value)
        values = set()
        for part in value.split(','):
            values |= _parse_cron_part(part, min_value, max_value, special_mapping)
        # Recurse once with the expanded list to apply range checks and sorting.
        return validator(list(values))
    return validator
# Validators for the individual cron fields.
# NOTE(review): seconds accept 0-60 rather than 0-59 — presumably to tolerate
# a leap second; confirm this is intentional.
validate_cron_seconds = cron_expression_validator('seconds', 0, 60)
validate_cron_minutes = cron_expression_validator('minutes', 0, 59)
validate_cron_hours = cron_expression_validator('hours', 0, 23)
validate_cron_days_of_month = cron_expression_validator('days of month', 1, 31)
# Month and weekday fields additionally accept three-letter name aliases.
validate_cron_months = cron_expression_validator('months', 1, 12, {
    'JAN': 1, 'FEB': 2, 'MAR': 3, 'APR': 4, 'MAY': 5, 'JUN': 6, 'JUL': 7, 'AUG': 8,
    'SEP': 9, 'OCT': 10, 'NOV': 11, 'DEC': 12
})
# Days of week are 1-based starting at Sunday (SUN=1 ... SAT=7).
validate_cron_days_of_week = cron_expression_validator('days of week', 1, 7, {
    'SUN': 1, 'MON': 2, 'TUE': 3, 'WED': 4, 'THU': 5, 'FRI': 6, 'SAT': 7
})
# Config keys that together form an expanded cron expression.
CRON_KEYS = [CONF_SECONDS, CONF_MINUTES, CONF_HOURS, CONF_DAYS_OF_MONTH, CONF_MONTHS,
             CONF_DAYS_OF_WEEK]
def validate_cron_raw(value):
    """Validate a raw 6-field cron string and expand it into per-field lists."""
    fields = cv.string(value).split(' ')
    if len(fields) != 6:
        raise vol.Invalid("Cron expression must consist of exactly 6 space-separated parts, "
                          "not {}".format(len(fields)))
    seconds, minutes, hours, days_of_month, months, days_of_week = fields
    return {
        CONF_SECONDS: validate_cron_seconds(seconds),
        CONF_MINUTES: validate_cron_minutes(minutes),
        CONF_HOURS: validate_cron_hours(hours),
        CONF_DAYS_OF_MONTH: validate_cron_days_of_month(days_of_month),
        CONF_MONTHS: validate_cron_months(months),
        CONF_DAYS_OF_WEEK: validate_cron_days_of_week(days_of_week),
    }
def validate_cron_keys(value):
    """Expand a cron: option into the individual field keys, or otherwise
    require at least one individual cron field to be present."""
    if CONF_CRON not in value:
        return cv.has_at_least_one_key(*CRON_KEYS)(value)
    for key in value.keys():
        if key in CRON_KEYS:
            raise vol.Invalid("Cannot use option {} when cron: is specified.".format(key))
    expanded = value[CONF_CRON]
    merged = {key: value[key] for key in value if key != CONF_CRON}
    merged.update(expanded)
    return merged
def validate_tz(value):
    """Validate a timezone option; known pytz names are converted to a POSIX
    TZ string, anything else is passed through untouched."""
    tz_name = cv.string_strict(value)
    try:
        import pytz
        return convert_tz(pytz.timezone(tz_name))
    except Exception:  # pylint: disable=broad-except
        # pytz missing, name not in its database, or conversion failed:
        # assume the user supplied a raw TZ string.
        return tz_name
# Common schema for time platforms: a timezone (auto-detected when omitted)
# plus optional on_time cron triggers; validate_cron_keys merges a raw cron:
# string into the individual field options and rejects mixing the two forms.
TIME_PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_TIMEZONE, default=detect_tz): validate_tz,
    vol.Optional(CONF_ON_TIME): automation.validate_automation({
        cv.GenerateID(CONF_TRIGGER_ID): cv.declare_variable_id(CronTrigger),
        vol.Optional(CONF_SECONDS): validate_cron_seconds,
        vol.Optional(CONF_MINUTES): validate_cron_minutes,
        vol.Optional(CONF_HOURS): validate_cron_hours,
        vol.Optional(CONF_DAYS_OF_MONTH): validate_cron_days_of_month,
        vol.Optional(CONF_MONTHS): validate_cron_months,
        vol.Optional(CONF_DAYS_OF_WEEK): validate_cron_days_of_week,
        vol.Optional(CONF_CRON): validate_cron_raw,
    }, validate_cron_keys),
})
def setup_time_core_(time_var, config):
    """Emit code for a time component and register its on_time cron triggers."""
    add(time_var.set_timezone(config[CONF_TIMEZONE]))
    for conf in config.get(CONF_ON_TIME, []):
        rhs = App.register_component(time_var.Pmake_cron_trigger())
        trigger = Pvariable(conf[CONF_TRIGGER_ID], rhs)
        # Each omitted field defaults to every value of its domain;
        # list(range(...)) replaces the needless identity comprehensions.
        seconds = conf.get(CONF_SECONDS, list(range(0, 61)))
        add(trigger.add_seconds(seconds))
        minutes = conf.get(CONF_MINUTES, list(range(0, 60)))
        add(trigger.add_minutes(minutes))
        hours = conf.get(CONF_HOURS, list(range(0, 24)))
        add(trigger.add_hours(hours))
        days_of_month = conf.get(CONF_DAYS_OF_MONTH, list(range(1, 32)))
        add(trigger.add_days_of_month(days_of_month))
        months = conf.get(CONF_MONTHS, list(range(1, 13)))
        add(trigger.add_months(months))
        days_of_week = conf.get(CONF_DAYS_OF_WEEK, list(range(1, 8)))
        add(trigger.add_days_of_week(days_of_week))
        automation.build_automation(trigger, NoArg, conf)
def setup_time(time_var, config):
    """Queue setup_time_core_ as a code-generation job on the core."""
    CORE.add_job(setup_time_core_, time_var, config)
# Compile-time define passed to the C++ build; presumably gates the firmware's
# time support — confirm against the C++ sources.
BUILD_FLAGS = '-DUSE_TIME'
| true | true |
f73c7c9dfabc60a11ceb79855e4d40e631bdb3b1 | 26,859 | py | Python | api_core/google/api_core/bidi.py | beittatt/cloud-python | cdb4cc4f3c568ff32acf35c34910d23f2d3800a0 | [
"Apache-2.0"
] | 2 | 2021-11-26T07:08:43.000Z | 2022-03-07T20:20:04.000Z | api_core/google/api_core/bidi.py | beittatt/cloud-python | cdb4cc4f3c568ff32acf35c34910d23f2d3800a0 | [
"Apache-2.0"
] | null | null | null | api_core/google/api_core/bidi.py | beittatt/cloud-python | cdb4cc4f3c568ff32acf35c34910d23f2d3800a0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bi-directional streaming RPC helpers."""
import collections
import datetime
import logging
import threading
import time
from six.moves import queue
from google.api_core import exceptions
# Module-level logger for the bidi helpers.
_LOGGER = logging.getLogger(__name__)
# Name given to the background thread started by BackgroundConsumer.start().
_BIDIRECTIONAL_CONSUMER_NAME = "Thread-ConsumeBidirectionalStream"
class _RequestQueueGenerator(object):
"""A helper for sending requests to a gRPC stream from a Queue.
This generator takes requests off a given queue and yields them to gRPC.
This helper is useful when you have an indeterminate, indefinite, or
otherwise open-ended set of requests to send through a request-streaming
(or bidirectional) RPC.
The reason this is necessary is because gRPC takes an iterator as the
request for request-streaming RPCs. gRPC consumes this iterator in another
thread to allow it to block while generating requests for the stream.
However, if the generator blocks indefinitely gRPC will not be able to
clean up the thread as it'll be blocked on `next(iterator)` and not be able
to check the channel status to stop iterating. This helper mitigates that
by waiting on the queue with a timeout and checking the RPC state before
yielding.
Finally, it allows for retrying without swapping queues because if it does
pull an item off the queue when the RPC is inactive, it'll immediately put
it back and then exit. This is necessary because yielding the item in this
case will cause gRPC to discard it. In practice, this means that the order
of messages is not guaranteed. If such a thing is necessary it would be
easy to use a priority queue.
Example::
requests = request_queue_generator(q)
call = stub.StreamingRequest(iter(requests))
requests.call = call
for response in call:
print(response)
q.put(...)
Note that it is possible to accomplish this behavior without "spinning"
(using a queue timeout). One possible way would be to use more threads to
multiplex the grpc end event with the queue, another possible way is to
use selectors and a custom event/queue object. Both of these approaches
are significant from an engineering perspective for small benefit - the
CPU consumed by spinning is pretty minuscule.
Args:
queue (queue.Queue): The request queue.
period (float): The number of seconds to wait for items from the queue
before checking if the RPC is cancelled. In practice, this
determines the maximum amount of time the request consumption
thread will live after the RPC is cancelled.
initial_request (Union[protobuf.Message,
Callable[None, protobuf.Message]]): The initial request to
yield. This is done independently of the request queue to allow fo
easily restarting streams that require some initial configuration
request.
"""
def __init__(self, queue, period=1, initial_request=None):
self._queue = queue
self._period = period
self._initial_request = initial_request
self.call = None
def _is_active(self):
# Note: there is a possibility that this starts *before* the call
# property is set. So we have to check if self.call is set before
# seeing if it's active.
if self.call is not None and not self.call.is_active():
return False
else:
return True
def __iter__(self):
if self._initial_request is not None:
if callable(self._initial_request):
yield self._initial_request()
else:
yield self._initial_request
while True:
try:
item = self._queue.get(timeout=self._period)
except queue.Empty:
if not self._is_active():
_LOGGER.debug(
"Empty queue and inactive call, exiting request " "generator."
)
return
else:
# call is still active, keep waiting for queue items.
continue
# The consumer explicitly sent "None", indicating that the request
# should end.
if item is None:
_LOGGER.debug("Cleanly exiting request generator.")
return
if not self._is_active():
# We have an item, but the call is closed. We should put the
# item back on the queue so that the next call can consume it.
self._queue.put(item)
_LOGGER.debug(
"Inactive call, replacing item on queue and exiting "
"request generator."
)
return
yield item
class _Throttle(object):
"""A context manager limiting the total entries in a sliding time window.
If more than ``access_limit`` attempts are made to enter the context manager
instance in the last ``time window`` interval, the exceeding requests block
until enough time elapses.
The context manager instances are thread-safe and can be shared between
multiple threads. If multiple requests are blocked and waiting to enter,
the exact order in which they are allowed to proceed is not determined.
Example::
max_three_per_second = _Throttle(
access_limit=3, time_window=datetime.timedelta(seconds=1)
)
for i in range(5):
with max_three_per_second as time_waited:
print("{}: Waited {} seconds to enter".format(i, time_waited))
Args:
access_limit (int): the maximum number of entries allowed in the time window
time_window (datetime.timedelta): the width of the sliding time window
"""
def __init__(self, access_limit, time_window):
if access_limit < 1:
raise ValueError("access_limit argument must be positive")
if time_window <= datetime.timedelta(0):
raise ValueError("time_window argument must be a positive timedelta")
self._time_window = time_window
self._access_limit = access_limit
self._past_entries = collections.deque(maxlen=access_limit) # least recent first
self._entry_lock = threading.Lock()
def __enter__(self):
with self._entry_lock:
cutoff_time = datetime.datetime.now() - self._time_window
# drop the entries that are too old, as they are no longer relevant
while self._past_entries and self._past_entries[0] < cutoff_time:
self._past_entries.popleft()
if len(self._past_entries) < self._access_limit:
self._past_entries.append(datetime.datetime.now())
return 0.0 # no waiting was needed
to_wait = (self._past_entries[0] - cutoff_time).total_seconds()
time.sleep(to_wait)
self._past_entries.append(datetime.datetime.now())
return to_wait
def __exit__(self, *_):
pass
def __repr__(self):
return "{}(access_limit={}, time_window={})".format(
self.__class__.__name__,
self._access_limit,
repr(self._time_window),
)
class BidiRpc(object):
    """Socket-like wrapper around a bi-directional streaming RPC.

    gRPC models a bidi stream as a request iterator plus a response iterator;
    this class re-exposes that pairing as explicit :func:`send` and
    :func:`recv` calls, which suits long-running or asymmetric streams where
    requests and responses do not correspond one-to-one.

    Example::

        initial_request = example_pb2.StreamingRpcRequest(
            setting='example')

        rpc = BidiRpc(
            stub.StreamingRpc,
            initial_request=initial_request,
            metadata=[('name', 'value')]
        )

        rpc.open()

        while rpc.is_active():
            print(rpc.recv())
            rpc.send(example_pb2.StreamingRpcRequest(
                data='example'))

    This does *not* retry the stream on errors. See :class:`ResumableBidiRpc`.

    Args:
        start_rpc (grpc.StreamStreamMultiCallable): The gRPC method used to
            start the RPC.
        initial_request (Union[protobuf.Message,
            Callable[None, protobuf.Message]]): The initial request to
            yield. This is useful if an initial request is needed to start
            the stream.
        metadata (Sequence[Tuple(str, str)]): RPC metadata to include in
            the request.
    """

    def __init__(self, start_rpc, initial_request=None, metadata=None):
        self._start_rpc = start_rpc
        self._initial_request = initial_request
        self._rpc_metadata = metadata
        self._request_queue = queue.Queue()
        self._request_generator = None
        self._is_active = False
        self._callbacks = []
        self.call = None

    def add_done_callback(self, callback):
        """Register *callback* to run when the RPC terminates, whether by
        error or clean shutdown.

        Args:
            callback (Callable[[grpc.Future], None]): Receives the same gRPC
                future as the underlying stream, which is also a
                :class:`grpc.Call`.
        """
        self._callbacks.append(callback)

    def _on_call_done(self, future):
        for cb in self._callbacks:
            cb(future)

    def open(self):
        """Opens the stream."""
        if self.is_active:
            raise ValueError("Can not open an already open stream.")

        generator = _RequestQueueGenerator(
            self._request_queue, initial_request=self._initial_request
        )
        rpc_call = self._start_rpc(iter(generator), metadata=self._rpc_metadata)
        generator.call = rpc_call

        # TODO: api_core should expose the future interface for wrapped
        # callables as well.
        if hasattr(rpc_call, "_wrapped"):  # pragma: NO COVER
            rpc_call._wrapped.add_done_callback(self._on_call_done)
        else:
            rpc_call.add_done_callback(self._on_call_done)

        self._request_generator = generator
        self.call = rpc_call

    def close(self):
        """Closes the stream."""
        if self.call is None:
            return

        self._request_queue.put(None)
        self.call.cancel()
        self._request_generator = None
        # self.call is intentionally kept so send()/recv() surface the error.

    def send(self, request):
        """Queue *request* for transmission on the stream (non-blocking).

        Raises if the RPC was never opened or has already failed.

        Args:
            request (protobuf.Message): The request to send.
        """
        if self.call is None:
            raise ValueError("Can not send() on an RPC that has never been open()ed.")

        # The call is checked directly (not via self.is_active) because
        # subclasses give the is_active property different semantics.
        if not self.call.is_active():
            # Advancing a dead call forces it to raise its terminal error.
            next(self.call)
            return
        self._request_queue.put(request)

    def recv(self):
        """Block until the next response arrives on the stream and return it.

        Raises if the RPC was never opened or has already failed.

        Returns:
            protobuf.Message: The received message.
        """
        if self.call is None:
            raise ValueError("Can not recv() on an RPC that has never been open()ed.")

        return next(self.call)

    @property
    def is_active(self):
        """bool: True if this stream is currently open and active."""
        return self.call is not None and self.call.is_active()

    @property
    def pending_requests(self):
        """int: Returns an estimate of the number of queued requests."""
        return self._request_queue.qsize()
def _never_terminate(future_or_error):
"""By default, no errors cause BiDi termination."""
return False
class ResumableBidiRpc(BidiRpc):
    """A :class:`BidiRpc` that can automatically resume the stream on errors.

    It uses the ``should_recover`` arg to determine if it should re-establish
    the stream on error.

    Example::

        def should_recover(exc):
            return (
                isinstance(exc, grpc.RpcError) and
                exc.code() == grpc.StatusCode.UNAVAILABLE)

        initial_request = example_pb2.StreamingRpcRequest(
            setting='example')

        metadata = [('header_name', 'value')]

        rpc = ResumableBidiRpc(
            stub.StreamingRpc,
            should_recover=should_recover,
            initial_request=initial_request,
            metadata=metadata
        )

        rpc.open()

        while rpc.is_active():
            print(rpc.recv())
            rpc.send(example_pb2.StreamingRpcRequest(
                data='example'))

    Args:
        start_rpc (grpc.StreamStreamMultiCallable): The gRPC method used to
            start the RPC.
        initial_request (Union[protobuf.Message,
            Callable[None, protobuf.Message]]): The initial request to
            yield. This is useful if an initial request is needed to start the
            stream.
        should_recover (Callable[[Exception], bool]): A function that returns
            True if the stream should be recovered. This will be called
            whenever an error is encountered on the stream.
        should_terminate (Callable[[Exception], bool]): A function that returns
            True if the stream should be terminated. This will be called
            whenever an error is encountered on the stream.
        metadata Sequence[Tuple(str, str)]: RPC metadata to include in
            the request.
        throttle_reopen (bool): If ``True``, throttling will be applied to
            stream reopen calls. Defaults to ``False``.
    """

    def __init__(
        self,
        start_rpc,
        should_recover,
        should_terminate=_never_terminate,
        initial_request=None,
        metadata=None,
        throttle_reopen=False,
    ):
        super(ResumableBidiRpc, self).__init__(start_rpc, initial_request, metadata)
        self._should_recover = should_recover
        self._should_terminate = should_terminate
        self._operational_lock = threading.RLock()
        self._finalized = False
        self._finalize_lock = threading.Lock()

        if throttle_reopen:
            self._reopen_throttle = _Throttle(
                access_limit=5, time_window=datetime.timedelta(seconds=10),
            )
        else:
            self._reopen_throttle = None

    def _finalize(self, result):
        # Run the done callbacks exactly once, no matter how many error
        # paths race to report a terminal result.
        with self._finalize_lock:
            if self._finalized:
                return

            for callback in self._callbacks:
                callback(result)

            self._finalized = True

    def _on_call_done(self, future):
        # Unlike the base class, we only execute the callbacks on a terminal
        # error, not for errors that we can recover from. Note that grpc's
        # "future" here is also a grpc.RpcError.
        with self._operational_lock:
            if self._should_terminate(future):
                self._finalize(future)
            elif not self._should_recover(future):
                self._finalize(future)
            else:
                _LOGGER.debug("Re-opening stream from gRPC callback.")
                self._reopen()

    def _reopen(self):
        with self._operational_lock:
            # Another thread already managed to re-open this stream.
            if self.call is not None and self.call.is_active():
                _LOGGER.debug("Stream was already re-established.")
                return

            self.call = None
            # Request generator should exit cleanly since the RPC its bound to
            # has exited.
            self._request_generator = None

            # Note: we do not currently do any sort of backoff here. The
            # assumption is that re-establishing the stream under normal
            # circumstances will happen in intervals greater than 60s.
            # However, it is possible in a degenerative case that the server
            # closes the stream rapidly which would lead to thrashing here,
            # but hopefully in those cases the server would return a non-
            # retryable error.

            try:
                if self._reopen_throttle:
                    with self._reopen_throttle:
                        self.open()
                else:
                    self.open()
            # If re-opening or re-calling the method fails for any reason,
            # consider it a terminal error and finalize the stream.
            except Exception as exc:
                _LOGGER.debug("Failed to re-open stream due to %s", exc)
                self._finalize(exc)
                raise

            _LOGGER.info("Re-established stream")

    def _recoverable(self, method, *args, **kwargs):
        """Wraps a method to recover the stream and retry on error.

        If a retryable error occurs while making the call, then the stream will
        be re-opened and the method will be retried. This happens indefinitely
        so long as the error is a retryable one. If an error occurs while
        re-opening the stream, then this method will raise immediately and
        trigger finalization of this object.

        Args:
            method (Callable[..., Any]): The method to call.
            args: The args to pass to the method.
            kwargs: The kwargs to pass to the method.
        """
        while True:
            try:
                return method(*args, **kwargs)

            except Exception as exc:
                with self._operational_lock:
                    _LOGGER.debug("Call to retryable %r caused %s.", method, exc)

                    if self._should_terminate(exc):
                        self.close()
                        _LOGGER.debug("Terminating %r due to %s.", method, exc)
                        self._finalize(exc)
                        break

                    if not self._should_recover(exc):
                        self.close()
                        _LOGGER.debug("Not retrying %r due to %s.", method, exc)
                        self._finalize(exc)
                        raise exc

                    _LOGGER.debug("Re-opening stream from retryable %r.", method)
                    self._reopen()

    def _send(self, request):
        # Grab a reference to the RPC call. Because another thread (notably
        # the gRPC error thread) can modify self.call (by invoking reopen),
        # we should ensure our reference can not change underneath us.
        # If self.call is modified (such as replaced with a new RPC call) then
        # this will use the "old" RPC, which should result in the same
        # exception passed into gRPC's error handler being raised here, which
        # will be handled by the usual error handling in retryable.
        with self._operational_lock:
            call = self.call

        if call is None:
            raise ValueError("Can not send() on an RPC that has never been open()ed.")

        # Don't use self.is_active(), as ResumableBidiRpc will overload it
        # to mean something semantically different.
        if call.is_active():
            self._request_queue.put(request)
            # (A stray no-op `pass` statement that followed this line has
            # been removed.)
        else:
            # calling next should cause the call to raise.
            next(call)

    def send(self, request):
        """Queue a request, transparently retrying through recoverable
        stream errors."""
        return self._recoverable(self._send, request)

    def _recv(self):
        with self._operational_lock:
            call = self.call

        if call is None:
            raise ValueError("Can not recv() on an RPC that has never been open()ed.")

        return next(call)

    def recv(self):
        """Receive the next response, transparently retrying through
        recoverable stream errors."""
        return self._recoverable(self._recv)

    @property
    def is_active(self):
        """bool: True if this stream is currently open and active."""
        # Use the operational lock. It's entirely possible for something
        # to check the active state *while* the RPC is being retried.
        # Also, use finalized to track the actual terminal state here.
        # This is because if the stream is re-established by the gRPC thread
        # it's technically possible to check this between when gRPC marks the
        # RPC as inactive and when gRPC executes our callback that re-opens
        # the stream.
        with self._operational_lock:
            return self.call is not None and not self._finalized
class BackgroundConsumer(object):
"""A bi-directional stream consumer that runs in a separate thread.
This maps the consumption of a stream into a callback-based model. It also
provides :func:`pause` and :func:`resume` to allow for flow-control.
Example::
def should_recover(exc):
return (
isinstance(exc, grpc.RpcError) and
exc.code() == grpc.StatusCode.UNVAILABLE)
initial_request = example_pb2.StreamingRpcRequest(
setting='example')
rpc = ResumeableBidiRpc(
stub.StreamingRpc,
initial_request=initial_request,
should_recover=should_recover)
def on_response(response):
print(response)
consumer = BackgroundConsumer(rpc, on_response)
consumer.start()
Note that error handling *must* be done by using the provided
``bidi_rpc``'s ``add_done_callback``. This helper will automatically exit
whenever the RPC itself exits and will not provide any error details.
Args:
bidi_rpc (BidiRpc): The RPC to consume. Should not have been
``open()``ed yet.
on_response (Callable[[protobuf.Message], None]): The callback to
be called for every response on the stream.
"""
def __init__(self, bidi_rpc, on_response):
self._bidi_rpc = bidi_rpc
self._on_response = on_response
self._paused = False
self._wake = threading.Condition()
self._thread = None
self._operational_lock = threading.Lock()
def _on_call_done(self, future):
# Resume the thread if it's paused, this prevents blocking forever
# when the RPC has terminated.
self.resume()
def _thread_main(self, ready):
try:
ready.set()
self._bidi_rpc.add_done_callback(self._on_call_done)
self._bidi_rpc.open()
while self._bidi_rpc.is_active:
# Do not allow the paused status to change at all during this
# section. There is a condition where we could be resumed
# between checking if we are paused and calling wake.wait(),
# which means that we will miss the notification to wake up
# (oops!) and wait for a notification that will never come.
# Keeping the lock throughout avoids that.
# In the future, we could use `Condition.wait_for` if we drop
# Python 2.7.
with self._wake:
while self._paused:
_LOGGER.debug("paused, waiting for waking.")
self._wake.wait()
_LOGGER.debug("woken.")
_LOGGER.debug("waiting for recv.")
response = self._bidi_rpc.recv()
_LOGGER.debug("recved response.")
self._on_response(response)
except exceptions.GoogleAPICallError as exc:
_LOGGER.debug(
"%s caught error %s and will exit. Generally this is due to "
"the RPC itself being cancelled and the error will be "
"surfaced to the calling code.",
_BIDIRECTIONAL_CONSUMER_NAME,
exc,
exc_info=True,
)
except Exception as exc:
_LOGGER.exception(
"%s caught unexpected exception %s and will exit.",
_BIDIRECTIONAL_CONSUMER_NAME,
exc,
)
_LOGGER.info("%s exiting", _BIDIRECTIONAL_CONSUMER_NAME)
def start(self):
"""Start the background thread and begin consuming the thread."""
with self._operational_lock:
ready = threading.Event()
thread = threading.Thread(
name=_BIDIRECTIONAL_CONSUMER_NAME,
target=self._thread_main,
args=(ready,),
)
thread.daemon = True
thread.start()
# Other parts of the code rely on `thread.is_alive` which
# isn't sufficient to know if a thread is active, just that it may
# soon be active. This can cause races. Further protect
# against races by using a ready event and wait on it to be set.
ready.wait()
self._thread = thread
_LOGGER.debug("Started helper thread %s", thread.name)
    def stop(self):
        """Stop consuming the stream and shutdown the background thread."""
        with self._operational_lock:
            # Closing the RPC makes the consumer loop's recv/is_active fail,
            # which lets the thread exit.
            self._bidi_rpc.close()
            if self._thread is not None:
                # Resume the thread to wake it up in case it is sleeping.
                self.resume()
                self._thread.join()
            self._thread = None
    @property
    def is_active(self):
        """bool: True if the background thread is active."""
        # A thread exists and has not finished running _thread_main.
        return self._thread is not None and self._thread.is_alive()
    def pause(self):
        """Pauses the response stream.

        This does *not* pause the request stream. The consumer thread will
        block on the wake condition until ``resume`` is called.
        """
        with self._wake:
            self._paused = True
def resume(self):
"""Resumes the response stream."""
with self._wake:
self._paused = False
self._wake.notifyAll()
@property
def is_paused(self):
"""bool: True if the response stream is paused."""
return self._paused
| 36.894231 | 89 | 0.615176 |
import collections
import datetime
import logging
import threading
import time
from six.moves import queue
from google.api_core import exceptions
_LOGGER = logging.getLogger(__name__)
_BIDIRECTIONAL_CONSUMER_NAME = "Thread-ConsumeBidirectionalStream"
class _RequestQueueGenerator(object):
    """A lazy iterator that yields requests pulled from a queue.

    Optionally yields an initial request first, then blocks (with a
    timeout) on the queue, exiting cleanly when ``None`` is queued or the
    associated RPC call becomes inactive.
    """
    def __init__(self, queue, period=1, initial_request=None):
        # queue: the Queue of outgoing requests.
        # period: seconds to wait on the queue before re-checking liveness.
        # initial_request: request (or zero-arg callable producing one)
        #     yielded before anything from the queue.
        self._queue = queue
        self._period = period
        self._initial_request = initial_request
        self.call = None
    def _is_active(self):
        # The generator is considered active until a call has been attached
        # AND that call reports itself inactive.
        if self.call is not None and not self.call.is_active():
            return False
        else:
            return True
    def __iter__(self):
        """Yield the initial request (if any) then queued requests."""
        if self._initial_request is not None:
            if callable(self._initial_request):
                yield self._initial_request()
            else:
                yield self._initial_request
        while True:
            try:
                item = self._queue.get(timeout=self._period)
            except queue.Empty:
                # Timed out waiting: only exit if the call has gone inactive.
                if not self._is_active():
                    _LOGGER.debug(
                        "Empty queue and inactive call, exiting request " "generator."
                    )
                    return
                else:
                    # call is still active, keep waiting for queue items.
                    continue
            # The consumer explicitly sent "None", indicating that the request
            # should end.
            if item is None:
                _LOGGER.debug("Cleanly exiting request generator.")
                return
            if not self._is_active():
                # We have an item, but the call is closed. We should put the
                # item back on the queue so that the next call can consume it.
                self._queue.put(item)
                _LOGGER.debug(
                    "Inactive call, replacing item on queue and exiting "
                    "request generator."
                )
                return
            yield item
class _Throttle(object):
    """A context manager that limits entries to ``access_limit`` per
    ``time_window`` by sleeping on ``__enter__`` when the limit is hit.
    """
    def __init__(self, access_limit, time_window):
        # access_limit (int): max number of entries allowed per window.
        # time_window (datetime.timedelta): length of the sliding window.
        if access_limit < 1:
            raise ValueError("access_limit argument must be positive")
        if time_window <= datetime.timedelta(0):
            raise ValueError("time_window argument must be a positive timedelta")
        self._time_window = time_window
        self._access_limit = access_limit
        self._past_entries = collections.deque(maxlen=access_limit)  # least recent first
        self._entry_lock = threading.Lock()
    def __enter__(self):
        """Record an entry, sleeping first if the window is saturated.

        Returns:
            float: seconds slept (0.0 if no waiting was needed).
        """
        with self._entry_lock:
            cutoff_time = datetime.datetime.now() - self._time_window
            # drop the entries that are too old, as they are no longer relevant
            while self._past_entries and self._past_entries[0] < cutoff_time:
                self._past_entries.popleft()
            if len(self._past_entries) < self._access_limit:
                self._past_entries.append(datetime.datetime.now())
                return 0.0  # no waiting was needed
            # Sleep just long enough for the oldest entry to age out of the
            # window, then record this entry.
            to_wait = (self._past_entries[0] - cutoff_time).total_seconds()
            time.sleep(to_wait)
            self._past_entries.append(datetime.datetime.now())
            return to_wait
    def __exit__(self, *_):
        # Nothing to release; throttling happens entirely on entry.
        pass
    def __repr__(self):
        return "{}(access_limit={}, time_window={})".format(
            self.__class__.__name__,
            self._access_limit,
            repr(self._time_window),
        )
class BidiRpc(object):
    """A helper for consuming a bi-directional streaming RPC.

    Pairs a request queue with the response call so that requests can be
    queued from any thread with ``send`` and responses consumed with
    ``recv``.
    """
    def __init__(self, start_rpc, initial_request=None, metadata=None):
        # start_rpc: callable that opens the streaming RPC given a request
        #     iterator and metadata.
        # initial_request: request (or factory callable) sent first on open.
        # metadata: RPC metadata forwarded to start_rpc.
        self._start_rpc = start_rpc
        self._initial_request = initial_request
        self._rpc_metadata = metadata
        self._request_queue = queue.Queue()
        self._request_generator = None
        self._is_active = False
        self._callbacks = []
        self.call = None
    def add_done_callback(self, callback):
        """Register a callback invoked when the RPC terminates."""
        self._callbacks.append(callback)
    def _on_call_done(self, future):
        # Fan the call's done notification out to all registered callbacks.
        for callback in self._callbacks:
            callback(future)
    def open(self):
        """Open the streaming RPC; raises ValueError if already open."""
        if self.is_active:
            raise ValueError("Can not open an already open stream.")
        request_generator = _RequestQueueGenerator(
            self._request_queue, initial_request=self._initial_request
        )
        call = self._start_rpc(iter(request_generator), metadata=self._rpc_metadata)
        request_generator.call = call
        # TODO: api_core should expose the future interface for wrapped
        # callables as well.
        if hasattr(call, "_wrapped"):  # pragma: NO COVER
            call._wrapped.add_done_callback(self._on_call_done)
        else:
            call.add_done_callback(self._on_call_done)
        self._request_generator = request_generator
        self.call = call
    def close(self):
        """Close the stream cleanly, cancelling the underlying call."""
        if self.call is None:
            return
        self._request_queue.put(None)
        self.call.cancel()
        self._request_generator = None
        # Don't set self.call to None. Keep it around so that send/recv can
        # surface the call's terminal error to callers.
    def send(self, request):
        """Queue a request to be sent on the stream.

        Raises:
            ValueError: If the stream was never opened.
        """
        if self.call is None:
            raise ValueError("Can not send() on an RPC that has never been open()ed.")
        # Don't use self.is_active here, as subclasses may overload it
        # to mean something semantically different.
        if self.call.is_active():
            self._request_queue.put(request)
        else:
            # calling next should cause the call to raise.
            next(self.call)
    def recv(self):
        """Block until the next response arrives and return it.

        Raises:
            ValueError: If the stream was never opened.
        """
        if self.call is None:
            raise ValueError("Can not recv() on an RPC that has never been open()ed.")
        return next(self.call)
    @property
    def is_active(self):
        """bool: True if the underlying call is open and active."""
        return self.call is not None and self.call.is_active()
    @property
    def pending_requests(self):
        """int: Approximate number of requests queued but not yet sent."""
        return self._request_queue.qsize()
def _never_terminate(future_or_error):
return False
class ResumableBidiRpc(BidiRpc):
    """A :class:`BidiRpc` that transparently re-opens the stream on
    recoverable errors.

    ``should_recover`` and ``should_terminate`` are predicates over the
    error/future that decide whether the stream is re-opened or finalized.
    """
    def __init__(
        self,
        start_rpc,
        should_recover,
        should_terminate=_never_terminate,
        initial_request=None,
        metadata=None,
        throttle_reopen=False,
    ):
        super(ResumableBidiRpc, self).__init__(start_rpc, initial_request, metadata)
        self._should_recover = should_recover
        self._should_terminate = should_terminate
        self._operational_lock = threading.RLock()
        self._finalized = False
        self._finalize_lock = threading.Lock()
        if throttle_reopen:
            # Limit re-open attempts to 5 per 10 seconds to avoid a tight
            # reconnect loop against a persistently failing backend.
            self._reopen_throttle = _Throttle(
                access_limit=5, time_window=datetime.timedelta(seconds=10),
            )
        else:
            self._reopen_throttle = None
    def _finalize(self, result):
        # Run the done callbacks exactly once, guarded by a dedicated lock.
        with self._finalize_lock:
            if self._finalized:
                return
            for callback in self._callbacks:
                callback(result)
            self._finalized = True
    def _on_call_done(self, future):
        # Unlike the base class, we only execute the callbacks on a terminal
        # error, not for errors that we can recover from; recoverable errors
        # trigger a re-open of the stream instead.
        with self._operational_lock:
            if self._should_terminate(future):
                self._finalize(future)
            elif not self._should_recover(future):
                self._finalize(future)
            else:
                _LOGGER.debug("Re-opening stream from gRPC callback.")
                self._reopen()
    def _reopen(self):
        # Re-establish the underlying call; finalizes (and re-raises) if the
        # re-open itself fails.
        with self._operational_lock:
            if self.call is not None and self.call.is_active():
                _LOGGER.debug("Stream was already re-established.")
                return
            self.call = None
            self._request_generator = None
            try:
                if self._reopen_throttle:
                    with self._reopen_throttle:
                        self.open()
                else:
                    self.open()
            except Exception as exc:
                _LOGGER.debug("Failed to re-open stream due to %s", exc)
                self._finalize(exc)
                raise
            _LOGGER.info("Re-established stream")
    def _recoverable(self, method, *args, **kwargs):
        """Call ``method``, re-opening the stream and retrying on
        recoverable errors; finalize on terminal/unrecoverable ones."""
        while True:
            try:
                return method(*args, **kwargs)
            except Exception as exc:
                with self._operational_lock:
                    _LOGGER.debug("Call to retryable %r caused %s.", method, exc)
                    if self._should_terminate(exc):
                        self.close()
                        _LOGGER.debug("Terminating %r due to %s.", method, exc)
                        self._finalize(exc)
                        break
                    if not self._should_recover(exc):
                        self.close()
                        _LOGGER.debug("Not retrying %r due to %s.", method, exc)
                        self._finalize(exc)
                        raise exc
                    _LOGGER.debug("Re-opening stream from retryable %r.", method)
                    self._reopen()
    def _send(self, request):
        # Grab a reference to the call under the lock so another thread
        # re-establishing the stream can't swap it out from under us; any
        # error raised here is handled by _recoverable.
        with self._operational_lock:
            call = self.call
            if call is None:
                raise ValueError("Can not send() on an RPC that has never been open()ed.")
            # Don't use self.is_active, as this class overloads it to mean
            # "open or retrying", not "call currently active".
            if call.is_active():
                self._request_queue.put(request)
                pass
            else:
                # Calling next should cause the call to raise its error.
                next(call)
    def send(self, request):
        """Queue a request, transparently retrying on recoverable errors."""
        return self._recoverable(self._send, request)
    def _recv(self):
        # Same call-snapshot pattern as _send; see comment there.
        with self._operational_lock:
            call = self.call
            if call is None:
                raise ValueError("Can not recv() on an RPC that has never been open()ed.")
            return next(call)
    def recv(self):
        """Receive a response, transparently retrying on recoverable errors."""
        return self._recoverable(self._recv)
    @property
    def is_active(self):
        """bool: True while the stream is open (or being retried) and has
        not been finalized."""
        # Hold the operational lock so callers see a consistent view while
        # the stream is being re-established, and use _finalized to track
        # the true terminal state rather than the transient call object.
        with self._operational_lock:
            return self.call is not None and not self._finalized
class BackgroundConsumer(object):
    """Consumes a bi-directional stream on its own daemon thread.

    Args:
        bidi_rpc: The stream (e.g. a ``BidiRpc``/``ResumableBidiRpc``) to
            consume.
        on_response: Callback invoked with every received response.
    """
    def __init__(self, bidi_rpc, on_response):
        self._bidi_rpc = bidi_rpc
        self._on_response = on_response
        self._paused = False
        self._wake = threading.Condition()
        self._thread = None
        self._operational_lock = threading.Lock()
    def _on_call_done(self, future):
        # Resume the thread if it's paused; this prevents blocking forever
        # when the RPC has terminated.
        self.resume()
    def _thread_main(self, ready):
        """Thread body: open the RPC and pump responses until inactive."""
        try:
            # Signal start() that the thread is truly running before any work.
            ready.set()
            self._bidi_rpc.add_done_callback(self._on_call_done)
            self._bidi_rpc.open()
            while self._bidi_rpc.is_active:
                # Do not allow the paused status to change at all during this
                # section. There is a condition where we could be resumed
                # between checking if we are paused and calling wake.wait(),
                # which means that we will miss the notification to wake up
                # (oops!) and wait for a notification that will never come.
                # Keeping the lock throughout avoids that.
                with self._wake:
                    while self._paused:
                        _LOGGER.debug("paused, waiting for waking.")
                        self._wake.wait()
                        _LOGGER.debug("woken.")
                _LOGGER.debug("waiting for recv.")
                response = self._bidi_rpc.recv()
                _LOGGER.debug("recved response.")
                self._on_response(response)
        except exceptions.GoogleAPICallError as exc:
            # Expected when the RPC is cancelled/closed; log quietly.
            _LOGGER.debug(
                "%s caught error %s and will exit. Generally this is due to "
                "the RPC itself being cancelled and the error will be "
                "surfaced to the calling code.",
                _BIDIRECTIONAL_CONSUMER_NAME,
                exc,
                exc_info=True,
            )
        except Exception as exc:
            _LOGGER.exception(
                "%s caught unexpected exception %s and will exit.",
                _BIDIRECTIONAL_CONSUMER_NAME,
                exc,
            )
        _LOGGER.info("%s exiting", _BIDIRECTIONAL_CONSUMER_NAME)
    def start(self):
        """Start the background thread and begin consuming the stream."""
        with self._operational_lock:
            ready = threading.Event()
            thread = threading.Thread(
                name=_BIDIRECTIONAL_CONSUMER_NAME,
                target=self._thread_main,
                args=(ready,),
            )
            # Daemon thread: must not block interpreter shutdown.
            thread.daemon = True
            thread.start()
            # `thread.is_alive` only says the thread may soon be active,
            # which can cause races; wait on an explicit ready event instead.
            ready.wait()
            self._thread = thread
            _LOGGER.debug("Started helper thread %s", thread.name)
    def stop(self):
        """Stop consuming the stream and shut down the background thread."""
        with self._operational_lock:
            self._bidi_rpc.close()
            if self._thread is not None:
                # Resume the thread to wake it up in case it is sleeping.
                self.resume()
                self._thread.join()
            self._thread = None
    @property
    def is_active(self):
        """bool: True if the background thread is active."""
        return self._thread is not None and self._thread.is_alive()
    def pause(self):
        """Pause the response stream (does *not* pause the request stream)."""
        with self._wake:
            self._paused = True
    def resume(self):
        """Resume the response stream, waking any paused consumer."""
        with self._wake:
            self._paused = False
            # notify_all() replaces the camelCase alias notifyAll(), which is
            # deprecated since Python 3.10 and removed in Python 3.13.
            self._wake.notify_all()
    @property
    def is_paused(self):
        """bool: True if the response stream is paused."""
        return self._paused
| true | true |
f73c7dc638bbab307915231dccfffeb890f86c05 | 637 | py | Python | users/models.py | mohilkhare1708/descriptiveAnswerChecker | 839404e807f884afd8b59e6f2eebfbc8b1189e83 | [
"MIT"
] | 2 | 2021-02-15T20:50:47.000Z | 2022-02-14T18:31:30.000Z | users/models.py | mohilkhare1708/descriptiveAnswerChecker | 839404e807f884afd8b59e6f2eebfbc8b1189e83 | [
"MIT"
] | null | null | null | users/models.py | mohilkhare1708/descriptiveAnswerChecker | 839404e807f884afd8b59e6f2eebfbc8b1189e83 | [
"MIT"
] | 1 | 2022-01-11T15:10:50.000Z | 2022-01-11T15:10:50.000Z | from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
class Profile(models.Model):
    """Per-user profile with contact details, linked 1:1 to Django's User."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)  # deleting the User deletes the Profile
    full_name = models.CharField(max_length=100)
    email = models.EmailField(max_length=150)
    phone = models.CharField(max_length=10)  # assumes 10-digit local numbers -- TODO confirm
    def __str__(self):
        """Display the linked user's username in admin/listings."""
        return self.user.username
@receiver(post_save, sender=User)
def update_profile_signal(sender, instance, created, **kwargs):
    """Create a Profile for each newly created User, and re-save the
    profile on every subsequent User save."""
    if created:
        Profile.objects.create(user=instance)
    instance.profile.save()
| 31.85 | 63 | 0.756672 | from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
class Profile(models.Model):
    """Per-user profile with contact details, linked 1:1 to Django's User."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)  # cascade delete with the User
    full_name = models.CharField(max_length=100)
    email = models.EmailField(max_length=150)
    phone = models.CharField(max_length=10)  # assumes 10-digit numbers -- TODO confirm
    def __str__(self):
        """Display the linked user's username."""
        return self.user.username
@receiver(post_save, sender=User)
def update_profile_signal(sender, instance, created, **kwargs):
    """post_save hook: create a Profile for new Users, save it otherwise."""
    if created:
        Profile.objects.create(user=instance)
    instance.profile.save()
| true | true |
f73c7e0c7624f7ad271c26fe3c8dd34a3ac22240 | 263 | py | Python | StreamClient.py | tdz-ia/NVSS | 741b8884df0ab9862cb98e45d60470d34f8a8e4a | [
"BSD-3-Clause"
] | 2 | 2020-09-03T06:47:44.000Z | 2022-02-26T09:42:53.000Z | StreamClient.py | tdz-ia/NVSS | 741b8884df0ab9862cb98e45d60470d34f8a8e4a | [
"BSD-3-Clause"
] | null | null | null | StreamClient.py | tdz-ia/NVSS | 741b8884df0ab9862cb98e45d60470d34f8a8e4a | [
"BSD-3-Clause"
] | null | null | null | # ! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under NVIDIA Simple Streamer License
from StreamingTools import StreamClient
if __name__ == "__main__":
client = StreamClient()
| 21.916667 | 62 | 0.692015 |
from StreamingTools import StreamClient
if __name__ == "__main__":
    # Entry point: instantiating StreamClient is the whole program here.
    client = StreamClient()
| true | true |
f73c7e9707c8389ee8ccde591fa64b8a9c64780b | 666 | py | Python | LEETCODE/leetcode-javascript-master/215 Kth Largest Element in an Array.py | bgoonz/DS-n-Algos-Mega-Archive | 54f41b5a73d67a35bddb911736f0f88c49b7b895 | [
"MIT",
"Unlicense"
] | null | null | null | LEETCODE/leetcode-javascript-master/215 Kth Largest Element in an Array.py | bgoonz/DS-n-Algos-Mega-Archive | 54f41b5a73d67a35bddb911736f0f88c49b7b895 | [
"MIT",
"Unlicense"
] | 25 | 2021-05-02T10:59:18.000Z | 2021-05-14T10:02:22.000Z | LEETCODE/leetcode-javascript-master/215 Kth Largest Element in an Array.py | bgoonz/DS-n-Algos-Mega-Archive | 54f41b5a73d67a35bddb911736f0f88c49b7b895 | [
"MIT",
"Unlicense"
] | 1 | 2021-11-26T20:43:19.000Z | 2021-11-26T20:43:19.000Z | class Solution(object):
def findKthLargest(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
pivot = random.choice(nums);
nums1, nums2 = [], []
for num in nums:
if num > pivot:
nums1.append(num)
elif num < pivot:
nums2.append(num)
if k <= len(nums1):
return self.findKthLargest(nums1, k)
if k > len(nums) - len(nums2): # draw a graph to visualize it! It's not in the top k assortment, but in the small section
return self.findKthLargest(nums2, k - (len(nums) - len(nums2)))
return pivot | 30.272727 | 129 | 0.528529 | class Solution(object):
def findKthLargest(self, nums, k):
pivot = random.choice(nums);
nums1, nums2 = [], []
for num in nums:
if num > pivot:
nums1.append(num)
elif num < pivot:
nums2.append(num)
if k <= len(nums1):
return self.findKthLargest(nums1, k)
if k > len(nums) - len(nums2):
return self.findKthLargest(nums2, k - (len(nums) - len(nums2)))
return pivot | true | true |
f73c7edd2f0f3e957b3ed2c0f44f75ecaf5855d7 | 115 | py | Python | src/__init__.py | SLD3V/MASK | 8d288d8f703969b4fe22a8ddb56db3734d4d1386 | [
"BSD-2-Clause"
] | null | null | null | src/__init__.py | SLD3V/MASK | 8d288d8f703969b4fe22a8ddb56db3734d4d1386 | [
"BSD-2-Clause"
] | null | null | null | src/__init__.py | SLD3V/MASK | 8d288d8f703969b4fe22a8ddb56db3734d4d1386 | [
"BSD-2-Clause"
] | null | null | null | # importing module constants from .
from . import constants
# importing module helper from .
from . import helper
| 19.166667 | 35 | 0.765217 |
from . import constants
from . import helper
| true | true |
f73c80498d3c644e050d185faec90ed7788a85ea | 33,840 | py | Python | migratecomware.py | zabrewer/automation-scripts | cd9f4ab8227333412baeba659d5a9fea4343e78f | [
"MIT"
] | 1 | 2021-03-15T16:49:05.000Z | 2021-03-15T16:49:05.000Z | migratecomware.py | zabrewer/automation-scripts | cd9f4ab8227333412baeba659d5a9fea4343e78f | [
"MIT"
] | null | null | null | migratecomware.py | zabrewer/automation-scripts | cd9f4ab8227333412baeba659d5a9fea4343e78f | [
"MIT"
] | null | null | null | # This is a script to migrate infrastructure from Comware-based switches, such as the
# HPE A-series, to Meraki MS switches. The script reads an input file which defines which
# Comware switch will be migrated to which MS. Configuration is read from Comware through SSH,
# converted to Meraki form and uploaded to the Meraki cloud using the Dashboard API.
#
# Comware devices are referenced by IP address. Meraki devices are referenced by serial number.
#
# You need to have Python 3 and the Requests module installed. You
# can download the module here: https://github.com/kennethreitz/requests
# or install it using pip.
#
# The script also requires the Paramiko module for SSH functions. More info about installing Paramiko
# can be found here: http://www.paramiko.org/installing.html
#
# This script uses spaces for indentation. Do not use the Tab character when modifying it.
#
# To run the script, enter:
# python migratecomware.py -k <API key> -o <org name> -i <init file> [-u <default user>] [-p <default pass>] [-m <operating mode>]
#
# To make script chaining easier, all lines containing informational messages to the user
# start with the character @
#
#HOW TO CREATE AN INITIALIZATION FILE:
#An initialization file with device mappings is required for migratecomware.py
#
#For an example of a correct init config file, please see:
# https://github.com/meraki/automation-scripts/blob/master/migration_init_file.txt
#
#Initialization file syntax:
# * Blank lines and lines only containing whitespace will be ignored.
# * Use lines beginning with # as comments. These lines will be ignored.
# * Use "net=Network_name" to define a network. A network definition line must exist before any
# device definition lines.
# * Device definition lines. These lines define the IP address of the original Comware switch,
# the Meraki MS switch serial number the configuration will be transferred to and optionally
# a SSH username and password to log into the Comware device. If username and password are
# omitted, default credentials will be used. These lines can have four forms:
# <device_ip> <serial_number>
# <device_ip> <serial_number> <username> <password>
# file <filename> <serial_number>
#
#Examples of net definition and device definition lines, commented out:
#
#net=Migrated headquarters network
#10.1.1.20 AAAA-BBBB-CCCC admin admin
#10.1.1.21 AAAA-BBBB-DDDD admin@system admin123
#file myconfig.cfg BBBB-CCCC-DDDD
#
#net=Migrated branch network
#192.168.10.10 AAAA-BBBB-EEEE
import sys, getopt, requests, json, paramiko, re
class c_portconfig:
    """Meraki-side configuration for one switch port (all values kept as
    strings, matching what the Dashboard API expects)."""
    def __init__(self):
        # Identification: (type, number) must form a unique combination.
        self.name = ''           # port label (single word)
        self.type = 'null'       # copper speed or sfp
        self.number = '0'        # port number within the type
        # VLAN settings.
        self.mode = 'access'     # 'access' or 'trunk'
        self.vlan = '1'          # access VLAN or trunk native VLAN
        self.allowedvlans = ''   # trunk allowed VLANs
        self.voicevlan = ''      # voice VLAN
        # State and per-port features.
        self.enabled = 'true'    # 'true'/'false'
        self.poeenabled = ''     # 'true'/'false'
        self.rstp = ''
        self.isolation = ''
        self.stpguard = ''
#end class
class c_merakidevice:
    """One source-to-destination migration record: where the Comware config
    comes from (IP+SSH or file) and which Meraki switch receives it."""
    def __init__(self):
        # Destination (Meraki) side.
        self.hostname = 'unnamed'  # hostname to assign to the device
        self.serial = ''           # serial number of the destination device
        self.netname = ''          # network this device belongs to
        # Source (Comware) side: either srcip+credentials or srcfile is used.
        self.srcip = ''            # device IP to pull config from (blank if file)
        self.srcfile = ''          # file to pull config from (blank if IP/SSH)
        self.srcuser = ''          # SSH username (blank if file)
        self.srcpass = ''          # SSH password (blank if file)
        # Parsed configuration.
        self.rawcfg = []           # raw config lines (strings)
        self.portcfg = []          # c_portconfig instances for this device
#end class
def printusertext(p_message):
    """Print a user-facing informational line, prefixed with '@'.

    Lines starting with '@' are meant for humans; scripts chaining this
    tool's output should skip them.
    """
    print(f'@ {p_message!s}')
def printhelp():
    """Print usage/help text to the user via printusertext."""
    printusertext('')
    printusertext('This is a script to migrate infrastructure from Comware-based switches, such as the')
    printusertext(' HPE A-series, to Meraki MS switches. The script reads an input file which defines which')
    printusertext(' Comware switch will be migrated to which MS. Configuration is read from Comware through SSH,')
    printusertext(' converted to Meraki form and uploaded to the Meraki cloud using the Dashboard API.')
    printusertext('')
    printusertext('To run the script, enter:')
    printusertext('python migratecomware.py -k <API key> -o <org> -i <init file> [-u <default user>] [-p <default pass>] [-m <mode>]')
    printusertext('')
    printusertext('The script needs a valid initialization configuration file to run (parameter -i).')
    printusertext(" For syntax help please see the comment lines in the beginning of this script's code.")
    printusertext('')
    printusertext('Parameter "-m" has 3 valid forms:')
    printusertext(' * -m simulation : This is the default mode. The script will print to output a simulation')
    printusertext(' of what changes will be made to what switch. If the target devices are not part of the')
    printusertext(' organization defined in "-o", the script will fail.')
    printusertext(' * -m simulation+claim : The script will print to output a simulation')
    printusertext(' of what changes will be made to what switch. If the target devices are not part of the')
    printusertext(' organization defined in "-o", the script will attempt to claim it and read needed info.')
    printusertext(' * -m commit : The script will migrate Comware configuration to the Meraki cloud.')
    printusertext('')
    printusertext(' Example:')
    printusertext(' python migratecomware.py -k 1234 -o MyOrg -i initconfig.txt -u foo -p bar -m commit')
    printusertext('')
    printusertext('Use double quotes ("") in Windows to pass arguments containing spaces. Names are case-sensitive.')
### SECTION: Functions for interacting with SSH and files
def loadinitcfg(p_filename, p_defaultuser, p_defaultpass):
    """Load network/device mappings from the init configuration file.

    Each "net=<name>" line starts a network; subsequent device lines are
    either "<ip> <serial> [<user> <pass>]" or "file <filename> <serial>".
    Blank lines and lines starting with '#' are ignored.

    Returns a list of c_merakidevice records (empty if the file cannot be
    opened). Exits the script via sys.exit(2) on any syntax error.

    BUGFIX: the original kept a manual index counter that was incremented
    twice per device line, causing an IndexError as soon as a second device
    was defined. Records are now built locally and appended once.
    """
    configtable = []       # c_merakidevice() instances
    networkdefined = False
    currentnet = ''
    linenum = 0
    try:
        f = open(p_filename, 'r')
    except OSError:
        # Unreadable init file: caller treats an empty table as failure.
        return configtable
    with f:
        for line in f:
            linenum += 1
            stripped = line.strip()
            # Drop blank lines and comments.
            if len(stripped) == 0 or stripped[0] == '#':
                continue
            # Network definition line.
            if stripped[:4] == 'net=':
                if len(stripped[4:]) > 0:
                    currentnet = stripped[4:]
                    networkdefined = True
                else:
                    printusertext('ERROR: Init config (line %d): Network name cannot be blank' % linenum)
                    sys.exit(2)
                continue
            # Anything else is a device record and needs a current network.
            if not networkdefined:
                printusertext('ERROR: Init config (line %d): Device with no network defined' % linenum)
                sys.exit(2)
            splitline = stripped.split()
            if len(splitline) < 2:
                printusertext('ERROR: Init config (line %d): Invalid definition: %s' % (linenum, stripped))
                sys.exit(2)
            device = c_merakidevice()
            device.netname = currentnet
            if splitline[0] == 'file':
                # "file <filename> <serial>" form.
                if len(splitline) > 2:
                    device.srcfile = splitline[1]
                    device.serial = splitline[2]
                else:
                    printusertext('ERROR: Init config (line %d): Invalid definition: %s' % (linenum, stripped))
                    sys.exit(2)
            else:
                # "<ip> <serial> [<user> <pass>]" form.
                device.srcip = splitline[0]
                device.serial = splitline[1]
                if len(splitline) > 3:
                    # Device-specific username and password defined.
                    device.srcuser = splitline[2]
                    device.srcpass = splitline[3]
                elif len(splitline) > 2:
                    # Got either username or password, but not both.
                    printusertext('ERROR: Init config (line %d): Invalid definition: %s' % (linenum, stripped))
                    sys.exit(2)
                else:
                    # Fall back to default credentials; abort if unset.
                    if p_defaultuser == '\n' or p_defaultpass == '\n':
                        printusertext('ERROR: Default SSH credentials needed, but not defined')
                        sys.exit(2)
                    device.srcuser = p_defaultuser
                    device.srcpass = p_defaultpass
            configtable.append(device)
    return configtable
def loadcomwareconfig (p_hostip, p_user, p_pass):
    """Log into a Comware device over SSH and return its running config.

    Runs "display current", strips prompts/banners/"More" pagination
    artifacts and comments, and returns the remaining lines as strings.
    Returns the single-element list ['null'] on SSH connection errors.
    """
    linetable = []
    ssh = paramiko.SSHClient()
    # NOTE(review): auto-accepting unknown host keys is convenient for a
    # migration tool but skips host authentication.
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        ssh.connect(p_hostip, username=p_user, password=p_pass)
        stdin, stdout, stderr = ssh.exec_command("display current")
        #THE LINE BELOW IS USED TO DISMISS "MORE" PROMPTS WHEN DISPLAYING CONFIG. ADJUST # OF SPACES IF NEEDED
        stdin.write(' \n')
        stdin.flush()
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt etc.
        printusertext('WARNING: Could not connect to source device: %s' % p_hostip)
        linetable.append('null')
        return (linetable)
    strippedline = []
    initiated = False
    for line in stdout.read().splitlines():
        if len(line) > 0:
            strippedline = line.strip().decode('ascii')
            # drop all lines before the first prompt (login banner, etc)
            # a login banner line starting with "<" and ending with ">" may cause the script to fail
            # check for sequence '<hostname>'
            if strippedline.startswith('<') and strippedline.endswith('>'):
                initiated = True
            if initiated and strippedline[0] != '<':
                # check all long lines to see if they start with " ---- More ----"
                if len(strippedline) > 15:
                    # look for sequence "---"
                    if strippedline[:3] == '---':
                        # remove garbage from beginning of line
                        strippedline = strippedline[19:].lstrip()[5:].lstrip()
                # drop comments, check for character 35: "#"
                if strippedline[0] != '#':
                    # store ascii representations of received characters
                    linetable.append(strippedline)
    return (linetable)
def loadcomwarecfgfile(p_filename):
    """Load a Comware device configuration from a text file.

    Returns the stripped, non-empty, non-comment lines. On read failure
    prints a warning and returns the single-element list ['null'] (the
    sentinel the callers check for).
    """
    linetable = []
    try:
        f = open(p_filename, 'r')
    except OSError:
        linetable.append('null')
        printusertext('WARNING: Could not read source config file: %s' % p_filename)
        return linetable
    # Context manager guarantees the file is closed even if reading fails.
    with f:
        for line in f:
            strippedline = line.strip()
            # Ignore blank lines and comments.
            if len(strippedline) > 0 and strippedline[0] != '#':
                linetable.append(strippedline)
    return linetable
def extracthostname(p_rawcfg):
    """Extract the device hostname (sysname) from a Comware config dump.

    Returns the argument of the first 'sysname' line, or '' if none found.
    """
    for cfgline in p_rawcfg:
        pieces = cfgline.split()
        # Require at least two tokens: a blank line or a bare 'sysname'
        # with no argument would otherwise raise IndexError.
        if len(pieces) >= 2 and pieces[0] == 'sysname':
            return pieces[1]
    return ''
def extractportcfg(p_rawcfg):
    """Extract per-port configuration from a Comware config line table.

    Only GigabitEthernet interfaces are parsed; other interface types are
    skipped (and their sub-commands ignored) until the next supported one.
    Returns a list of c_portconfig instances in order of appearance.
    """
    intcfg = []
    intcount = 0
    avlan = '' #string for building allowed VLAN value
    supportedinterface = False
    #command parser loop
    for cfgline in p_rawcfg:
        pieces = cfgline.split()
        if pieces[0] == 'description' and supportedinterface:
            #set int desc as port name. strip everything except alphanumerics and "_"
            intcfg[intcount-1].name = re.sub(r'\W+','', cfgline[12:])[:20]
        elif pieces[0] == 'interface':
            #if interface is of a supported type, create new entry. otherwise ignore it
            #and lock int command parsing functions until a supported one comes up
            if pieces[1][:15] == 'GigabitEthernet':
                intcfg.append(c_portconfig())
                intcfg[intcount].type = 'GigabitEthernet'
                #WARNING: THE LINE BELOW ONLY WORKS PROPERLY FOR 1RU SWITCHES
                intcfg[intcount].number = pieces[1].split('/')[-1] #only take last number in string
                intcount += 1
                supportedinterface = True
            else:
                supportedinterface = False
        elif pieces[0] == 'port' and supportedinterface:
            if pieces[1] == 'access':
                if pieces[2] == 'vlan':
                    intcfg[intcount-1].vlan = pieces[3]
            if pieces[1] == 'link-type':
                intcfg[intcount-1].mode = pieces[2]
            if pieces[1] == 'trunk':
                if pieces[2] == 'permit':
                    #example Comware command: port link-type trunk permit vlan 10 50 to 60
                    #builds a Meraki-style list like "10,50-60"
                    if pieces[3] == 'vlan':
                        avlan = ''
                        for i in range(4, len(pieces)):
                            if pieces[i] == 'to':
                                avlan += '-'
                            else:
                                if len(avlan) == 0:
                                    avlan += pieces[i]
                                elif avlan[len(avlan)-1] == '-':
                                    avlan += pieces[i]
                                else:
                                    avlan += ',%s' % pieces[i]
                        intcfg[intcount-1].allowedvlans = avlan
                if pieces[2] == 'pvid':
                    if pieces[3] == 'vlan':
                        #trunk native VLAN
                        intcfg[intcount-1].vlan = pieces[4]
        #elif pieces[0] == 'port-security':
            #DEBUG: keep the line below commented, unless debugging this function
            #printusertext ('DEBUG: Port security: %s' % pieces[1])
        #    if intcount == 0:
                #still in global config
        #        if pieces[1] == 'enable':
                    #printusertext ('DEBUG: Enable port-security')
        #            continue
        elif pieces[0] == 'shutdown' and supportedinterface:
            intcfg[intcount-1].enabled = 'false'
        #elif pieces[0] == 'undo' and supportedinterface:
            #DEBUG: keep the line below commented, unless debugging this function
            #printusertext ('DEBUG: Undo for int [%d]: %s' % (intcount, pieces[1]))
        #    if pieces[1] == 'dot1x':
                #printusertext ('DEBUG: Dot1x: %s' % pieces[2])
        #        continue
        #else:
            #DEBUG: keep the line below commented, unless debugging this function
            #print ('DEBUG: Invalid line')
    return(intcfg)
### SECTION: Functions for interacting with Dashboard
def getorgid(p_apikey, p_orgname):
    """Look up the organization id for an exact organization name.

    Returns 'null' on API failure or if no organization has that name.
    """
    r = requests.get('https://dashboard.meraki.com/api/v0/organizations', headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
    if r.status_code != requests.codes.ok:
        return 'null'
    # Scan the org list for a case-sensitive name match.
    for record in r.json():
        if record['name'] == p_orgname:
            return record['id']
    return 'null'
def getshardurl(p_apikey, p_orgid):
    """Return the Dashboard API hostname to use for subsequent calls.

    Patched to always return the shared 'api-mp.meraki.com' host; both
    parameters are retained only for backward compatibility with callers.
    """
    return 'api-mp.meraki.com'
def getnwid(p_apikey, p_shardurl, p_orgid, p_nwname):
    """Look up the network id for an exact network name within an org.

    Returns 'null' on API failure or if no network has that name.
    """
    r = requests.get('https://%s/api/v0/organizations/%s/networks' % (p_shardurl, p_orgid), headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
    if r.status_code != requests.codes.ok:
        return 'null'
    # Scan the org's networks for a case-sensitive name match.
    for record in r.json():
        if record['name'] == p_nwname:
            return record['id']
    return 'null'
def createnw(p_apikey, p_shardurl, p_dstorg, p_nwdata):
    """Create a network in the destination org unless one with the same
    name already exists.

    Returns 'ok' after attempting creation, or 'null' when skipped
    (duplicate name or a 'systems manager' type, which cannot be created).
    """
    #example for p_nwdata:
    #nwparams = {'name': 'hi', 'timeZone': 'Europe/Helsinki', 'tags': 'mytag', 'organizationId': '123', 'type': 'switch appliance'}
    #check if network exists
    getnwresult = getnwid(p_apikey, p_shardurl, p_dstorg, p_nwdata['name'])
    if getnwresult != 'null':
        printusertext('WARNING: Skipping network "%s" (Already exists)' % p_nwdata['name'])
        return('null')
    if p_nwdata['type'] == 'combined':
        #'combined' is not a creatable type; expand to the device types
        nwtype = 'wireless switch appliance'
    else:
        nwtype = p_nwdata['type']
    if nwtype != 'systems manager':
        # NOTE(review): the response status is not checked here; 'ok' means
        # the request was sent, not that creation succeeded.
        r = requests.post('https://%s/api/v0/organizations/%s/networks' % (p_shardurl, p_dstorg), data=json.dumps({'timeZone': p_nwdata['timeZone'], 'tags': p_nwdata['tags'], 'name': p_nwdata['name'], 'organizationId': p_dstorg, 'type': nwtype}), headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
    else:
        printusertext('WARNING: Skipping network "%s" (Cannot create SM networks)' % p_nwdata['name'])
        return('null')
    return('ok')
def claimdevice(p_apikey, p_shardurl, p_nwid, p_devserial):
    """Claim a device (by serial) into a network. Always returns 0; the
    API response is not checked."""
    payload = json.dumps({'serial': p_devserial})
    headers = {'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'}
    requests.post('https://%s/api/v0/networks/%s/devices/claim' % (p_shardurl, p_nwid), data=payload, headers=headers)
    return 0
def claimdeviceorg(p_apikey, p_shardurl, p_orgid, p_devserial):
    """Claim a device (by serial) into an org's inventory without adding it
    to a network. Always returns 0; the API response is not checked."""
    payload = json.dumps({'serial': p_devserial})
    headers = {'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'}
    requests.post('https://%s/api/v0/organizations/%s/claim' % (p_shardurl, p_orgid), data=payload, headers=headers)
    return 0
def getorgdeviceinfo (p_apikey, p_shardurl, p_orgid, p_devserial):
    """Get basic device info from the org inventory (the device does not
    need to be part of a network).

    Returns a dict with mac/serial/networkId/model/claimedAt/publicIp on
    success, or {'serial': 'null', 'model': 'null'} on API failure or when
    the serial is not present in the inventory.
    """
    r = requests.get('https://%s/api/v0/organizations/%s/inventory' % (p_shardurl, p_orgid), headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
    if r.status_code != requests.codes.ok:
        return {'serial': 'null', 'model': 'null'}
    for record in r.json():
        if record['serial'] == p_devserial:
            # Serials are unique in the inventory, so return immediately
            # instead of scanning the rest of the list.
            return {'mac': record['mac'], 'serial': record['serial'], 'networkId': record['networkId'], 'model': record['model'], 'claimedAt': record['claimedAt'], 'publicIp': record['publicIp']}
    return {'serial': 'null', 'model': 'null'}
def setswportconfig(p_apikey, p_shardurl, p_devserial, p_portnum, p_portcfg):
    #sets switchport configuration to match table given as parameter
    #empty-string values mean "not configured" and are stripped before upload
    validconfig = {key: value for key, value in p_portcfg.items() if value != ''}
    r = requests.put('https://%s/api/v0/devices/%s/switchPorts/%s' % (p_shardurl, p_devserial, p_portnum), data=json.dumps(validconfig), headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
    return (0)
def setdevicedata(p_apikey, p_shardurl, p_nwid, p_devserial, p_field, p_value, p_movemarker):
    #modifies value of device record. Returns 'ok' on success
    #on failure returns 'null'
    #p_movemarker accepts a boolean (True/False) or the strings 'true'/'false'
    #BUGFIX: the original used "if p_movemarker:", which is truthy for the
    #non-empty string 'false' that in-file callers pass, so the map marker
    #was always moved. Normalize strings explicitly; booleans behave as before.
    if isinstance(p_movemarker, str):
        movevalue = "true" if p_movemarker.lower() == "true" else "false"
    else:
        movevalue = "true" if p_movemarker else "false"
    r = requests.put('https://%s/api/v0/networks/%s/devices/%s' % (p_shardurl, p_nwid, p_devserial), data=json.dumps({p_field: p_value, 'moveMapMarker': movevalue}), headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
    if r.status_code != requests.codes.ok:
        return ('null')
    return('ok')
def migratedevices(p_apikey, p_shardurl, p_orgid, p_devt, p_mode):
    """Migrate switchport configuration for every device in p_devt to Dashboard.

    For each device: resolve (and in commit mode, create) its target network,
    verify the device is an MS switch in the org inventory, derive how many
    ports to migrate from the model name, then either upload each port config
    (commit) or print a simulation table to stdout.

    Exits the process (sys.exit(2)) on unrecoverable errors such as a missing
    network id, an unclaimable device, or a non-switch device.
    """
    #migrates configuration according to device table p_devt. has three modes according to p_mode
    #p_mode = 'commit'    : uploads configuration to Meraki cloud
    #p_mode = 'simulation': prints intended changes to stdout without touching cloud. will fail if device not in inventory
    #p_mode = 'simulation+claim': prints intended changes to stdout without touching cloud. will attempt to claim devices if they are not in inventory to get info

    mode_commit = False
    mode_claim = False
    nwid = ''
    portconfig = {}
    #NOTE(review): max_migrated_ports is initialized once, outside the device
    #loop below; if a later device's model/port combination matches none of the
    #branches, it silently reuses the previous device's value — confirm intended.
    max_migrated_ports = 0

    #any unrecognized p_mode value behaves like plain 'simulation'
    if p_mode == 'commit':
        mode_commit = True
        mode_claim = True
    elif p_mode == 'simulation+claim':
        mode_claim = True

    for dev in p_devt:
        nwid = getnwid(p_apikey, p_shardurl, p_orgid, dev.netname)

        if nwid == 'null' and mode_commit:
            #if nw missing and commit mode, it needs to be created
            #nwid == 'null' is OK if running simulation
            #NOTE THAT TIMEZONE IS HARDCODED IN THE SCRIPT AT THIS POINT. THIS MAY CHANGE IN A LATER VERSION
            nwparams = {'name': dev.netname, 'timeZone': 'Europe/Helsinki', 'tags': 'migratecomwarepy', 'organizationId': p_orgid, 'type': 'switch'}
            createnw(p_apikey, p_shardurl, p_orgid, nwparams)
            #re-query to pick up the id of the freshly created network
            nwid = getnwid(p_apikey, p_shardurl, p_orgid, dev.netname)
            #check if something went wrong
            if nwid == 'null':
                printusertext('ERROR: Unable to get ID for network %s' % dev.netname)
                sys.exit(2)

        #get model of device to check that it is a switch
        devinfo = getorgdeviceinfo (p_apikey, p_shardurl, p_orgid, dev.serial)
        if devinfo['model'] == 'null':
            #device not in org inventory; claim it first if the mode allows
            if mode_claim:
                claimdeviceorg(p_apikey, p_shardurl, p_orgid, dev.serial)
                devinfo = getorgdeviceinfo (p_apikey, p_shardurl, p_orgid, dev.serial)
                if devinfo['model'] == 'null':
                    printusertext('ERROR: Unable to claim device %s' % dev.serial)
                    sys.exit(2)
            else:
                printusertext('ERROR: Device %s not part of org %s' % (dev.serial, p_orgid))
                sys.exit(2)
        if devinfo['model'][:2] != 'MS':
            printusertext('ERROR: Device %s is type "%s": Not a switch' % (dev.serial, devinfo['model']))
            sys.exit(2)

        #at this stage we have nwid and device model
        #the switch may or may not be part of a network, so cannot read number of ports dynamically.
        #it will need to be done as part of a static configuration list
        #assumes model name convention of MXxxx-yyzz, where xxx: model series, yy:number of ports, zz:poe
        #e.g. 'MS225-48FP' -> modelnumber '225', portnumber '48'
        modelnumber = re.sub(r'[^0-9]','',devinfo['model'][:5])
        portnumber  = re.sub(r'[^0-9]','',devinfo['model'][6:])

        #static model table: max_migrated_ports = physical ports + uplinks
        if modelnumber == '220':
            if portnumber == '8':
                max_migrated_ports = 10
            elif portnumber == '24':
                max_migrated_ports = 28
            elif portnumber == '48':
                max_migrated_ports = 52
        elif modelnumber == '225':
            if portnumber == '24':
                max_migrated_ports = 28
            elif portnumber == '48':
                max_migrated_ports = 52
        elif modelnumber == '250':
            if portnumber == '24':
                max_migrated_ports = 28
            elif portnumber == '48':
                max_migrated_ports = 52
        elif modelnumber == '350':
            if portnumber == '24':
                max_migrated_ports = 28
            elif portnumber == '48':
                max_migrated_ports = 52
        elif modelnumber == '410':
            if portnumber == '16':
                max_migrated_ports = 18
            elif portnumber == '32':
                max_migrated_ports = 34
        elif modelnumber == '425':
            if portnumber == '16':
                max_migrated_ports = 18
            elif portnumber == '32':
                max_migrated_ports = 34
        else:
            #if unknown device model, assume 0 uplinks as failsafe, until the script is updated to support it
            intportnumber = int(portnumber)
            #if Meraki switch nodel naming has changed from MSxxx-yy, the line below will fail
            if intportnumber <= 48:
                max_migrated_ports = intportnumber

        #deal with port number mismatches: never migrate more ports than the
        #source config actually defines
        if len(dev.portcfg) < max_migrated_ports:
            max_migrated_ports = len(dev.portcfg)

        #now that we also know the MAC address of the device, we can also reset the hostname
        #for devices that did not get a value by running extracthostname() previously
        if dev.hostname == '':
            dev.hostname = devinfo['mac']

        #do preliminary stuff, like claiming device to nw or printing header
        if mode_commit:
            claimdevice(p_apikey, p_shardurl, nwid, dev.serial)
            #verify the claim landed the device in the intended network
            devinfo = getorgdeviceinfo (p_apikey, p_shardurl, p_orgid, dev.serial)
            if devinfo['networkId'] != nwid:
                printusertext('ERROR: Unable set network for device %s' % dev.serial)
                sys.exit(2)
            #set hostname. Don't worry if it fails
            #NOTE(review): 'false' here is a string; see setdevicedata() for how
            #p_movemarker is interpreted
            setdevicedata(p_apikey, p_shardurl, nwid, dev.serial, 'name', dev.hostname, 'false')
            printusertext('INFO: Migrating device %s (name: %s), source %s%s' % (dev.serial, dev.hostname, dev.srcip, dev.srcfile))
        else:
            #simulation: print a human-readable header + per-port table instead
            print('')
            print('Migration target device %s (name: %s, %s) in network "%s"' % (dev.serial, dev.hostname, devinfo['model'],dev.netname))
            print('Source: %s%s' % (dev.srcip, dev.srcfile))
            print('Num Name                 Mode    Enabled VLAN  PoE     VoiceVLAN TrnkAllowVLAN')

        for i in range (0, max_migrated_ports):
            #assemble the Dashboard switchport payload; empty-string fields are
            #dropped later by setswportconfig()
            portconfig = {'isolationEnabled': dev.portcfg[i].isolation, 'rstpEnabled': dev.portcfg[i].rstp, 'enabled': dev.portcfg[i].enabled, 'stpGuard': dev.portcfg[i].stpguard, 'accessPolicyNumber': '', 'type': dev.portcfg[i].mode, 'allowedVlans': dev.portcfg[i].allowedvlans, 'poeEnabled': dev.portcfg[i].poeenabled, 'name': dev.portcfg[i].name, 'tags': 'migratecomwarepy', 'number': dev.portcfg[i].number, 'vlan': dev.portcfg[i].vlan, 'voiceVlan': dev.portcfg[i].voicevlan}
            if mode_commit:
                setswportconfig(p_apikey, p_shardurl, dev.serial, dev.portcfg[i].number, portconfig)
            else:
                print('%s %s %s %s %s %s %s %s' % ("{:>3s}".format(portconfig['number']), "{:>20s}".format(portconfig['name']), "{:>7s}".format(portconfig['type']), "{:>6s}".format(portconfig['enabled']), "{:>5s}".format(portconfig['vlan']), "{:>7s}".format(portconfig['poeEnabled']), "{:>5s}".format(portconfig['voiceVlan']), portconfig['allowedVlans']))

    return() #migratedevices()
### SECTION: Main function
def main(argv):
    """Entry point: parse the command line, load the init file, pull each source
    device's Comware configuration, and run the migration in the requested mode.

    argv -- command line arguments, excluding the program name
            (-k API key, -o org name, -i init file, -u/-p default SSH
            credentials, -m mode, -h help)
    Exits with status 2 on any invalid argument or unrecoverable error.
    """
    #set default values for command line arguments
    arg_apikey   = 'null'
    arg_orgname  = 'null'
    arg_initfile = '????'  #a default value that is not a valid filename
    arg_defuser  = '\n'    #a default value that is not a valid username
    arg_defpass  = '\n'    #a default value that is not a valid password
    arg_mode     = 'simulation'

    #get command line arguments
    try:
        opts, args = getopt.getopt(argv, 'hk:o:i:u:p:m:')
    except getopt.GetoptError:
        printhelp()
        sys.exit(2)

    for opt, arg in opts:
        if opt == '-h':
            printhelp()
            sys.exit()
        elif opt == '-k':
            arg_apikey = arg
        elif opt == '-o':
            arg_orgname = arg
        elif opt == '-i':
            arg_initfile = arg
        elif opt == '-u':
            arg_defuser = arg
        elif opt == '-p':
            arg_defpass = arg
        elif opt == '-m':
            arg_mode = arg

    #check if all required parameters have been given
    if arg_apikey == 'null' or arg_orgname == 'null' or arg_initfile == '????':
        printhelp()
        sys.exit(2)

    #validate operating mode up front, before any network or SSH work
    #(original only detected a bad -m after loading every source config)
    if arg_mode not in ('simulation', 'commit', 'simulation+claim'):
        printusertext('ERROR: Parameter -m: Operating mode not valid')
        sys.exit(2)

    #get organization id corresponding to org name provided by user
    orgid = getorgid(arg_apikey, arg_orgname)
    if orgid == 'null':
        printusertext('ERROR: Fetching organization failed')
        sys.exit(2)

    #get shard URL where Org is stored
    shardurl = getshardurl(arg_apikey, orgid)
    if shardurl == 'null':
        printusertext('ERROR: Fetching Meraki cloud shard FQDN failed')
        sys.exit(2)

    #load configuration file
    devices = loadinitcfg(arg_initfile, arg_defuser, arg_defpass)
    if len(devices) == 0:
        printusertext('ERROR: No valid configuration in init file')
        sys.exit(2)

    #read configuration from each source device (SSH or file) and extract
    #hostname + port configuration from it
    for dev in devices:
        if dev.srcip != '':
            dev.rawcfg = loadcomwareconfig(dev.srcip, dev.srcuser, dev.srcpass)
        else:
            dev.rawcfg = loadcomwarecfgfile(dev.srcfile)
        dev.hostname = extracthostname(dev.rawcfg)
        dev.portcfg = extractportcfg(dev.rawcfg)

    #run migration function in correct operating mode (arg_mode maps 1:1 to
    #migratedevices() p_mode values, so pass it through directly)
    migratedevices(arg_apikey, shardurl, orgid, devices, arg_mode)

    printusertext('End of script.')
#run main() only when executed as a script, not when imported as a module
if __name__ == '__main__':
    main(sys.argv[1:])
json, paramiko, re
class c_portconfig:
def __init__(self):
self.name = ''
self.type = 'null'
self.number = '0'
self.mode = 'access'
self.vlan = '1'
self.allowedvlans = ''
self.enabled = 'true'
self.voicevlan = ''
self.poeenabled = ''
self.rstp = ''
self.isolation = ''
self.stpguard = ''
class c_merakidevice:
def __init__(self):
self.hostname= 'unnamed'
self.serial = ''
self.netname = ''
self.srcip = ''
self.srcfile = ''
self.srcuser = ''
self.srcpass = ''
self.rawcfg = []
self.portcfg = []
def printusertext(p_message):
print('@ %s' % p_message)
def printhelp():
printusertext('')
printusertext('This is a script to migrate infrastructure from Comware-based switches, such as the')
printusertext(' HPE A-series, to Meraki MS switches. The script reads an input file which defines which')
printusertext(' Comware switch will be migrated to which MS. Configuration is read from Comware through SSH,')
printusertext(' converted to Meraki form and uploaded to the Meraki cloud using the Dashboard API.')
printusertext('')
printusertext('To run the script, enter:')
printusertext('python migratecomware.py -k <API key> -o <org> -i <init file> [-u <default user>] [-p <default pass>] [-m <mode>]')
printusertext('')
printusertext('The script needs a valid initialization configuration file to run (parameter -i).')
printusertext(" For syntax help please see the comment lines in the beginning of this script's code.")
printusertext('')
printusertext('Parameter "-m" has 3 valid forms:')
printusertext(' * -m simulation : This is the default mode. The script will print to output a simulation')
printusertext(' of what changes will be made to what switch. If the target devices are not part of the')
printusertext(' organization defined in "-o", the script will fail.')
printusertext(' * -m simulation+claim : The script will print to output a simulation')
printusertext(' of what changes will be made to what switch. If the target devices are not part of the')
printusertext(' organization defined in "-o", the script will attempt to claim it and read needed info.')
printusertext(' * -m commit : The script will migrate Comware configuration to the Meraki cloud.')
printusertext('')
printusertext(' Example:')
printusertext(' python migratecomware.py -k 1234 -o MyOrg -i initconfig.txt -u foo -p bar -m commit')
printusertext('')
printusertext('Use double quotes ("") in Windows to pass arguments containing spaces. Names are case-sensitive.')
### SECTION: Functions for interacting with SSH and files
def loadinitcfg(p_filename, p_defaultuser, p_defaultpass):
#loads initial configuration from a file with network and device definitions
configtable = [] #to be filled with c_merakidevice() instances
networkdefined = False
currentnet = ''
dcount = 0
linenum = 0
try:
f = open(p_filename, 'r')
except:
return(configtable)
#iterate through file and parse lines
for line in f:
linenum += 1
stripped = line.strip()
#drop blank lines
if len(stripped) > 0:
#drop comments
if stripped[0] != '
#process network definition lines
if stripped [:4] == 'net=':
if len(stripped[4:]) > 0:
currentnet = stripped[4:]
networkdefined = True
else:
printusertext('ERROR: Init config (line %d): Network name cannot be blank' % linenum)
sys.exit(2)
else:
#else process as a device record
if networkdefined:
splitline = stripped.split()
if len(splitline) > 1:
#look for file keyword and load config accordingly
if splitline[0] == 'file':
if len(splitline) > 2:
configtable.append(c_merakidevice())
configtable[dcount].netname = currentnet
configtable[dcount].srcfile = splitline[1]
configtable[dcount].serial = splitline[2]
dcount += 1
else:
printusertext('ERROR: Init config (line %d): Invalid definition: %s' % (linenum, stripped))
sys.exit(2)
else:
#not a source file definition. assume FQDN/IP
configtable.append(c_merakidevice())
configtable[dcount].netname = currentnet
configtable[dcount].srcip = splitline[0]
configtable[dcount].serial = splitline[1]
if len(splitline) > 3:
#device-specific username and password defined
configtable[dcount].srcuser = splitline[2]
configtable[dcount].srcpass = splitline[3]
elif len(splitline) > 2:
#got either username or password, but not both
printusertext('ERROR: Init config (line %d): Invalid definition: %s' % (linenum, stripped))
sys.exit(2)
else:
#no device-specific username/password configuration. use defaults
#abort if default user/password are invalid
if p_defaultuser == '\n' or p_defaultpass == '\n':
printusertext('ERROR: Default SSH credentials needed, but not defined')
sys.exit(2)
configtable[dcount].srcuser = p_defaultuser
configtable[dcount].srcpass = p_defaultpass
dcount += 1
else:
printusertext('ERROR: Init config (line %d): Invalid definition: %s' % (linenum, stripped))
sys.exit(2)
else:
printusertext('ERROR: Init config (line %d): Device with no network defined' % linenum)
sys.exit(2)
dcount += 1
f.close()
return (configtable)
def loadcomwareconfig (p_hostip, p_user, p_pass):
#logs into a comware-based device using SSH and pulls its current configuration
#returns a single line 'null' on SSH errors
linetable = []
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh.connect(p_hostip, username=p_user, password=p_pass)
stdin, stdout, stderr = ssh.exec_command("display current")
#THE LINE BELOW IS USED TO DISMISS "MORE" PROMPTS WHEN DISPLAYING CONFIG. ADJUST # OF SPACES IF NEEDED
stdin.write(' \n')
stdin.flush()
except:
printusertext('WARNING: Could not connect to source device: %s' % p_hostip)
linetable.append('null')
return (linetable)
strippedline = []
initiated = False
for line in stdout.read().splitlines():
if len(line) > 0:
strippedline = line.strip().decode('ascii')
# drop all lines before the first prompt (login banner, etc)
# a login banner line starting with "<" and ending with ">" may cause the script to fail
# check for sequence '<hostname>'
if strippedline.startswith('<') and strippedline.endswith('>'):
initiated = True
if initiated and strippedline[0] != '<':
# check all long lines to see if they start with " ---- More ----"
if len(strippedline) > 15:
# look for sequence "---"
if strippedline[:3] == '---':
# remove garbage from beginning of line
strippedline = strippedline[19:].lstrip()[5:].lstrip()
# drop comments, check for character 35: "#"
if strippedline[0] != '
# store ascii representations of received characters
linetable.append(strippedline)
return (linetable)
def loadcomwarecfgfile(p_filename):
#loads source device configuration from file
linetable = []
try:
f = open(p_filename, 'r')
except:
linetable.append('null')
printusertext('WARNING: Could not read source config file: %s' % p_filename)
return(linetable)
strippedline = ''
for line in f:
strippedline = line.strip()
if len(strippedline) > 0:
#ignore comments
if strippedline[0] != '
linetable.append(strippedline)
f.close()
return (linetable)
def extracthostname(p_rawcfg):
#extract hostname form device config
#command parser loop
for cfgline in p_rawcfg:
pieces = cfgline.split()
if pieces[0] == 'sysname':
return (pieces[1])
return ('')
def extractportcfg(p_rawcfg):
#extracts port (interface) configuration from a comware configuration table
intcfg = []
intcount = 0
avlan = '' #string for building allowed VLAN value
supportedinterface = False
#command parser loop
for cfgline in p_rawcfg:
pieces = cfgline.split()
if pieces[0] == 'description' and supportedinterface:
#set int desc as port name. strip everything except alphanumerics and "_"
intcfg[intcount-1].name = re.sub(r'\W+','', cfgline[12:])[:20]
elif pieces[0] == 'interface':
#if interface is of a supported type, create new entry. otherwise ignore it
#and lock int command parsing functions until a supported one comes up
if pieces[1][:15] == 'GigabitEthernet':
intcfg.append(c_portconfig())
intcfg[intcount].type = 'GigabitEthernet'
#WARNING: THE LINE BELOW ONLY WORKS PROPERLY FOR 1RU SWITCHES
intcfg[intcount].number = pieces[1].split('/')[-1] #only take last number in string
intcount += 1
supportedinterface = True
else:
supportedinterface = False
elif pieces[0] == 'port' and supportedinterface:
if pieces[1] == 'access':
if pieces[2] == 'vlan':
intcfg[intcount-1].vlan = pieces[3]
if pieces[1] == 'link-type':
intcfg[intcount-1].mode = pieces[2]
if pieces[1] == 'trunk':
if pieces[2] == 'permit':
#example Comware command: port link-type trunk permit vlan 10 50 to 60
if pieces[3] == 'vlan':
avlan = ''
for i in range(4, len(pieces)):
if pieces[i] == 'to':
avlan += '-'
else:
if len(avlan) == 0:
avlan += pieces[i]
elif avlan[len(avlan)-1] == '-':
avlan += pieces[i]
else:
avlan += ',%s' % pieces[i]
intcfg[intcount-1].allowedvlans = avlan
if pieces[2] == 'pvid':
if pieces[3] == 'vlan':
intcfg[intcount-1].vlan = pieces[4]
#elif pieces[0] == 'port-security':
#DEBUG: keep the line below commented, unless debugging this function
#printusertext ('DEBUG: Port security: %s' % pieces[1])
# if intcount == 0:
#still in global config
# if pieces[1] == 'enable':
#printusertext ('DEBUG: Enable port-security')
# continue
elif pieces[0] == 'shutdown' and supportedinterface:
intcfg[intcount-1].enabled = 'false'
#elif pieces[0] == 'undo' and supportedinterface:
#DEBUG: keep the line below commented, unless debugging this function
#printusertext ('DEBUG: Undo for int [%d]: %s' % (intcount, pieces[1]))
# if pieces[1] == 'dot1x':
#printusertext ('DEBUG: Dot1x: %s' % pieces[2])
# continue
#else:
#DEBUG: keep the line below commented, unless debugging this function
#print ('DEBUG: Invalid line')
return(intcfg)
### SECTION: Functions for interacting with Dashboard
def getorgid(p_apikey, p_orgname):
#looks up org id for a specific org name
#on failure returns 'null'
r = requests.get('https://dashboard.meraki.com/api/v0/organizations', headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
if r.status_code != requests.codes.ok:
return 'null'
rjson = r.json()
for record in rjson:
if record['name'] == p_orgname:
return record['id']
return('null')
def getshardurl(p_apikey, p_orgid):
#patch
return("api-mp.meraki.com")
def getnwid(p_apikey, p_shardurl, p_orgid, p_nwname):
#looks up network id for a network name
#on failure returns 'null'
r = requests.get('https://%s/api/v0/organizations/%s/networks' % (p_shardurl, p_orgid), headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
if r.status_code != requests.codes.ok:
return 'null'
rjson = r.json()
for record in rjson:
if record['name'] == p_nwname:
return record['id']
return('null')
def createnw(p_apikey, p_shardurl, p_dstorg, p_nwdata):
#creates network if one does not already exist with the same name
#example for p_nwdata:
#nwparams = {'name': 'hi', 'timeZone': 'Europe/Helsinki', 'tags': 'mytag', 'organizationId': '123', 'type': 'switch appliance'}
#check if network exists
getnwresult = getnwid(p_apikey, p_shardurl, p_dstorg, p_nwdata['name'])
if getnwresult != 'null':
printusertext('WARNING: Skipping network "%s" (Already exists)' % p_nwdata['name'])
return('null')
if p_nwdata['type'] == 'combined':
#find actual device types
nwtype = 'wireless switch appliance'
else:
nwtype = p_nwdata['type']
if nwtype != 'systems manager':
r = requests.post('https://%s/api/v0/organizations/%s/networks' % (p_shardurl, p_dstorg), data=json.dumps({'timeZone': p_nwdata['timeZone'], 'tags': p_nwdata['tags'], 'name': p_nwdata['name'], 'organizationId': p_dstorg, 'type': nwtype}), headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
else:
printusertext('WARNING: Skipping network "%s" (Cannot create SM networks)' % p_nwdata['name'])
return('null')
return('ok')
def claimdevice(p_apikey, p_shardurl, p_nwid, p_devserial):
#claims a device into an org
r = requests.post('https://%s/api/v0/networks/%s/devices/claim' % (p_shardurl, p_nwid), data=json.dumps({'serial': p_devserial}), headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
return(0)
def claimdeviceorg(p_apikey, p_shardurl, p_orgid, p_devserial):
#claims a device into an org without adding to a network
r = requests.post('https://%s/api/v0/organizations/%s/claim' % (p_shardurl, p_orgid), data=json.dumps({'serial': p_devserial}), headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
return(0)
def getorgdeviceinfo (p_apikey, p_shardurl, p_orgid, p_devserial):
#gets basic device info from org inventory. device does not need to be part of a network
r = requests.get('https://%s/api/v0/organizations/%s/inventory' % (p_shardurl, p_orgid), headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
returnvalue = {}
if r.status_code != requests.codes.ok:
returnvalue = {'serial':'null', 'model':'null'}
return(returnvalue)
rjson = r.json()
foundserial = False
for record in rjson:
if record['serial'] == p_devserial:
foundserial = True
returnvalue = {'mac': record['mac'], 'serial': record['serial'], 'networkId': record['networkId'], 'model': record['model'], 'claimedAt': record['claimedAt'], 'publicIp': record['publicIp']}
if not foundserial:
returnvalue = {'serial':'null', 'model':'null'}
return(returnvalue)
def setswportconfig(p_apikey, p_shardurl, p_devserial, p_portnum, p_portcfg):
#sets switchport configuration to match table given as parameter
validconfig = {}
for key, value in p_portcfg.items():
if value != '':
validconfig[key] = value
r = requests.put('https://%s/api/v0/devices/%s/switchPorts/%s' % (p_shardurl, p_devserial, p_portnum), data=json.dumps(validconfig), headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
return (0)
def setdevicedata(p_apikey, p_shardurl, p_nwid, p_devserial, p_field, p_value, p_movemarker):
#modifies value of device record. Returns the new value
#on failure returns one device record, with all values 'null'
#p_movemarker is boolean: True/False
movevalue = "false"
if p_movemarker:
movevalue = "true"
r = requests.put('https://%s/api/v0/networks/%s/devices/%s' % (p_shardurl, p_nwid, p_devserial), data=json.dumps({p_field: p_value, 'moveMapMarker': movevalue}), headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
if r.status_code != requests.codes.ok:
return ('null')
return('ok')
def migratedevices(p_apikey, p_shardurl, p_orgid, p_devt, p_mode):
#migrates configuration according to device table p_devt. has three modes according to p_mode
#p_mode = 'commit' : uploads configuration to Meraki cloud
#p_mode = 'simulation': prints intended changes to stdout without touching cloud. will fail if device not in inventory
#p_mode = 'simulation+claim': prints intended changes to stdout without touching cloud. will attempt to claim devices if they are not in inventory to get info
mode_commit = False
mode_claim = False
nwid = ''
portconfig = {}
max_migrated_ports = 0
if p_mode == 'commit':
mode_commit = True
mode_claim = True
elif p_mode == 'simulation+claim':
mode_claim = True
for dev in p_devt:
nwid = getnwid(p_apikey, p_shardurl, p_orgid, dev.netname)
if nwid == 'null' and mode_commit:
#if nw missing and commit mode, it needs to be created
#nwid == 'null' is OK if running simulation
#NOTE THAT TIMEZONE IS HARDCODED IN THE SCRIPT AT THIS POINT. THIS MAY CHANGE IN A LATER VERSION
nwparams = {'name': dev.netname, 'timeZone': 'Europe/Helsinki', 'tags': 'migratecomwarepy', 'organizationId': p_orgid, 'type': 'switch'}
createnw(p_apikey, p_shardurl, p_orgid, nwparams)
nwid = getnwid(p_apikey, p_shardurl, p_orgid, dev.netname)
#check if something went wrong
if nwid == 'null':
printusertext('ERROR: Unable to get ID for network %s' % dev.netname)
sys.exit(2)
#get model of device to check that it is a switch
devinfo = getorgdeviceinfo (p_apikey, p_shardurl, p_orgid, dev.serial)
if devinfo['model'] == 'null':
if mode_claim:
claimdeviceorg(p_apikey, p_shardurl, p_orgid, dev.serial)
devinfo = getorgdeviceinfo (p_apikey, p_shardurl, p_orgid, dev.serial)
if devinfo['model'] == 'null':
printusertext('ERROR: Unable to claim device %s' % dev.serial)
sys.exit(2)
else:
printusertext('ERROR: Device %s not part of org %s' % (dev.serial, p_orgid))
sys.exit(2)
if devinfo['model'][:2] != 'MS':
printusertext('ERROR: Device %s is type "%s": Not a switch' % (dev.serial, devinfo['model']))
sys.exit(2)
#at this stage we have nwid and device model
#the switch may or may not be part of a network, so cannot read number of ports dynamically.
#it will need to be done as part of a static configuration list
#assumes model name convention of MXxxx-yyzz, where xxx: model series, yy:number of ports, zz:poe
modelnumber = re.sub(r'[^0-9]','',devinfo['model'][:5])
portnumber = re.sub(r'[^0-9]','',devinfo['model'][6:])
if modelnumber == '220':
if portnumber == '8':
max_migrated_ports = 10
elif portnumber == '24':
max_migrated_ports = 28
elif portnumber == '48':
max_migrated_ports = 52
elif modelnumber == '225':
if portnumber == '24':
max_migrated_ports = 28
elif portnumber == '48':
max_migrated_ports = 52
elif modelnumber == '250':
if portnumber == '24':
max_migrated_ports = 28
elif portnumber == '48':
max_migrated_ports = 52
elif modelnumber == '350':
if portnumber == '24':
max_migrated_ports = 28
elif portnumber == '48':
max_migrated_ports = 52
elif modelnumber == '410':
if portnumber == '16':
max_migrated_ports = 18
elif portnumber == '32':
max_migrated_ports = 34
elif modelnumber == '425':
if portnumber == '16':
max_migrated_ports = 18
elif portnumber == '32':
max_migrated_ports = 34
else:
#if unknown device model, assume 0 uplinks as failsafe, until the script is updated to support it
intportnumber = int(portnumber)
#if Meraki switch nodel naming has changed from MSxxx-yy, the line below will fail
if intportnumber <= 48:
max_migrated_ports = intportnumber
#deal with port number mismatches
if len(dev.portcfg) < max_migrated_ports:
max_migrated_ports = len(dev.portcfg)
#now that we also know the MAC address of the device, we can also reset the hostname
#for devices that did not get a value by running extracthostname() previously
if dev.hostname == '':
dev.hostname = devinfo['mac']
#do preliminary stuff, like claiming device to nw or printing header
if mode_commit:
claimdevice(p_apikey, p_shardurl, nwid, dev.serial)
devinfo = getorgdeviceinfo (p_apikey, p_shardurl, p_orgid, dev.serial)
if devinfo['networkId'] != nwid:
printusertext('ERROR: Unable set network for device %s' % dev.serial)
sys.exit(2)
#set hostname. Don't worry if it fails
setdevicedata(p_apikey, p_shardurl, nwid, dev.serial, 'name', dev.hostname, 'false')
printusertext('INFO: Migrating device %s (name: %s), source %s%s' % (dev.serial, dev.hostname, dev.srcip, dev.srcfile))
else:
print('')
print('Migration target device %s (name: %s, %s) in network "%s"' % (dev.serial, dev.hostname, devinfo['model'],dev.netname))
print('Source: %s%s' % (dev.srcip, dev.srcfile))
print('Num Name Mode Enabled VLAN PoE VoiceVLAN TrnkAllowVLAN')
for i in range (0, max_migrated_ports):
portconfig = {'isolationEnabled': dev.portcfg[i].isolation, 'rstpEnabled': dev.portcfg[i].rstp, 'enabled': dev.portcfg[i].enabled, 'stpGuard': dev.portcfg[i].stpguard, 'accessPolicyNumber': '', 'type': dev.portcfg[i].mode, 'allowedVlans': dev.portcfg[i].allowedvlans, 'poeEnabled': dev.portcfg[i].poeenabled, 'name': dev.portcfg[i].name, 'tags': 'migratecomwarepy', 'number': dev.portcfg[i].number, 'vlan': dev.portcfg[i].vlan, 'voiceVlan': dev.portcfg[i].voicevlan}
if mode_commit:
setswportconfig(p_apikey, p_shardurl, dev.serial, dev.portcfg[i].number, portconfig)
else:
print('%s %s %s %s %s %s %s %s' % ("{:>3s}".format(portconfig['number']), "{:>20s}".format(portconfig['name']), "{:>7s}".format(portconfig['type']), "{:>6s}".format(portconfig['enabled']), "{:>5s}".format(portconfig['vlan']), "{:>7s}".format(portconfig['poeEnabled']), "{:>5s}".format(portconfig['voiceVlan']), portconfig['allowedVlans']))
return()
_orgname = 'null'
arg_initfile = '????'
arg_defuser = '\n'
arg_defpass = '\n'
arg_mode = 'simulation'
try:
opts, args = getopt.getopt(argv, 'hk:o:i:u:p:m:')
except getopt.GetoptError:
printhelp()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
printhelp()
sys.exit()
elif opt == '-k':
arg_apikey = arg
elif opt == '-o':
arg_orgname = arg
elif opt == '-i':
arg_initfile = arg
elif opt == '-u':
arg_defuser = arg
elif opt == '-p':
arg_defpass = arg
elif opt == '-m':
arg_mode = arg
if arg_apikey == 'null' or arg_orgname == 'null' or arg_initfile == '????':
printhelp()
sys.exit(2)
orgid = getorgid(arg_apikey, arg_orgname)
if orgid == 'null':
printusertext('ERROR: Fetching organization failed')
sys.exit(2)
shardurl = getshardurl(arg_apikey, orgid)
if shardurl == 'null':
printusertext('ERROR: Fetching Meraki cloud shard FQDN failed')
sys.exit(2)
devices = loadinitcfg(arg_initfile, arg_defuser, arg_defpass)
if len(devices) == 0:
printusertext('ERROR: No valid configuration in init file')
sys.exit(2)
for i in range(0, len(devices)):
if devices[i].srcip != '':
devices[i].rawcfg = loadcomwareconfig (devices[i].srcip, devices[i].srcuser, devices[i].srcpass)
else:
devices[i].rawcfg = loadcomwarecfgfile (devices[i].srcfile)
for dev in devices:
dev.hostname = extracthostname(dev.rawcfg)
dev.portcfg = extractportcfg(dev.rawcfg)
if arg_mode == 'simulation':
migratedevices(arg_apikey, shardurl, orgid, devices, 'simulation')
elif arg_mode == 'commit':
migratedevices(arg_apikey, shardurl, orgid, devices, 'commit')
elif arg_mode == 'simulation+claim':
migratedevices(arg_apikey, shardurl, orgid, devices, 'simulation+claim')
else:
printusertext('ERROR: Parameter -m: Operating mode not valid')
sys.exit(2)
printusertext('End of script.')
if __name__ == '__main__':
main(sys.argv[1:]) | true | true |
f73c8194e9946de53aa2ccb8d9a7e88d45fcca13 | 20,392 | py | Python | api/app/users/tests.py | Jean-Lytehouse/Lytehouse-Autocam | 74df801a652325be86e52e337c0f9471694da02a | [
"Apache-2.0"
] | null | null | null | api/app/users/tests.py | Jean-Lytehouse/Lytehouse-Autocam | 74df801a652325be86e52e337c0f9471694da02a | [
"Apache-2.0"
] | null | null | null | api/app/users/tests.py | Jean-Lytehouse/Lytehouse-Autocam | 74df801a652325be86e52e337c0f9471694da02a | [
"Apache-2.0"
] | null | null | null | import json
from datetime import datetime, timedelta
from app import app, db
from app.utils.testing import ApiTestCase
from app.users.models import AppUser, PasswordReset, UserCategory, Country, UserComment
from app.events.models import Event, EventRole
from app.applicationModel.models import ApplicationForm
from app.responses.models import Response
USER_DATA = {
'email': 'something@email.com',
'firstname': 'Some',
'lastname': 'Thing',
'user_title': 'Mr',
'nationality_country_id': 1,
'residence_country_id': 1,
'user_gender': 'Male',
'affiliation': 'University',
'department': 'Computer Science',
'user_disability': 'None',
'user_category_id': 1,
'user_primaryLanguage': 'Zulu',
'user_dateOfBirth': datetime(1984, 12, 12).strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
'password': '123456'
}
AUTH_DATA = {
'email': 'something@email.com',
'password': '123456'
}
class UserApiTest(ApiTestCase):
def seed_static_data(self):
db.session.add(UserCategory('Postdoc'))
db.session.add(Country('South Africa'))
self.event1 = Event('Indaba', 'Indaba Event',
datetime.now(), datetime.now())
self.event2 = Event('IndabaX', 'IndabaX Sudan',
datetime.now(), datetime.now())
db.session.add(self.event1)
db.session.add(self.event2)
db.session.commit()
self.event1_id = self.event1.id
self.event2_id = self.event2.id
db.session.flush()
def get_auth_header_for(self, email):
body = {
'email': email,
'password': 'abc'
}
response = self.app.post('api/v1/authenticate', data=body)
data = json.loads(response.data)
header = {'Authorization': data['token']}
return header
def test_registration(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
data = json.loads(response.data)
assert data['id'] == 1
assert len(data['token']) > 10
def test_duplicate_registration(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
assert response.status_code == 201
response = self.app.post('/api/v1/user', data=USER_DATA)
assert response.status_code == 409
def test_get_user(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
data = json.loads(response.data)
headers = {'Authorization': data['token']}
response = self.app.get('/api/v1/user', headers=headers)
data = json.loads(response.data)
assert data['email'] == 'something@email.com'
assert data['firstname'] == 'Some'
assert data['lastname'] == 'Thing'
assert data['user_title'] == 'Mr'
assert data['nationality_country'] == 'South Africa'
assert data['residence_country'] == 'South Africa'
assert data['user_gender'] == 'Male'
assert data['affiliation'] == 'University'
assert data['department'] == 'Computer Science'
assert data['user_disability'] == 'None'
assert data['user_category'] == 'Postdoc'
assert data['user_primaryLanguage'] == 'Zulu'
assert data['user_dateOfBirth'] == datetime(
1984, 12, 12).strftime('%Y-%m-%dT%H:%M:%S')
def test_update_user(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
data = json.loads(response.data)
assert response.status_code == 201
headers = {'Authorization': data['token']}
response = self.app.put('/api/v1/user', headers=headers, data={
'email': 'something@email.com',
'firstname': 'Updated',
'lastname': 'Updated',
'user_title': 'Mrs',
'nationality_country_id': 1,
'residence_country_id': 1,
'user_gender': 'Female',
'affiliation': 'Company',
'department': 'AI',
'user_disability': 'None',
'user_category_id': 1,
'user_primaryLanguage': 'Zulu',
'user_dateOfBirth': datetime(1984, 12, 12, 0, 0, 0).strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
'password': ''
})
assert response.status_code == 200
response = self.app.get('/api/v1/user', headers=headers)
data = json.loads(response.data)
assert data['email'] == 'something@email.com'
assert data['firstname'] == 'Updated'
assert data['lastname'] == 'Updated'
assert data['user_title'] == 'Mrs'
assert data['nationality_country'] == 'South Africa'
assert data['residence_country'] == 'South Africa'
assert data['user_gender'] == 'Female'
assert data['affiliation'] == 'Company'
assert data['department'] == 'AI'
assert data['user_disability'] == 'None'
assert data['user_category'] == 'Postdoc'
assert data['user_primaryLanguage'] == 'Zulu'
assert data['user_dateOfBirth'] == datetime(
1984, 12, 12, 0, 0, 0, 0).strftime('%Y-%m-%dT%H:%M:%S')
def test_authentication_deleted(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
data = json.loads(response.data)
assert response.status_code == 201
headers = {'Authorization': data['token']}
response = self.app.delete('api/v1/user', headers=headers)
assert response.status_code == 200
response = self.app.post('/api/v1/authenticate', data=AUTH_DATA)
assert response.status_code == 404
def test_authentication_unverified_email(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
assert response.status_code == 201
response = self.app.post('/api/v1/authenticate', data=AUTH_DATA)
assert response.status_code == 422
def test_authentication_wrong_password(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
data = json.loads(response.data)
assert response.status_code == 201
user = db.session.query(AppUser).filter(
AppUser.id == data['id']).first()
response = self.app.get(
'/api/v1/verify-email?token='+user.verify_token)
assert response.status_code == 201
response = self.app.post('/api/v1/authenticate', data={
'email': 'something@email.com',
'password': 'wrong'
})
assert response.status_code == 401
def test_authentication(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
data = json.loads(response.data)
assert response.status_code == 201
user = db.session.query(AppUser).filter(
AppUser.id == data['id']).first()
response = self.app.get(
'/api/v1/verify-email?token='+user.verify_token)
assert response.status_code == 201
response = self.app.post('/api/v1/authenticate', data=AUTH_DATA)
assert response.status_code == 200
def test_authentication_response(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
data = json.loads(response.data)
user = db.session.query(AppUser).filter(
AppUser.id == data['id']).first()
response = self.app.get(
'/api/v1/verify-email?token='+user.verify_token)
role1 = EventRole('admin', data['id'], self.event1_id)
role2 = EventRole('reviewer', data['id'], self.event2_id)
db.session.add(role1)
db.session.add(role2)
db.session.commit()
db.session.flush()
response = self.app.post('/api/v1/authenticate', data=AUTH_DATA)
data = json.loads(response.data)
self.assertEqual(data['firstname'], USER_DATA['firstname'])
self.assertEqual(data['lastname'], USER_DATA['lastname'])
self.assertEqual(data['title'], USER_DATA['user_title'])
self.assertEqual(data['roles'], [
{'event_id': self.event1_id, 'role': 'admin'},
{'event_id': self.event2_id, 'role': 'reviewer'},
])
def test_password_reset_user_does_not_exist(self):
response = self.app.post('/api/v1/password-reset/request', data={
'email': 'something@email.com'
})
assert response.status_code == 404
def test_password_reset_expired(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
data = json.loads(response.data)
assert response.status_code == 201
user = db.session.query(AppUser).filter(
AppUser.id == data['id']).first()
response = self.app.get(
'/api/v1/verify-email?token='+user.verify_token)
assert response.status_code == 201
response = self.app.post('/api/v1/password-reset/request', data={
'email': 'something@email.com'
})
assert response.status_code == 201
pw_reset = db.session.query(PasswordReset).first()
pw_reset.date = datetime.now() - timedelta(days=2)
db.session.commit()
response = self.app.post('/api/v1/password-reset/confirm', data={
'code': pw_reset.code,
'password': 'abc123'
})
assert response.status_code == 400
def test_password_reset(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
data = json.loads(response.data)
assert response.status_code == 201
user = db.session.query(AppUser).filter(
AppUser.id == data['id']).first()
response = self.app.get(
'/api/v1/verify-email?token='+user.verify_token)
assert response.status_code == 201
response = self.app.post('/api/v1/password-reset/request', data={
'email': 'something@email.com'
})
assert response.status_code == 201
pw_reset = db.session.query(PasswordReset).first()
response = self.app.post('/api/v1/password-reset/confirm', data={
'code': "bad code",
'password': 'abc123'
})
assert response.status_code == 418
response = self.app.post('/api/v1/password-reset/confirm', data={
'code': pw_reset.code,
'password': 'abc123'
})
assert response.status_code == 201
response = self.app.post('/api/v1/authenticate', data={
'email': 'something@email.com',
'password': 'abc123'
})
assert response.status_code == 200
def test_deletion(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
data = json.loads(response.data)
user_id = data['id']
headers = {'Authorization': data['token']}
response = self.app.delete('/api/v1/user', headers=headers)
assert response.status_code == 200
user = db.session.query(AppUser).filter(AppUser.id == user_id).first()
assert user.email == 'something@email.com'
assert user.is_deleted == True
def test_resend_verification_email(self):
self.seed_static_data()
self.app.post('/api/v1/user', data=USER_DATA)
response = self.app.get(
'/api/v1/resend-verification-email?email={}'.format(USER_DATA['email']))
assert response.status_code == 201
def test_resend_verification_email_no_user(self):
self.seed_static_data()
response = self.app.get(
'/api/v1/resend-verification-email?email={}'.format('nonexistant@dummy.com'))
assert response.status_code == 404
def setup_verified_user(self):
user = AppUser('something@email.com', 'Some', 'Thing', 'Mr',
1, 1, 'Male', 'University', 'Computer Science',
'None', 1, datetime(1984, 12, 12),
'English', '123456')
user.verify_token = 'existing token'
user.verify()
db.session.add(user)
db.session.commit()
def test_email_change_gets_new_token_and_is_unverified(self):
self.seed_static_data()
self.setup_verified_user()
response = self.app.post('/api/v1/authenticate', data=AUTH_DATA)
data = json.loads(response.data)
headers = {'Authorization': data['token']}
response = self.app.put('/api/v1/user', headers=headers, data={
'email': 'somethingnew@email.com',
'firstname': 'Some',
'lastname': 'Thing',
'user_title': 'Mr',
'nationality_country_id': 1,
'residence_country_id': 1,
'user_gender': 'Male',
'affiliation': 'University',
'department': 'Computer Science',
'user_disability': 'None',
'user_category_id': 1,
'user_primaryLanguage': 'Zulu',
'user_dateOfBirth': datetime(1984, 12, 12).strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
'password':''
})
self.assertEqual(response.status_code, 200)
user = db.session.query(AppUser).get(1)
self.assertEqual(user.email, 'somethingnew@email.com')
self.assertEqual(user.firstname, 'Some')
self.assertEqual(user.lastname, 'Thing')
self.assertEqual(user.user_title, 'Mr')
self.assertEqual(user.nationality_country_id, 1)
self.assertEqual(user.residence_country_id, 1)
self.assertEqual(user.user_gender, 'Male')
self.assertEqual(user.affiliation, 'University')
self.assertEqual(user.department, 'Computer Science')
self.assertEqual(user.user_disability, 'None')
self.assertEqual(user.user_category_id, 1)
self.assertEqual(user.user_primaryLanguage, 'Zulu')
self.assertEqual(user.user_dateOfBirth, datetime(1984, 12, 12))
self.assertEqual(user.verified_email, False)
self.assertNotEqual(user.verify_token, 'existing token')
def setup_responses(self):
application_forms = [
ApplicationForm(1, True, datetime(2019, 4, 12)),
ApplicationForm(2, False, datetime(2019, 4, 12))
]
db.session.add_all(application_forms)
candidate1 = AppUser('c1@c.com', 'candidate', '1', 'Mr', 1, 1, 'M', 'UWC', 'CS', 'NA', 1, datetime(1984, 12, 12), 'Eng', 'abc')
candidate2 = AppUser('c2@c.com', 'candidate', '2', 'Ms', 1, 1, 'F', 'RU', 'Chem', 'NA', 1, datetime(1984, 12, 12), 'Eng', 'abc')
candidate3 = AppUser('c3@c.com', 'candidate', '3', 'Mr', 1, 1, 'M', 'UFH', 'Phys', 'NA', 1, datetime(1984, 12, 12), 'Eng', 'abc')
event_admin = AppUser('ea@ea.com', 'event_admin', '1', 'Ms', 1, 1, 'F', 'NWU', 'Math', 'NA', 1, datetime(1984, 12, 12), 'Eng', 'abc')
users = [candidate1, candidate2, candidate3, event_admin]
for user in users:
user.verify()
db.session.add_all(users)
event_role = EventRole('admin', 4, 1)
db.session.add(event_role)
responses = [
Response(1, 1, True, datetime(2019, 4, 10)),
Response(1, 2, True, datetime(2019, 4, 9), True, datetime(2019, 4, 11)),
Response(2, 3, True)
]
db.session.add_all(responses)
db.session.commit()
def test_user_profile_list(self):
self.seed_static_data()
self.setup_responses()
header = self.get_auth_header_for('ea@ea.com')
params = {'event_id': 1}
response = self.app.get('/api/v1/userprofilelist', headers=header, data=params)
data = json.loads(response.data)
data = sorted(data, key=lambda k: k['user_id'])
self.assertEqual(len(data), 2)
self.assertEqual(data[0]['user_id'], 1)
self.assertEqual(data[0]['is_submitted'], True)
self.assertEqual(data[0]['submitted_timestamp'], u'2019-04-10T00:00:00')
self.assertEqual(data[0]['is_withdrawn'], False)
self.assertEqual(data[0]['withdrawn_timestamp'], None)
self.assertEqual(data[1]['user_id'], 2)
self.assertEqual(data[1]['is_submitted'], True)
self.assertEqual(data[1]['submitted_timestamp'], u'2019-04-09T00:00:00')
self.assertEqual(data[1]['is_withdrawn'], True)
self.assertEqual(data[1]['withdrawn_timestamp'], u'2019-04-11T00:00:00')
class UserCommentAPITest(ApiTestCase):
def seed_static_data(self):
db.session.add(UserCategory('Postdoc'))
db.session.add(Country('South Africa'))
self.event1 = Event('Indaba', 'Indaba Event',
datetime.now(), datetime.now())
db.session.add(self.event1)
db.session.commit()
self.event1_id = self.event1.id
user_data1 = USER_DATA.copy()
response = self.app.post('/api/v1/user', data=user_data1)
self.user1 = json.loads(response.data)
user_data2 = USER_DATA.copy()
user_data2['email'] = 'person2@person.com'
user_data2['firstname'] = 'Person'
user_data2['lastname'] = 'Two'
response = self.app.post('/api/v1/user', data=user_data2)
self.user2 = json.loads(response.data)
user2 = db.session.query(AppUser).filter(AppUser.email == 'person2@person.com').first()
user2.is_admin = True
db.session.flush()
def seed_comments(self):
self.comment1 = UserComment(self.event1_id, self.user1['id'], self.user2['id'], datetime.now(), 'Comment 1')
self.comment2 = UserComment(self.event1_id, self.user1['id'], self.user2['id'], datetime.now(), 'Comment 2')
self.comment3 = UserComment(self.event1_id, self.user2['id'], self.user1['id'], datetime.now(), 'Comment 3')
db.session.add_all([self.comment1, self.comment2, self.comment3])
db.session.flush()
def test_post_comment(self):
with app.app_context():
self.seed_static_data()
params = {'event_id': self.event1_id, 'user_id': self.user2['id'], 'comment': 'Comment1'}
print('Sending params: ', params)
response = self.app.post('/api/v1/user-comment', headers={'Authorization': self.user1['token']}, data=json.dumps(params), content_type='application/json')
data = json.loads(response.data)
self.assertEqual(response.status_code, 201)
def test_get_forbidden(self):
with app.app_context():
self.seed_static_data()
self.seed_comments()
params = {'event_id': self.event1_id, 'user_id': self.user2['id']}
response = self.app.get('/api/v1/user-comment', headers={'Authorization': self.user1['token']}, query_string=params)
self.assertEqual(response.status_code, 403)
def test_get_comments(self):
with app.app_context():
self.seed_static_data()
self.seed_comments()
params = {'event_id': self.event1_id, 'user_id': self.user1['id']}
response = self.app.get('/api/v1/user-comment', headers={'Authorization': self.user2['token']}, query_string=params)
comment_list = json.loads(response.data)
self.assertEqual(len(comment_list), 2)
self.assertEqual(comment_list[0]['event_id'], self.comment1.event_id)
self.assertEqual(comment_list[0]['user_id'], self.comment1.user_id)
self.assertEqual(comment_list[0]['comment_by_user_firstname'], self.user2['firstname'])
self.assertEqual(comment_list[0]['comment_by_user_lastname'], self.user2['lastname'])
self.assertEqual(comment_list[0]['comment'], self.comment1.comment)
self.assertEqual(comment_list[1]['event_id'], self.comment2.event_id)
self.assertEqual(comment_list[1]['user_id'], self.comment2.user_id)
self.assertEqual(comment_list[1]['comment_by_user_firstname'], self.user2['firstname'])
self.assertEqual(comment_list[1]['comment_by_user_lastname'], self.user2['lastname'])
self.assertEqual(comment_list[1]['comment'], self.comment2.comment)
| 39.44294 | 166 | 0.603276 | import json
from datetime import datetime, timedelta
from app import app, db
from app.utils.testing import ApiTestCase
from app.users.models import AppUser, PasswordReset, UserCategory, Country, UserComment
from app.events.models import Event, EventRole
from app.applicationModel.models import ApplicationForm
from app.responses.models import Response
USER_DATA = {
'email': 'something@email.com',
'firstname': 'Some',
'lastname': 'Thing',
'user_title': 'Mr',
'nationality_country_id': 1,
'residence_country_id': 1,
'user_gender': 'Male',
'affiliation': 'University',
'department': 'Computer Science',
'user_disability': 'None',
'user_category_id': 1,
'user_primaryLanguage': 'Zulu',
'user_dateOfBirth': datetime(1984, 12, 12).strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
'password': '123456'
}
AUTH_DATA = {
'email': 'something@email.com',
'password': '123456'
}
class UserApiTest(ApiTestCase):
def seed_static_data(self):
db.session.add(UserCategory('Postdoc'))
db.session.add(Country('South Africa'))
self.event1 = Event('Indaba', 'Indaba Event',
datetime.now(), datetime.now())
self.event2 = Event('IndabaX', 'IndabaX Sudan',
datetime.now(), datetime.now())
db.session.add(self.event1)
db.session.add(self.event2)
db.session.commit()
self.event1_id = self.event1.id
self.event2_id = self.event2.id
db.session.flush()
def get_auth_header_for(self, email):
body = {
'email': email,
'password': 'abc'
}
response = self.app.post('api/v1/authenticate', data=body)
data = json.loads(response.data)
header = {'Authorization': data['token']}
return header
def test_registration(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
data = json.loads(response.data)
assert data['id'] == 1
assert len(data['token']) > 10
def test_duplicate_registration(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
assert response.status_code == 201
response = self.app.post('/api/v1/user', data=USER_DATA)
assert response.status_code == 409
def test_get_user(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
data = json.loads(response.data)
headers = {'Authorization': data['token']}
response = self.app.get('/api/v1/user', headers=headers)
data = json.loads(response.data)
assert data['email'] == 'something@email.com'
assert data['firstname'] == 'Some'
assert data['lastname'] == 'Thing'
assert data['user_title'] == 'Mr'
assert data['nationality_country'] == 'South Africa'
assert data['residence_country'] == 'South Africa'
assert data['user_gender'] == 'Male'
assert data['affiliation'] == 'University'
assert data['department'] == 'Computer Science'
assert data['user_disability'] == 'None'
assert data['user_category'] == 'Postdoc'
assert data['user_primaryLanguage'] == 'Zulu'
assert data['user_dateOfBirth'] == datetime(
1984, 12, 12).strftime('%Y-%m-%dT%H:%M:%S')
def test_update_user(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
data = json.loads(response.data)
assert response.status_code == 201
headers = {'Authorization': data['token']}
response = self.app.put('/api/v1/user', headers=headers, data={
'email': 'something@email.com',
'firstname': 'Updated',
'lastname': 'Updated',
'user_title': 'Mrs',
'nationality_country_id': 1,
'residence_country_id': 1,
'user_gender': 'Female',
'affiliation': 'Company',
'department': 'AI',
'user_disability': 'None',
'user_category_id': 1,
'user_primaryLanguage': 'Zulu',
'user_dateOfBirth': datetime(1984, 12, 12, 0, 0, 0).strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
'password': ''
})
assert response.status_code == 200
response = self.app.get('/api/v1/user', headers=headers)
data = json.loads(response.data)
assert data['email'] == 'something@email.com'
assert data['firstname'] == 'Updated'
assert data['lastname'] == 'Updated'
assert data['user_title'] == 'Mrs'
assert data['nationality_country'] == 'South Africa'
assert data['residence_country'] == 'South Africa'
assert data['user_gender'] == 'Female'
assert data['affiliation'] == 'Company'
assert data['department'] == 'AI'
assert data['user_disability'] == 'None'
assert data['user_category'] == 'Postdoc'
assert data['user_primaryLanguage'] == 'Zulu'
assert data['user_dateOfBirth'] == datetime(
1984, 12, 12, 0, 0, 0, 0).strftime('%Y-%m-%dT%H:%M:%S')
def test_authentication_deleted(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
data = json.loads(response.data)
assert response.status_code == 201
headers = {'Authorization': data['token']}
response = self.app.delete('api/v1/user', headers=headers)
assert response.status_code == 200
response = self.app.post('/api/v1/authenticate', data=AUTH_DATA)
assert response.status_code == 404
def test_authentication_unverified_email(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
assert response.status_code == 201
response = self.app.post('/api/v1/authenticate', data=AUTH_DATA)
assert response.status_code == 422
def test_authentication_wrong_password(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
data = json.loads(response.data)
assert response.status_code == 201
user = db.session.query(AppUser).filter(
AppUser.id == data['id']).first()
response = self.app.get(
'/api/v1/verify-email?token='+user.verify_token)
assert response.status_code == 201
response = self.app.post('/api/v1/authenticate', data={
'email': 'something@email.com',
'password': 'wrong'
})
assert response.status_code == 401
def test_authentication(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
data = json.loads(response.data)
assert response.status_code == 201
user = db.session.query(AppUser).filter(
AppUser.id == data['id']).first()
response = self.app.get(
'/api/v1/verify-email?token='+user.verify_token)
assert response.status_code == 201
response = self.app.post('/api/v1/authenticate', data=AUTH_DATA)
assert response.status_code == 200
def test_authentication_response(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
data = json.loads(response.data)
user = db.session.query(AppUser).filter(
AppUser.id == data['id']).first()
response = self.app.get(
'/api/v1/verify-email?token='+user.verify_token)
role1 = EventRole('admin', data['id'], self.event1_id)
role2 = EventRole('reviewer', data['id'], self.event2_id)
db.session.add(role1)
db.session.add(role2)
db.session.commit()
db.session.flush()
response = self.app.post('/api/v1/authenticate', data=AUTH_DATA)
data = json.loads(response.data)
self.assertEqual(data['firstname'], USER_DATA['firstname'])
self.assertEqual(data['lastname'], USER_DATA['lastname'])
self.assertEqual(data['title'], USER_DATA['user_title'])
self.assertEqual(data['roles'], [
{'event_id': self.event1_id, 'role': 'admin'},
{'event_id': self.event2_id, 'role': 'reviewer'},
])
def test_password_reset_user_does_not_exist(self):
response = self.app.post('/api/v1/password-reset/request', data={
'email': 'something@email.com'
})
assert response.status_code == 404
def test_password_reset_expired(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
data = json.loads(response.data)
assert response.status_code == 201
user = db.session.query(AppUser).filter(
AppUser.id == data['id']).first()
response = self.app.get(
'/api/v1/verify-email?token='+user.verify_token)
assert response.status_code == 201
response = self.app.post('/api/v1/password-reset/request', data={
'email': 'something@email.com'
})
assert response.status_code == 201
pw_reset = db.session.query(PasswordReset).first()
pw_reset.date = datetime.now() - timedelta(days=2)
db.session.commit()
response = self.app.post('/api/v1/password-reset/confirm', data={
'code': pw_reset.code,
'password': 'abc123'
})
assert response.status_code == 400
def test_password_reset(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
data = json.loads(response.data)
assert response.status_code == 201
user = db.session.query(AppUser).filter(
AppUser.id == data['id']).first()
response = self.app.get(
'/api/v1/verify-email?token='+user.verify_token)
assert response.status_code == 201
response = self.app.post('/api/v1/password-reset/request', data={
'email': 'something@email.com'
})
assert response.status_code == 201
pw_reset = db.session.query(PasswordReset).first()
response = self.app.post('/api/v1/password-reset/confirm', data={
'code': "bad code",
'password': 'abc123'
})
assert response.status_code == 418
response = self.app.post('/api/v1/password-reset/confirm', data={
'code': pw_reset.code,
'password': 'abc123'
})
assert response.status_code == 201
response = self.app.post('/api/v1/authenticate', data={
'email': 'something@email.com',
'password': 'abc123'
})
assert response.status_code == 200
def test_deletion(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
data = json.loads(response.data)
user_id = data['id']
headers = {'Authorization': data['token']}
response = self.app.delete('/api/v1/user', headers=headers)
assert response.status_code == 200
user = db.session.query(AppUser).filter(AppUser.id == user_id).first()
assert user.email == 'something@email.com'
assert user.is_deleted == True
def test_resend_verification_email(self):
self.seed_static_data()
self.app.post('/api/v1/user', data=USER_DATA)
response = self.app.get(
'/api/v1/resend-verification-email?email={}'.format(USER_DATA['email']))
assert response.status_code == 201
def test_resend_verification_email_no_user(self):
self.seed_static_data()
response = self.app.get(
'/api/v1/resend-verification-email?email={}'.format('nonexistant@dummy.com'))
assert response.status_code == 404
def setup_verified_user(self):
user = AppUser('something@email.com', 'Some', 'Thing', 'Mr',
1, 1, 'Male', 'University', 'Computer Science',
'None', 1, datetime(1984, 12, 12),
'English', '123456')
user.verify_token = 'existing token'
user.verify()
db.session.add(user)
db.session.commit()
def test_email_change_gets_new_token_and_is_unverified(self):
self.seed_static_data()
self.setup_verified_user()
response = self.app.post('/api/v1/authenticate', data=AUTH_DATA)
data = json.loads(response.data)
headers = {'Authorization': data['token']}
response = self.app.put('/api/v1/user', headers=headers, data={
'email': 'somethingnew@email.com',
'firstname': 'Some',
'lastname': 'Thing',
'user_title': 'Mr',
'nationality_country_id': 1,
'residence_country_id': 1,
'user_gender': 'Male',
'affiliation': 'University',
'department': 'Computer Science',
'user_disability': 'None',
'user_category_id': 1,
'user_primaryLanguage': 'Zulu',
'user_dateOfBirth': datetime(1984, 12, 12).strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
'password':''
})
self.assertEqual(response.status_code, 200)
user = db.session.query(AppUser).get(1)
self.assertEqual(user.email, 'somethingnew@email.com')
self.assertEqual(user.firstname, 'Some')
self.assertEqual(user.lastname, 'Thing')
self.assertEqual(user.user_title, 'Mr')
self.assertEqual(user.nationality_country_id, 1)
self.assertEqual(user.residence_country_id, 1)
self.assertEqual(user.user_gender, 'Male')
self.assertEqual(user.affiliation, 'University')
self.assertEqual(user.department, 'Computer Science')
self.assertEqual(user.user_disability, 'None')
self.assertEqual(user.user_category_id, 1)
self.assertEqual(user.user_primaryLanguage, 'Zulu')
self.assertEqual(user.user_dateOfBirth, datetime(1984, 12, 12))
self.assertEqual(user.verified_email, False)
self.assertNotEqual(user.verify_token, 'existing token')
def setup_responses(self):
application_forms = [
ApplicationForm(1, True, datetime(2019, 4, 12)),
ApplicationForm(2, False, datetime(2019, 4, 12))
]
db.session.add_all(application_forms)
candidate1 = AppUser('c1@c.com', 'candidate', '1', 'Mr', 1, 1, 'M', 'UWC', 'CS', 'NA', 1, datetime(1984, 12, 12), 'Eng', 'abc')
candidate2 = AppUser('c2@c.com', 'candidate', '2', 'Ms', 1, 1, 'F', 'RU', 'Chem', 'NA', 1, datetime(1984, 12, 12), 'Eng', 'abc')
candidate3 = AppUser('c3@c.com', 'candidate', '3', 'Mr', 1, 1, 'M', 'UFH', 'Phys', 'NA', 1, datetime(1984, 12, 12), 'Eng', 'abc')
event_admin = AppUser('ea@ea.com', 'event_admin', '1', 'Ms', 1, 1, 'F', 'NWU', 'Math', 'NA', 1, datetime(1984, 12, 12), 'Eng', 'abc')
users = [candidate1, candidate2, candidate3, event_admin]
for user in users:
user.verify()
db.session.add_all(users)
event_role = EventRole('admin', 4, 1)
db.session.add(event_role)
responses = [
Response(1, 1, True, datetime(2019, 4, 10)),
Response(1, 2, True, datetime(2019, 4, 9), True, datetime(2019, 4, 11)),
Response(2, 3, True)
]
db.session.add_all(responses)
db.session.commit()
def test_user_profile_list(self):
self.seed_static_data()
self.setup_responses()
header = self.get_auth_header_for('ea@ea.com')
params = {'event_id': 1}
response = self.app.get('/api/v1/userprofilelist', headers=header, data=params)
data = json.loads(response.data)
data = sorted(data, key=lambda k: k['user_id'])
self.assertEqual(len(data), 2)
self.assertEqual(data[0]['user_id'], 1)
self.assertEqual(data[0]['is_submitted'], True)
self.assertEqual(data[0]['submitted_timestamp'], u'2019-04-10T00:00:00')
self.assertEqual(data[0]['is_withdrawn'], False)
self.assertEqual(data[0]['withdrawn_timestamp'], None)
self.assertEqual(data[1]['user_id'], 2)
self.assertEqual(data[1]['is_submitted'], True)
self.assertEqual(data[1]['submitted_timestamp'], u'2019-04-09T00:00:00')
self.assertEqual(data[1]['is_withdrawn'], True)
self.assertEqual(data[1]['withdrawn_timestamp'], u'2019-04-11T00:00:00')
class UserCommentAPITest(ApiTestCase):
    """Tests for the user-comment endpoint (/api/v1/user-comment)."""

    def seed_static_data(self):
        # Reference data plus two users created through the user API.
        # User 2 is made an admin; the GET tests below show that only an
        # admin may read comments made about another user.
        db.session.add(UserCategory('Postdoc'))
        db.session.add(Country('South Africa'))
        self.event1 = Event('Indaba', 'Indaba Event',
                            datetime.now(), datetime.now())
        db.session.add(self.event1)
        db.session.commit()
        self.event1_id = self.event1.id
        user_data1 = USER_DATA.copy()
        response = self.app.post('/api/v1/user', data=user_data1)
        self.user1 = json.loads(response.data)
        user_data2 = USER_DATA.copy()
        user_data2['email'] = 'person2@person.com'
        user_data2['firstname'] = 'Person'
        user_data2['lastname'] = 'Two'
        response = self.app.post('/api/v1/user', data=user_data2)
        self.user2 = json.loads(response.data)
        user2 = db.session.query(AppUser).filter(AppUser.email == 'person2@person.com').first()
        user2.is_admin = True
        db.session.flush()

    def seed_comments(self):
        # Three comments: comment1/comment2 are about user 1 (written by
        # user 2); comment3 is about user 2 (written by user 1).
        self.comment1 = UserComment(self.event1_id, self.user1['id'], self.user2['id'], datetime.now(), 'Comment 1')
        self.comment2 = UserComment(self.event1_id, self.user1['id'], self.user2['id'], datetime.now(), 'Comment 2')
        self.comment3 = UserComment(self.event1_id, self.user2['id'], self.user1['id'], datetime.now(), 'Comment 3')
        db.session.add_all([self.comment1, self.comment2, self.comment3])
        db.session.flush()

    def test_post_comment(self):
        # A regular authenticated user may post a comment about another user.
        with app.app_context():
            self.seed_static_data()
            params = {'event_id': self.event1_id, 'user_id': self.user2['id'], 'comment': 'Comment1'}
            print('Sending params: ', params)
            response = self.app.post('/api/v1/user-comment', headers={'Authorization': self.user1['token']}, data=json.dumps(params), content_type='application/json')
            data = json.loads(response.data)
            self.assertEqual(response.status_code, 201)

    def test_get_forbidden(self):
        # A non-admin user may not read comments about another user.
        with app.app_context():
            self.seed_static_data()
            self.seed_comments()
            params = {'event_id': self.event1_id, 'user_id': self.user2['id']}
            response = self.app.get('/api/v1/user-comment', headers={'Authorization': self.user1['token']}, query_string=params)
            self.assertEqual(response.status_code, 403)

    def test_get_comments(self):
        # An admin gets every comment about the requested user, with the
        # commenting user's name attached.
        with app.app_context():
            self.seed_static_data()
            self.seed_comments()
            params = {'event_id': self.event1_id, 'user_id': self.user1['id']}
            response = self.app.get('/api/v1/user-comment', headers={'Authorization': self.user2['token']}, query_string=params)
            comment_list = json.loads(response.data)
            # comment3 is about user 2 and must be excluded.
            self.assertEqual(len(comment_list), 2)
            self.assertEqual(comment_list[0]['event_id'], self.comment1.event_id)
            self.assertEqual(comment_list[0]['user_id'], self.comment1.user_id)
            self.assertEqual(comment_list[0]['comment_by_user_firstname'], self.user2['firstname'])
            self.assertEqual(comment_list[0]['comment_by_user_lastname'], self.user2['lastname'])
            self.assertEqual(comment_list[0]['comment'], self.comment1.comment)
            self.assertEqual(comment_list[1]['event_id'], self.comment2.event_id)
            self.assertEqual(comment_list[1]['user_id'], self.comment2.user_id)
            self.assertEqual(comment_list[1]['comment_by_user_firstname'], self.user2['firstname'])
            self.assertEqual(comment_list[1]['comment_by_user_lastname'], self.user2['lastname'])
            self.assertEqual(comment_list[1]['comment'], self.comment2.comment)
| true | true |
f73c82e8143ed57e0f378ea42933a36fbdfc4f0c | 8,287 | py | Python | TUI/TCC/SlewWdg/RotWdg.py | StarkillerX42/stui | 668628cf7539e7d2be12846033141e4eb8616fe1 | [
"BSD-3-Clause"
] | null | null | null | TUI/TCC/SlewWdg/RotWdg.py | StarkillerX42/stui | 668628cf7539e7d2be12846033141e4eb8616fe1 | [
"BSD-3-Clause"
] | null | null | null | TUI/TCC/SlewWdg/RotWdg.py | StarkillerX42/stui | 668628cf7539e7d2be12846033141e4eb8616fe1 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""Allows the user to specify rotation type and rotation angle.
History:
2001-11-05 ROwen First version with history.
2002-06-11 ROwen Disables invalid rot types based on coordSysvar.
2002-06-25 ROwen Removed an unneeded keyword from inputCont.
2002-07-31 ROwen Modified to use the RO.CoordSys module.
2002-08-02 ROwen Changed the default angle to from 0 to "0"
(no decimal pt shown)
2002-08-02 ROwen Mod. rot. type menu to take focus and have a separator.
2002-11-15 ROwen Mod. to use ROOptionMenu and added help strings.
2002-11-26 ROwen Changed to URL-based help.
2003-03-11 ROwen Changed to use OptionMenu instead of ROOptionMenu.
2003-03-21 ROwen Fixed accessing of menu from rotTypeWdg.
2003-03-26 ROwen Use the tcc model to disable wdg if no rot;
this means RotWdg now needs the dispatcher.
2003-04-14 ROwen Modified to use TUI.TCC.UserModel.
2003-04-28 ROwen Added rotType to user model;
added update of rotator limits for mount rotation;
added disable of rot angle if rot type is None.
2003-06-09 ROwen Removed dispatcher arg.
2003-06-13 ROwen Implemented helpText.
2003-07-10 ROwen Modified to use overhauled RO.InputCont.
2003-10-23 ROwen Modified to allow abbreviations.
2003-10-24 ROwen Added userModel input.
2005-08-01 ROwen Corrected an indentation inconsistency.
2008-04-28 ROwen Strip "+" symbols from values since the TCC can't handle them.
2009-04-01 ROwen Updated test code to use TUI.Base.TestDispatcher.
2010-03-12 ROwen Changed to use Models.getModel.
2010-06-28 ROwen Removed duplicate import (thanks to pychecker).
"""
import Tkinter
import RO.CoordSys
import RO.InputCont
import RO.Wdg
import RO.StringUtil
import TUI.Models
import TUI.TCC.UserModel
# Rotation-type identifiers (strings sent to the TCC as the /RotType value).
_rt_Object = "Object"
_rt_Horizon = "Horizon"
_rt_Mount = "Mount"
_rt_None = "None"
# Entries for the rotation-type option menu; the None entry produces a
# separator between the coordinate-following and fixed/none types.
_RotTypeMenuItems = (
    _rt_Object,
    _rt_Horizon,
    None,
    _rt_Mount,
    _rt_None,
)
# Per-item help strings for the rotation-type menu.
_RotTypeHelpDict = {
    _rt_Object: "Rotate with object",
    _rt_Horizon: "Rotate with horizon",
    _rt_Mount: "Move rotator to a fixed angle",
    _rt_None: "Leave the rotator where it is",
}
# Help strings for the rotation-angle entry, keyed by rotation type
# (no entry for "None": the angle widget is disabled in that case).
_RotAngHelpDict = {
    _rt_Object: "Angle of object with respect to the instrument",
    _rt_Horizon: "Angle of az/alt with respect to the instrument",
    _rt_Mount: "Angle sent to the rotator controller",
}
_HelpPrefix = "Telescope/SlewWin/index.html#"
# Default rotator angle limits (deg) used for non-Mount rotation types.
_StdRotLim = [-360, 360]
class RotWdg (RO.Wdg.InputContFrame):
    """Allows the user to specify rotation type and rotation angle.

    Inputs:
    - master        master Tk widget -- typically a frame or window
    - userModel     a TUI.TCC.UserModel; specify only if global model
                    not wanted (e.g. for checking catalog values);
                    if specified, assumes rot exists with std limits
    - **kargs       keyword arguments for Tkinter.Frame
    """
    # rotation types
    def __init__ (self,
        master,
        userModel = None,
    **kargs):
        RO.Wdg.InputContFrame.__init__(self, master, **kargs)
        # Most recently seen coordinate system name (None until first callback).
        self.coordSys = None
        # Rotator limits (deg); replaced by the TCC's rotLim when known.
        self.rotLim = _StdRotLim
        # Whether the current instrument port has a rotator.
        self.rotExists = True
        self.userModel = userModel or TUI.TCC.UserModel.Model()
        # Rotation angle entry (degrees).
        self.rotAngWdg = RO.Wdg.FloatEntry(self,
            minValue = self.rotLim[0],
            maxValue = self.rotLim[1],
            defValue = "0",
            defFormat = "%.2f",
            helpURL = _HelpPrefix + "RotAngle",
            minMenu = "Minimum",
            maxMenu = "Maximum",
        )
        self.rotAngWdg.grid(row=0, column=0)
        Tkinter.Label(self, text=RO.StringUtil.DegStr).grid(row=0, column=1)
        # Rotation type menu; shares its Tk variable with the user model so
        # external changes to rotType update the menu and vice versa.
        self.rotTypeWdg = RO.Wdg.OptionMenu(self,
            items = _RotTypeMenuItems,
            defValue = _rt_Object,
            abbrevOK = True,
            ignoreCase = True,
            var = self.userModel.rotType.getVar(),
            helpText = [_RotTypeHelpDict.get(item) for item in _RotTypeMenuItems],
            helpURL = "Telescope/RotTypes.html",
        )
        self.rotTypeWdg.grid(row=0, column=2)
        if not userModel:
            # uses global user model,
            # hence wants normal connection to rot info
            tccModel = TUI.Models.getModel("tcc")
            tccModel.ipConfig.addCallback(self._ipConfigCallback, callNow=False)
            tccModel.rotLim.addCallback(self._rotLimCallback)
        self.userModel.coordSysName.addCallback(self._coordSysChanged)
        self.userModel.rotType.addCallback(self._rotTypeChanged)
        # create a set of input widget containers
        # for easy set/get of data
        self.inputCont = RO.InputCont.ContList (
            conts = [
                RO.InputCont.WdgCont (
                    name = "RotAngle",
                    wdgs = self.rotAngWdg,
                    omitDef = False,
                    formatFunc = RO.InputCont.VMSQualFmt(stripPlusses=True),
                ),
                RO.InputCont.WdgCont (
                    name = "RotType",
                    wdgs = self.rotTypeWdg,
                    omitDef = False,
                    formatFunc = RO.InputCont.VMSQualFmt(),
                ),
            ],
            formatFunc = RO.InputCont.BasicContListFmt(valSep = ""),
        )
    def _ipConfigCallback(self, keyVar):
        """Handle the TCC ipConfig keyword: record whether a rotator exists."""
        #print "%s._ipConfigCallback(%s)" % (self.__class__.__name__, keyVar)
        if not keyVar.isCurrent:
            return
        ipConfig = keyVar[0]
        # First character "t"/"T" means the port has a rotator.
        self.rotExists = ipConfig.lower()[0] == "t"
        self._setEnable()
    def _rotLimCallback(self, keyVar):
        """Handle the TCC rotLim keyword: record the rotator limits (deg)."""
        rotLim = keyVar.valueList
        if None in rotLim:
            return
        self.rotLim = rotLim[0:2]
        # Re-apply limits in case the current rotation type is Mount.
        self._rotTypeChanged(self.userModel.rotType.get())
    def _rotTypeChanged(self, rotType):
        """Handle a change of rotation type: set angle limits, enabling
        and context-sensitive help."""
        if rotType == "Mount":
            self.rotAngWdg.setRange(*self.rotLim)
        else:
            self.rotAngWdg.setRange(*_StdRotLim)
        self._setEnable()
        self.rotAngWdg.helpText = _RotAngHelpDict.get(rotType)
    def _setEnable(self):
        """Enable or disable the widgets based on rotator existence/type."""
        rotType = self.userModel.rotType.get()
        self.rotTypeWdg.setEnable(self.rotExists)
        # The angle entry is meaningless when the rotation type is "None".
        self.rotAngWdg.setEnable(self.rotExists and (rotType != "None"))
    def _coordSysChanged (self, coordSys):
        """Handle a change of coordinate system: for physical or mount
        coordinates only Mount and None rotation are allowed, so the
        Object and Horizon menu entries are disabled."""
        if coordSys == self.coordSys:
            return
        rotType = self.userModel.rotType.get()
        rotTypeMenu = self.rotTypeWdg.getMenu()
        physMountCSysSet = (RO.CoordSys.Physical, RO.CoordSys.Mount)
        if coordSys in physMountCSysSet:
            if self.coordSys not in physMountCSysSet:
                # changed from all acceptable or unknown to some acceptable
                # only phys, mount and none are acceptable; disable the others
                # and try to set a default of mount, no value, value required
                rotTypeMenu.entryconfigure(0, state="disabled")
                rotTypeMenu.entryconfigure(1, state="disabled")
                if rotType in (_rt_Object, _rt_Horizon):
                    self.userModel.rotType.set(_rt_None)
        else:
            if self.coordSys in physMountCSysSet:
                # changed from some acceptable or unknown to all acceptable
                # all are acceptable; default is 0 deg Obj
                rotTypeMenu.entryconfigure(0, state="normal")
                rotTypeMenu.entryconfigure(1, state="normal")
                if rotType != _rt_None:
                    self.userModel.rotType.set(_rt_Object)
        self.coordSys = coordSys
if __name__ == "__main__":
    # Interactive test harness: show the widget plus buttons to restore
    # defaults and print the formatted command qualifiers.
    import TUI.Base.TestDispatcher
    testDispatcher = TUI.Base.TestDispatcher.TestDispatcher("tcc")
    tuiModel = testDispatcher.tuiModel
    root = tuiModel.tkRoot
    def restoreDefault():
        # Reset the widget to its default values.
        rotWdg.restoreDefault()
    def printOptions(*args):
        # Print the formatted rotation qualifiers (e.g. /RotAngle/RotType).
        print(rotWdg.getString())
    getButton = Tkinter.Button (root, command=restoreDefault, text="Defaults")
    getButton.pack()
    # NOTE(review): getButton is rebound; harmless because pack() keeps the
    # first button alive via its parent.
    getButton = Tkinter.Button (root, command=printOptions, text="Print Options")
    getButton.pack()
    rotWdg = RotWdg(root)
    rotWdg.pack()
    tuiModel.reactor.run()
| 36.831111 | 82 | 0.624231 |
import Tkinter
import RO.CoordSys
import RO.InputCont
import RO.Wdg
import RO.StringUtil
import TUI.Models
import TUI.TCC.UserModel
_rt_Object = "Object"
_rt_Horizon = "Horizon"
_rt_Mount = "Mount"
_rt_None = "None"
_RotTypeMenuItems = (
_rt_Object,
_rt_Horizon,
None,
_rt_Mount,
_rt_None,
)
_RotTypeHelpDict = {
_rt_Object: "Rotate with object",
_rt_Horizon: "Rotate with horizon",
_rt_Mount: "Move rotator to a fixed angle",
_rt_None: "Leave the rotator where it is",
}
_RotAngHelpDict = {
_rt_Object: "Angle of object with respect to the instrument",
_rt_Horizon: "Angle of az/alt with respect to the instrument",
_rt_Mount: "Angle sent to the rotator controller",
}
_HelpPrefix = "Telescope/SlewWin/index.html#"
_StdRotLim = [-360, 360]
class RotWdg (RO.Wdg.InputContFrame):
def __init__ (self,
master,
userModel = None,
**kargs):
RO.Wdg.InputContFrame.__init__(self, master, **kargs)
self.coordSys = None
self.rotLim = _StdRotLim
self.rotExists = True
self.userModel = userModel or TUI.TCC.UserModel.Model()
self.rotAngWdg = RO.Wdg.FloatEntry(self,
minValue = self.rotLim[0],
maxValue = self.rotLim[1],
defValue = "0",
defFormat = "%.2f",
helpURL = _HelpPrefix + "RotAngle",
minMenu = "Minimum",
maxMenu = "Maximum",
)
self.rotAngWdg.grid(row=0, column=0)
Tkinter.Label(self, text=RO.StringUtil.DegStr).grid(row=0, column=1)
self.rotTypeWdg = RO.Wdg.OptionMenu(self,
items = _RotTypeMenuItems,
defValue = _rt_Object,
abbrevOK = True,
ignoreCase = True,
var = self.userModel.rotType.getVar(),
helpText = [_RotTypeHelpDict.get(item) for item in _RotTypeMenuItems],
helpURL = "Telescope/RotTypes.html",
)
self.rotTypeWdg.grid(row=0, column=2)
if not userModel:
tccModel = TUI.Models.getModel("tcc")
tccModel.ipConfig.addCallback(self._ipConfigCallback, callNow=False)
tccModel.rotLim.addCallback(self._rotLimCallback)
self.userModel.coordSysName.addCallback(self._coordSysChanged)
self.userModel.rotType.addCallback(self._rotTypeChanged)
self.inputCont = RO.InputCont.ContList (
conts = [
RO.InputCont.WdgCont (
name = "RotAngle",
wdgs = self.rotAngWdg,
omitDef = False,
formatFunc = RO.InputCont.VMSQualFmt(stripPlusses=True),
),
RO.InputCont.WdgCont (
name = "RotType",
wdgs = self.rotTypeWdg,
omitDef = False,
formatFunc = RO.InputCont.VMSQualFmt(),
),
],
formatFunc = RO.InputCont.BasicContListFmt(valSep = ""),
)
def _ipConfigCallback(self, keyVar):
if not keyVar.isCurrent:
return
ipConfig = keyVar[0]
self.rotExists = ipConfig.lower()[0] == "t"
self._setEnable()
def _rotLimCallback(self, keyVar):
rotLim = keyVar.valueList
if None in rotLim:
return
self.rotLim = rotLim[0:2]
self._rotTypeChanged(self.userModel.rotType.get())
def _rotTypeChanged(self, rotType):
if rotType == "Mount":
self.rotAngWdg.setRange(*self.rotLim)
else:
self.rotAngWdg.setRange(*_StdRotLim)
self._setEnable()
self.rotAngWdg.helpText = _RotAngHelpDict.get(rotType)
def _setEnable(self):
rotType = self.userModel.rotType.get()
self.rotTypeWdg.setEnable(self.rotExists)
self.rotAngWdg.setEnable(self.rotExists and (rotType != "None"))
def _coordSysChanged (self, coordSys):
if coordSys == self.coordSys:
return
rotType = self.userModel.rotType.get()
rotTypeMenu = self.rotTypeWdg.getMenu()
physMountCSysSet = (RO.CoordSys.Physical, RO.CoordSys.Mount)
if coordSys in physMountCSysSet:
if self.coordSys not in physMountCSysSet:
rotTypeMenu.entryconfigure(0, state="disabled")
rotTypeMenu.entryconfigure(1, state="disabled")
if rotType in (_rt_Object, _rt_Horizon):
self.userModel.rotType.set(_rt_None)
else:
if self.coordSys in physMountCSysSet:
rotTypeMenu.entryconfigure(0, state="normal")
rotTypeMenu.entryconfigure(1, state="normal")
if rotType != _rt_None:
self.userModel.rotType.set(_rt_Object)
self.coordSys = coordSys
if __name__ == "__main__":
import TUI.Base.TestDispatcher
testDispatcher = TUI.Base.TestDispatcher.TestDispatcher("tcc")
tuiModel = testDispatcher.tuiModel
root = tuiModel.tkRoot
def restoreDefault():
rotWdg.restoreDefault()
def printOptions(*args):
print(rotWdg.getString())
getButton = Tkinter.Button (root, command=restoreDefault, text="Defaults")
getButton.pack()
getButton = Tkinter.Button (root, command=printOptions, text="Print Options")
getButton.pack()
rotWdg = RotWdg(root)
rotWdg.pack()
tuiModel.reactor.run()
| true | true |
f73c838c2e157244c00a4183cdec57cebf041865 | 8,726 | py | Python | py2.7/multiprocess/heap.py | geofft/multiprocess | d998ffea9e82d17662b12b94a236182e7fde46d5 | [
"BSD-3-Clause"
] | 356 | 2015-06-21T21:05:10.000Z | 2022-03-30T11:57:08.000Z | py2.7/multiprocess/heap.py | geofft/multiprocess | d998ffea9e82d17662b12b94a236182e7fde46d5 | [
"BSD-3-Clause"
] | 103 | 2015-06-22T01:44:14.000Z | 2022-03-01T03:44:25.000Z | py2.7/multiprocess/heap.py | geofft/multiprocess | d998ffea9e82d17662b12b94a236182e7fde46d5 | [
"BSD-3-Clause"
] | 72 | 2015-09-02T14:10:24.000Z | 2022-03-25T06:49:43.000Z | #
# Module which supports allocation of memory from an mmap
#
# multiprocessing/heap.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
import bisect
import mmap
import tempfile
import os
import sys
import threading
import itertools
try:
import _multiprocess as _multiprocessing
except ImportError:
import _multiprocessing
from multiprocess.util import Finalize, info
from multiprocess.forking import assert_spawning
__all__ = ['BufferWrapper']
#
# Inheirtable class which wraps an mmap, and from which blocks can be allocated
#
if sys.platform == 'win32':

    try:
        from _multiprocess import win32
    except ImportError:
        from _multiprocessing import win32

    class Arena(object):
        """Windows arena: a tagged anonymous mmap that a child process can
        reopen by name after the arena is pickled across a spawn."""

        _counter = itertools.count()

        def __init__(self, size):
            self.size = size
            # Tag is unique per (pid, counter) so concurrent arenas don't clash.
            self.name = 'pym-%d-%d' % (os.getpid(), Arena._counter.next())
            self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
            assert win32.GetLastError() == 0, 'tagname already in use'
            self._state = (self.size, self.name)

        def __getstate__(self):
            # Only picklable while spawning a child process.
            assert_spawning(self)
            return self._state

        def __setstate__(self, state):
            self.size, self.name = self._state = state
            # Re-open the existing mapping by tag name in the child process.
            self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
            assert win32.GetLastError() == win32.ERROR_ALREADY_EXISTS

else:

    class Arena(object):
        """POSIX arena: a plain anonymous mmap, inherited across fork."""

        def __init__(self, size):
            self.buffer = mmap.mmap(-1, size)
            self.size = size
            self.name = None
#
# Class allowing allocation of chunks of memory from arenas
#
class Heap(object):
    """Best-fit heap that carves blocks out of mmap-backed Arenas.

    A block is the triple (arena, start, stop).  Free blocks are indexed
    three ways so that a freed block can be coalesced with its free
    neighbours: by length (_lengths / _len_to_seq) and by start and stop
    offset (_start_to_block / _stop_to_block).  malloc() reinitializes
    the heap after a fork (pid change).
    """

    # Every allocation is rounded up to a multiple of this many bytes.
    _alignment = 8

    def __init__(self, size=mmap.PAGESIZE):
        self._lastpid = os.getpid()
        self._lock = threading.Lock()
        self._size = size               # size of the next arena (doubles each time)
        self._lengths = []              # sorted list of available free-block lengths
        self._len_to_seq = {}           # length -> list of free blocks of that length
        self._start_to_block = {}       # (arena, start offset) -> free block
        self._stop_to_block = {}        # (arena, stop offset) -> free block
        self._allocated_blocks = set()
        self._arenas = []
        # list of pending blocks to free - see free() comment below
        self._pending_free_blocks = []

    @staticmethod
    def _roundup(n, alignment):
        # alignment must be a power of 2
        mask = alignment - 1
        return (n + mask) & ~mask

    def _malloc(self, size):
        # returns a large enough block -- it might be much larger
        i = bisect.bisect_left(self._lengths, size)
        if i == len(self._lengths):
            # No free block is big enough: create a fresh arena.
            length = self._roundup(max(self._size, size), mmap.PAGESIZE)
            self._size *= 2
            info('allocating a new mmap of length %d', length)
            arena = Arena(length)
            self._arenas.append(arena)
            return (arena, 0, length)
        else:
            # Reuse the smallest free block that fits.
            length = self._lengths[i]
            seq = self._len_to_seq[length]
            block = seq.pop()
            if not seq:
                del self._len_to_seq[length], self._lengths[i]

        # Deregister the reused block from the offset indexes.
        (arena, start, stop) = block
        del self._start_to_block[(arena, start)]
        del self._stop_to_block[(arena, stop)]
        return block

    def _free(self, block):
        # free location and try to merge with neighbours
        (arena, start, stop) = block

        try:
            prev_block = self._stop_to_block[(arena, start)]
        except KeyError:
            pass
        else:
            # A free block ends where we start: merge leftwards.
            start, _ = self._absorb(prev_block)

        try:
            next_block = self._start_to_block[(arena, stop)]
        except KeyError:
            pass
        else:
            # A free block starts where we stop: merge rightwards.
            _, stop = self._absorb(next_block)

        block = (arena, start, stop)
        length = stop - start

        try:
            self._len_to_seq[length].append(block)
        except KeyError:
            self._len_to_seq[length] = [block]
            bisect.insort(self._lengths, length)

        self._start_to_block[(arena, start)] = block
        self._stop_to_block[(arena, stop)] = block

    def _absorb(self, block):
        # deregister this block so it can be merged with a neighbour
        (arena, start, stop) = block
        del self._start_to_block[(arena, start)]
        del self._stop_to_block[(arena, stop)]

        length = stop - start
        seq = self._len_to_seq[length]
        seq.remove(block)
        if not seq:
            del self._len_to_seq[length]
            self._lengths.remove(length)

        return start, stop

    def _free_pending_blocks(self):
        # Free all the blocks in the pending list - called with the lock held.
        while True:
            try:
                block = self._pending_free_blocks.pop()
            except IndexError:
                break
            self._allocated_blocks.remove(block)
            self._free(block)

    def free(self, block):
        # free a block returned by malloc()
        # Since free() can be called asynchronously by the GC, it could happen
        # that it's called while self._lock is held: in that case,
        # self._lock.acquire() would deadlock (issue #12352). To avoid that, a
        # trylock is used instead, and if the lock can't be acquired
        # immediately, the block is added to a list of blocks to be freed
        # synchronously sometimes later from malloc() or free(), by calling
        # _free_pending_blocks() (appending and retrieving from a list is not
        # strictly thread-safe but under cPython it's atomic thanks to the GIL).
        assert os.getpid() == self._lastpid
        if not self._lock.acquire(False):
            # can't acquire the lock right now, add the block to the list of
            # pending blocks to free
            self._pending_free_blocks.append(block)
        else:
            # we hold the lock
            try:
                self._free_pending_blocks()
                self._allocated_blocks.remove(block)
                self._free(block)
            finally:
                self._lock.release()

    def malloc(self, size):
        # return a block of right size (possibly rounded up)
        assert 0 <= size < sys.maxint
        if os.getpid() != self._lastpid:
            self.__init__()                     # reinitialize after fork
        self._lock.acquire()
        self._free_pending_blocks()
        try:
            size = self._roundup(max(size,1), self._alignment)
            (arena, start, stop) = self._malloc(size)
            new_stop = start + size
            if new_stop < stop:
                # Give the unused tail of the block back to the free lists.
                self._free((arena, new_stop, stop))
            block = (arena, start, new_stop)
            self._allocated_blocks.add(block)
            return block
        finally:
            self._lock.release()
#
# Class representing a chunk of an mmap -- can be inherited
#
class BufferWrapper(object):
    """Picklable handle for one chunk of shared memory taken from the heap.

    The underlying block is automatically returned to the heap when the
    wrapper is finalized (garbage collected).
    """

    # Single process-wide heap shared by every wrapper instance.
    _heap = Heap()

    def __init__(self, size):
        assert 0 <= size < sys.maxint
        allocated = BufferWrapper._heap.malloc(size)
        self._state = (allocated, size)
        # Arrange for the block to be freed when this wrapper goes away.
        Finalize(self, BufferWrapper._heap.free, args=(allocated,))

    def get_address(self):
        """Return the absolute address of the start of this chunk."""
        (arena, start, stop), requested = self._state
        base, mapped_len = _multiprocessing.address_of_buffer(arena.buffer)
        assert requested <= mapped_len
        return base + start

    def get_size(self):
        """Return the size that was requested at construction time."""
        return self._state[1]
| 33.43295 | 80 | 0.625029 |
import bisect
import mmap
import tempfile
import os
import sys
import threading
import itertools
try:
import _multiprocess as _multiprocessing
except ImportError:
import _multiprocessing
from multiprocess.util import Finalize, info
from multiprocess.forking import assert_spawning
__all__ = ['BufferWrapper']
if sys.platform == 'win32':
try:
from _multiprocess import win32
except ImportError:
from _multiprocessing import win32
class Arena(object):
_counter = itertools.count()
def __init__(self, size):
self.size = size
self.name = 'pym-%d-%d' % (os.getpid(), Arena._counter.next())
self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
assert win32.GetLastError() == 0, 'tagname already in use'
self._state = (self.size, self.name)
def __getstate__(self):
assert_spawning(self)
return self._state
def __setstate__(self, state):
self.size, self.name = self._state = state
self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
assert win32.GetLastError() == win32.ERROR_ALREADY_EXISTS
else:
class Arena(object):
def __init__(self, size):
self.buffer = mmap.mmap(-1, size)
self.size = size
self.name = None
class Heap(object):
_alignment = 8
def __init__(self, size=mmap.PAGESIZE):
self._lastpid = os.getpid()
self._lock = threading.Lock()
self._size = size
self._lengths = []
self._len_to_seq = {}
self._start_to_block = {}
self._stop_to_block = {}
self._allocated_blocks = set()
self._arenas = []
self._pending_free_blocks = []
@staticmethod
def _roundup(n, alignment):
mask = alignment - 1
return (n + mask) & ~mask
def _malloc(self, size):
i = bisect.bisect_left(self._lengths, size)
if i == len(self._lengths):
length = self._roundup(max(self._size, size), mmap.PAGESIZE)
self._size *= 2
info('allocating a new mmap of length %d', length)
arena = Arena(length)
self._arenas.append(arena)
return (arena, 0, length)
else:
length = self._lengths[i]
seq = self._len_to_seq[length]
block = seq.pop()
if not seq:
del self._len_to_seq[length], self._lengths[i]
(arena, start, stop) = block
del self._start_to_block[(arena, start)]
del self._stop_to_block[(arena, stop)]
return block
def _free(self, block):
(arena, start, stop) = block
try:
prev_block = self._stop_to_block[(arena, start)]
except KeyError:
pass
else:
start, _ = self._absorb(prev_block)
try:
next_block = self._start_to_block[(arena, stop)]
except KeyError:
pass
else:
_, stop = self._absorb(next_block)
block = (arena, start, stop)
length = stop - start
try:
self._len_to_seq[length].append(block)
except KeyError:
self._len_to_seq[length] = [block]
bisect.insort(self._lengths, length)
self._start_to_block[(arena, start)] = block
self._stop_to_block[(arena, stop)] = block
def _absorb(self, block):
(arena, start, stop) = block
del self._start_to_block[(arena, start)]
del self._stop_to_block[(arena, stop)]
length = stop - start
seq = self._len_to_seq[length]
seq.remove(block)
if not seq:
del self._len_to_seq[length]
self._lengths.remove(length)
return start, stop
def _free_pending_blocks(self):
while True:
try:
block = self._pending_free_blocks.pop()
except IndexError:
break
self._allocated_blocks.remove(block)
self._free(block)
def free(self, block):
# self._lock.acquire() would deadlock (issue #12352). To avoid that, a
# trylock is used instead, and if the lock can't be acquired
assert os.getpid() == self._lastpid
if not self._lock.acquire(False):
# can't acquire the lock right now, add the block to the list of
self._pending_free_blocks.append(block)
else:
try:
self._free_pending_blocks()
self._allocated_blocks.remove(block)
self._free(block)
finally:
self._lock.release()
def malloc(self, size):
assert 0 <= size < sys.maxint
if os.getpid() != self._lastpid:
self.__init__()
self._lock.acquire()
self._free_pending_blocks()
try:
size = self._roundup(max(size,1), self._alignment)
(arena, start, stop) = self._malloc(size)
new_stop = start + size
if new_stop < stop:
self._free((arena, new_stop, stop))
block = (arena, start, new_stop)
self._allocated_blocks.add(block)
return block
finally:
self._lock.release()
class BufferWrapper(object):
_heap = Heap()
def __init__(self, size):
assert 0 <= size < sys.maxint
block = BufferWrapper._heap.malloc(size)
self._state = (block, size)
Finalize(self, BufferWrapper._heap.free, args=(block,))
def get_address(self):
(arena, start, stop), size = self._state
address, length = _multiprocessing.address_of_buffer(arena.buffer)
assert size <= length
return address + start
def get_size(self):
return self._state[1]
| true | true |
f73c8453c8ba25eef4d2b8e80cb0ad1c989d10d0 | 1,844 | py | Python | jdhapi/migrations/0007_article_issue.py | C2DH/jdhbackend | 37efb0c5a6e1ea2acf8aca477d052a4e33f9bf40 | [
"MIT"
] | null | null | null | jdhapi/migrations/0007_article_issue.py | C2DH/jdhbackend | 37efb0c5a6e1ea2acf8aca477d052a4e33f9bf40 | [
"MIT"
] | 59 | 2020-11-27T08:58:35.000Z | 2022-03-30T15:54:01.000Z | jdhapi/migrations/0007_article_issue.py | C2DH/jdhbackend | 37efb0c5a6e1ea2acf8aca477d052a4e33f9bf40 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.3 on 2021-05-12 18:49
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated schema migration: creates the Issue table and the
    # Article table (Article shares its primary key with Abstract via a
    # one-to-one link, and references Issue via a foreign key).

    dependencies = [
        ('jdhapi', '0006_auto_20201127_1355'),
    ]

    operations = [
        migrations.CreateModel(
            name='Issue',
            fields=[
                ('id', models.AutoField(db_column='id', primary_key=True, serialize=False)),
                ('title', models.CharField(max_length=250)),
                ('description', models.TextField(blank=True, null=True)),
                ('creation_date', models.DateTimeField(default=django.utils.timezone.now)),
                # Null until the issue is actually published.
                ('publication_date', models.DateTimeField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Article',
            fields=[
                # The abstract doubles as the primary key: one article per abstract.
                ('abstract', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='jdhapi.abstract')),
                ('repository_url', models.URLField(blank=True, max_length=254, null=True)),
                ('notebook_url', models.URLField(blank=True, max_length=254, null=True)),
                ('notebook_commit_hash', models.CharField(blank=True, default='', help_text='store the git hash', max_length=22)),
                ('status', models.CharField(choices=[('DRAFT', 'Draft'), ('INTERNAL_REVIEW', 'Internal_review'), ('EXTERNAL_REVIEW', 'External_review'), ('PUBLISHED', 'Published')], default='DRAFT', max_length=15)),
                ('repository_type', models.CharField(choices=[('GITHUB', 'Github'), ('GITLAB', 'Gitlab')], default='GITHUB', max_length=15)),
                ('issue', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='jdhapi.issue')),
            ],
        ),
    ]
| 48.526316 | 215 | 0.614967 |
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('jdhapi', '0006_auto_20201127_1355'),
]
operations = [
migrations.CreateModel(
name='Issue',
fields=[
('id', models.AutoField(db_column='id', primary_key=True, serialize=False)),
('title', models.CharField(max_length=250)),
('description', models.TextField(blank=True, null=True)),
('creation_date', models.DateTimeField(default=django.utils.timezone.now)),
('publication_date', models.DateTimeField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Article',
fields=[
('abstract', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='jdhapi.abstract')),
('repository_url', models.URLField(blank=True, max_length=254, null=True)),
('notebook_url', models.URLField(blank=True, max_length=254, null=True)),
('notebook_commit_hash', models.CharField(blank=True, default='', help_text='store the git hash', max_length=22)),
('status', models.CharField(choices=[('DRAFT', 'Draft'), ('INTERNAL_REVIEW', 'Internal_review'), ('EXTERNAL_REVIEW', 'External_review'), ('PUBLISHED', 'Published')], default='DRAFT', max_length=15)),
('repository_type', models.CharField(choices=[('GITHUB', 'Github'), ('GITLAB', 'Gitlab')], default='GITHUB', max_length=15)),
('issue', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='jdhapi.issue')),
],
),
]
| true | true |
f73c849cc55ec1c9e0540d071ed3ed2e7e255620 | 1,444 | py | Python | led-command-centre.py | kazma89/Taller_RPi | c54002746b8ab4c9eb86c4ee70efb5be712968c2 | [
"MIT"
] | null | null | null | led-command-centre.py | kazma89/Taller_RPi | c54002746b8ab4c9eb86c4ee70efb5be712968c2 | [
"MIT"
] | null | null | null | led-command-centre.py | kazma89/Taller_RPi | c54002746b8ab4c9eb86c4ee70efb5be712968c2 | [
"MIT"
] | null | null | null | from tkinter import *
import tkinter.font
from gpiozero import LED
import RPi.GPIO
RPi.GPIO.setmode(RPi.GPIO.BCM)  # use Broadcom (GPIO) pin numbering

### HARDWARE DEFINITIONS ###
# LED pin definitions
led0 = LED(7)
led1 = LED(8)
led2 = LED(25)
led3 = LED(23)
led4 = LED(24)
led5 = LED(18)
led6 = LED(15)
led7 = LED(14)
# Arrange LEDs into a list
# (led7 first, so leds[0] is driven by the first character of the code)
leds = [led7,led6,led5,led4,led3,led2,led1,led0]

### GUI DEFINITIONS ###
win=Tk()
win.title("LED Controller")
myFont=tkinter.font.Font(family = 'Helvetica', size = 12, weight = "bold")
# NOTE(review): this StringVar appears unused -- ledShow() reads the Entry
# widget directly and its local variable shadows this name.
ledCode = StringVar()

### Event Functions ###
def ledShow():
    # Read the binary code typed into the entry box and apply it to the LEDs.
    # Character i of the code drives leds[i]: "1" -> on, anything else -> off.
    #
    # zip() stops at the shorter sequence, so a code longer than len(leds)
    # no longer raises IndexError (extra characters are ignored); a code
    # shorter than len(leds) leaves the remaining LEDs unchanged, as before.
    # The local is renamed so it no longer shadows the module-level
    # `ledCode` StringVar.
    bit_pattern = code.get()
    print("LED code: ", bit_pattern)  # debug output
    for led, bit in zip(leds, bit_pattern):
        if bit == "1":
            led.on()
        else:
            led.off()
def close(): # Cleanly close the GUI and cleanup the GPIO
    # Release the GPIO pins before destroying the window so the pins are
    # left in a safe state however the application exits.
    RPi.GPIO.cleanup()
    win.destroy()
### WIDGETS ###
# Button that applies the typed code to the LEDs.
ledButton = Button(win, text='Load LED code', font=myFont, command=ledShow, bg='bisque2', height=1)
ledButton.grid(row=0,column=1)
# Entry box for the binary LED code (e.g. "10110001", one bit per LED).
code = Entry(win, font=myFont, width=10)
code.grid(row=0,column=0)
# Exit button: releases the GPIO and closes the window via close().
exitButton = Button(win, text='Exit', font=myFont, command=close, bg='red', height=1, width=6)
exitButton.grid(row=3,column=1, sticky=E)
win.protocol("WM_DELETE_WINDOW", close) # cleanup GPIO when user closes window
win.mainloop() # Loops forever | 24.066667 | 99 | 0.659972 | from tkinter import *
import tkinter.font
from gpiozero import LED
import RPi.GPIO
RPi.GPIO.setmode(RPi.GPIO.BCM)
led4 = LED(24)
led5 = LED(18)
led6 = LED(15)
led7 = LED(14)
leds = [led7,led6,led5,led4,led3,led2,led1,led0]
ter.font.Font(family = 'Helvetica', size = 12, weight = "bold")
ledCode = StringVar()
t("LED code: ", ledCode)
i=0
for c in ledCode:
if c == "1":
leds[i].on()
else:
leds[i].off()
i+=1
def close():
RPi.GPIO.cleanup()
win.destroy()
oad LED code', font=myFont, command=ledShow, bg='bisque2', height=1)
ledButton.grid(row=0,column=1)
code = Entry(win, font=myFont, width=10)
code.grid(row=0,column=0)
exitButton = Button(win, text='Exit', font=myFont, command=close, bg='red', height=1, width=6)
exitButton.grid(row=3,column=1, sticky=E)
win.protocol("WM_DELETE_WINDOW", close)
win.mainloop() | true | true |
f73c84db13b89da5deea2f1aa8c5b93dd83ea3c9 | 3,487 | py | Python | Uwallet/tests/_test_target.py | monotone/Ulord-platform | 8ec19bbb8845db8f22df925d33b118b22dab0d0b | [
"MIT"
] | 28 | 2018-04-27T08:02:18.000Z | 2020-01-14T05:08:34.000Z | Uwallet/tests/_test_target.py | monotone/Ulord-platform | 8ec19bbb8845db8f22df925d33b118b22dab0d0b | [
"MIT"
] | 2 | 2018-05-16T08:29:20.000Z | 2018-06-17T04:51:08.000Z | Uwallet/tests/_test_target.py | monotone/Ulord-platform | 8ec19bbb8845db8f22df925d33b118b22dab0d0b | [
"MIT"
] | 4 | 2018-05-14T11:43:31.000Z | 2018-09-29T09:58:58.000Z | from uwallet.blockchain import unet
from uwallet.blockchain import ArithUint256
GENESIS_BITS = 0x1f07ffff
MAX_TARGET = 0x0007FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
N_TARGET_TIMESPAN = 150
def check_bits(bits):
    """Sanity-check a compact-format ("nBits") difficulty target.

    The compact format packs a target as an exponent (high byte) and a
    mantissa (low three bytes).  Raises AssertionError if either part is
    out of range.
    """
    bitsN = (bits >> 24) & 0xff
    # Exponent must be in [0x03, 0x1f]; 0x1f is required because the
    # genesis bits are 0x1f07ffff.  (The old message wrongly said 0x1d.)
    assert 0x03 <= bitsN <= 0x1f, \
        "First part of bits should be in [0x03, 0x1f], but it was {}".format(hex(bitsN))
    bitsBase = bits & 0xffffff
    # Mantissa must be in [0x8000, 0x7fffff].
    assert 0x8000 <= bitsBase <= 0x7fffff, \
        "Second part of bits should be in [0x8000, 0x7fffff] but it was {}".format(bitsBase)
def get_target(index, first, last, chain='main'):
"""
this follows the calculations in lbrycrd/src/lbry.cpp
Returns: (bits, target)
"""
if index == 0:
return GENESIS_BITS, MAX_TARGET
assert last is not None, "Last shouldn't be none"
# bits to target
bits = last.get('bits')
# print_error("Last bits: ", bits)
self.check_bits(bits)
# new target
nActualTimespan = last.get('timestamp') - first.get('timestamp')
nTargetTimespan = N_TARGET_TIMESPAN #150
nModulatedTimespan = nTargetTimespan - (nActualTimespan - nTargetTimespan) / 8
nMinTimespan = nTargetTimespan - (nTargetTimespan / 8)
nMaxTimespan = nTargetTimespan + (nTargetTimespan / 2)
if nModulatedTimespan < nMinTimespan:
nModulatedTimespan = nMinTimespan
elif nModulatedTimespan > nMaxTimespan:
nModulatedTimespan = nMaxTimespan
bnOld = ArithUint256.SetCompact(bits)
bnNew = bnOld * nModulatedTimespan
# this doesn't work if it is nTargetTimespan even though that
# is what it looks like it should be based on reading the code
# in lbry.cpp
bnNew /= nModulatedTimespan
if bnNew > MAX_TARGET:
bnNew = ArithUint256(MAX_TARGET)
return bnNew.GetCompact(), bnNew._value
def verify_target(block):
#bits = int('0x' + block.get('bits'), 16)
bits = int(block.get('bits'),16)
#print bits
_, target = bits_to_target(bits)
int_hash = int('0x' + block.get('hash'), 16)
print ("int_hash: ", int_hash)
print ("target : ", target)
if (int_hash <= target):
print ("verify target success")
else:
print ("verify target failed")
def bits_to_target(rex):
value = ArithUint256.SetCompact(rex) #rex: 0x1111
return value.GetCompact(), value._value
def main():
# 2 block . data from blockchain-cli.
block_1 = {}
block_1['hash'] = '00022b278c567c27569618ba94fcfff38f0e5cffbce90a99cea80bc5cab89724'
block_1['bits'] = "1f07ffff"
block_1['timestamp'] = 1511426385
# 3 block
block_2 = {}
block_2['hash'] = '0006b528e8d76c90056d1685ca2dd9959726c7bfce9871ebd511a5aa44de0905'
block_2['bits'] = "1f07ffff"
block_2['timestamp'] = 1511426707
# 1000 block
block_3 = {}
block_3['hash'] = '000447de95629ec2efa86cf95c2042491278ff49b822f12b701cb4c848051376'
block_3['bits'] = "1f079a69"
block_3['timestamp'] = 1511774706
# 1001 block
block_4 = {}
block_4['hash'] = '0007ac5bea8f2088135eb987462d85a82ae953257f5ee771f3f0631043da1148'
block_4['bits'] = "1f07c913"
block_4['timestamp'] = 1511774724
bits, target = get_target(0, block_1, block_2)
#to test bits to target.
#bits_0, target_0 = bits_to_target(GENESIS_BITS)
#print ("bits_0 :", bits_0)
#print ("target_0:", target_0)
verify_target(block_3)
verify_target(block_4)
if __name__ == '__main__':
main()
| 31.7 | 96 | 0.675652 | from uwallet.blockchain import unet
from uwallet.blockchain import ArithUint256
GENESIS_BITS = 0x1f07ffff
MAX_TARGET = 0x0007FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
N_TARGET_TIMESPAN = 150
def check_bits(bits):
bitsN = (bits >> 24) & 0xff
assert 0x03 <= bitsN <= 0x1f, \
"First part of bits should be in [0x03, 0x1d], but it was {}".format(hex(bitsN))
bitsBase = bits & 0xffffff
assert 0x8000 <= bitsBase <= 0x7fffff, \
"Second part of bits should be in [0x8000, 0x7fffff] but it was {}".format(bitsBase)
def get_target(index, first, last, chain='main'):
if index == 0:
return GENESIS_BITS, MAX_TARGET
assert last is not None, "Last shouldn't be none"
# bits to target
bits = last.get('bits')
# print_error("Last bits: ", bits)
self.check_bits(bits)
# new target
nActualTimespan = last.get('timestamp') - first.get('timestamp')
nTargetTimespan = N_TARGET_TIMESPAN #150
nModulatedTimespan = nTargetTimespan - (nActualTimespan - nTargetTimespan) / 8
nMinTimespan = nTargetTimespan - (nTargetTimespan / 8)
nMaxTimespan = nTargetTimespan + (nTargetTimespan / 2)
if nModulatedTimespan < nMinTimespan:
nModulatedTimespan = nMinTimespan
elif nModulatedTimespan > nMaxTimespan:
nModulatedTimespan = nMaxTimespan
bnOld = ArithUint256.SetCompact(bits)
bnNew = bnOld * nModulatedTimespan
# this doesn't work if it is nTargetTimespan even though that
bnNew /= nModulatedTimespan
if bnNew > MAX_TARGET:
bnNew = ArithUint256(MAX_TARGET)
return bnNew.GetCompact(), bnNew._value
def verify_target(block):
bits = int(block.get('bits'),16)
_, target = bits_to_target(bits)
int_hash = int('0x' + block.get('hash'), 16)
print ("int_hash: ", int_hash)
print ("target : ", target)
if (int_hash <= target):
print ("verify target success")
else:
print ("verify target failed")
def bits_to_target(rex):
value = ArithUint256.SetCompact(rex)
return value.GetCompact(), value._value
def main():
block_1 = {}
block_1['hash'] = '00022b278c567c27569618ba94fcfff38f0e5cffbce90a99cea80bc5cab89724'
block_1['bits'] = "1f07ffff"
block_1['timestamp'] = 1511426385
block_2 = {}
block_2['hash'] = '0006b528e8d76c90056d1685ca2dd9959726c7bfce9871ebd511a5aa44de0905'
block_2['bits'] = "1f07ffff"
block_2['timestamp'] = 1511426707
block_3 = {}
block_3['hash'] = '000447de95629ec2efa86cf95c2042491278ff49b822f12b701cb4c848051376'
block_3['bits'] = "1f079a69"
block_3['timestamp'] = 1511774706
block_4 = {}
block_4['hash'] = '0007ac5bea8f2088135eb987462d85a82ae953257f5ee771f3f0631043da1148'
block_4['bits'] = "1f07c913"
block_4['timestamp'] = 1511774724
bits, target = get_target(0, block_1, block_2)
verify_target(block_3)
verify_target(block_4)
if __name__ == '__main__':
main()
| true | true |
f73c8530463a2ab9b827d38102b11b7895d02106 | 4,187 | py | Python | mrbaviirc/nestini.py | brianvanderburg2/mrbaviirc | 6aeee9b229d3f62aace98a51d9014781bbe6cb52 | [
"Apache-2.0"
] | null | null | null | mrbaviirc/nestini.py | brianvanderburg2/mrbaviirc | 6aeee9b229d3f62aace98a51d9014781bbe6cb52 | [
"Apache-2.0"
] | null | null | null | mrbaviirc/nestini.py | brianvanderburg2/mrbaviirc | 6aeee9b229d3f62aace98a51d9014781bbe6cb52 | [
"Apache-2.0"
] | null | null | null | """ This code parses INI files in a nested manor. """
__author__ = "Brian Allen Vanderburg II"
__copyright__ = "Copyright 2016"
__license__ = "Apache License 2.0"
try:
from collections import OrderedDict as dict
except ImportError:
pass
class NestedIniParser(object):
def __init__(self, parser):
""" Initialize the nexted INI parser. """
self.parser = parser
def parse(self):
""" Parse the INI data and return the results. """
results = dict()
parser = self.parser
for section in parser.sections():
target = self._get_target(results, section)
for option in parser.options(section):
self._set_value(target, option, parser.get(section, option))
return results
def _isint(self, value):
""" Is a value an integer. """
try:
result = int(value)
return True
except ValueError:
return False
def _get_target(self, results, section):
""" Find out where we should put items. """
parts = section.split(".")
target = results
count = len(parts)
for pos in range(count):
# What kind is it:
part = parts[pos]
if self._isint(part):
# Item before us should be a list
if not isinstance(target, list):
raise ValueError("Must be a list")
value = int(part)
if value < 0 or value > len(target):
raise ValueError("Invalid index.")
if value == len(target):
if (pos == count - 1) or not self._isint(parts[pos + 1]):
target.append(dict())
else:
target.append([])
target = target[value]
else:
# Item before us should be a dict
if not isinstance(target, dict):
raise ValueError("Must be a dict")
value = part
if not value in target:
if (pos == count - 1) or not self._isint(parts[pos + 1]):
target[value] = dict()
else:
target[value] = []
target = target[value]
if not isinstance(target, dict):
raise ValueError("Final result must be a dict.")
return target
def _set_value(self, target, name, data):
""" Set a value by parsing simlar to above. """
parts = name.split(".")
count = len(parts)
for pos in range(count):
# What kind is it
part = parts[pos]
if self._isint(part):
# Item before us should be a list
if not isinstance(target, list):
raise ValueError("Must be a list")
value = int(part)
if value < 0 or value > len(target):
raise ValueError("Invalid index.")
if pos == count - 1:
if value == len(target):
target.append(data)
else:
target[value] = data
return
else:
if value == len(target):
if self._isint(parts[pos + 1]):
target.append([])
else:
target.append(dict())
target = target[value]
else:
# Item before us should be a dict
if not isinstance(target, dict):
raise ValueError("Must be a dict.")
value = part
if pos == count - 1:
target[value] = data
return
else:
if not value in target:
if self._isint(parts[pos + 1]):
target[value] = []
else:
target[value] = dict()
target = target[value]
| 32.457364 | 77 | 0.455219 |
__author__ = "Brian Allen Vanderburg II"
__copyright__ = "Copyright 2016"
__license__ = "Apache License 2.0"
try:
from collections import OrderedDict as dict
except ImportError:
pass
class NestedIniParser(object):
def __init__(self, parser):
self.parser = parser
def parse(self):
results = dict()
parser = self.parser
for section in parser.sections():
target = self._get_target(results, section)
for option in parser.options(section):
self._set_value(target, option, parser.get(section, option))
return results
def _isint(self, value):
try:
result = int(value)
return True
except ValueError:
return False
def _get_target(self, results, section):
parts = section.split(".")
target = results
count = len(parts)
for pos in range(count):
part = parts[pos]
if self._isint(part):
if not isinstance(target, list):
raise ValueError("Must be a list")
value = int(part)
if value < 0 or value > len(target):
raise ValueError("Invalid index.")
if value == len(target):
if (pos == count - 1) or not self._isint(parts[pos + 1]):
target.append(dict())
else:
target.append([])
target = target[value]
else:
if not isinstance(target, dict):
raise ValueError("Must be a dict")
value = part
if not value in target:
if (pos == count - 1) or not self._isint(parts[pos + 1]):
target[value] = dict()
else:
target[value] = []
target = target[value]
if not isinstance(target, dict):
raise ValueError("Final result must be a dict.")
return target
def _set_value(self, target, name, data):
parts = name.split(".")
count = len(parts)
for pos in range(count):
part = parts[pos]
if self._isint(part):
if not isinstance(target, list):
raise ValueError("Must be a list")
value = int(part)
if value < 0 or value > len(target):
raise ValueError("Invalid index.")
if pos == count - 1:
if value == len(target):
target.append(data)
else:
target[value] = data
return
else:
if value == len(target):
if self._isint(parts[pos + 1]):
target.append([])
else:
target.append(dict())
target = target[value]
else:
if not isinstance(target, dict):
raise ValueError("Must be a dict.")
value = part
if pos == count - 1:
target[value] = data
return
else:
if not value in target:
if self._isint(parts[pos + 1]):
target[value] = []
else:
target[value] = dict()
target = target[value]
| true | true |
f73c85b00f072057f7bc0c5325184487d744b60d | 939 | py | Python | tests/test_assume_role_executor_factory.py | zulhilmizainuddin/aws-assumerole | 2aff3af378f4b81d38f89fccbf6cd0f0f3c2e181 | [
"MIT"
] | null | null | null | tests/test_assume_role_executor_factory.py | zulhilmizainuddin/aws-assumerole | 2aff3af378f4b81d38f89fccbf6cd0f0f3c2e181 | [
"MIT"
] | 1 | 2019-12-28T09:24:10.000Z | 2019-12-28T09:26:47.000Z | tests/test_assume_role_executor_factory.py | zulhilmizainuddin/aws-assumerole | 2aff3af378f4b81d38f89fccbf6cd0f0f3c2e181 | [
"MIT"
] | null | null | null | import pytest
from awsassume.assume_role_cache_executor import AssumeRoleCacheExecutor
from awsassume.assume_role_executor_factory import AssumeRoleExecutorFactory
from awsassume.assume_role_no_cache_executor import AssumeRoleNoCacheExecutor
from awsassume.data_models import CliArgs
@pytest.fixture(scope='module', params=[True, False])
def cli_args(request):
return CliArgs(role_arn='arn:aws:iam::123456789012:role/rolename',
role_session_name='sessionname',
command='aws s3 ls',
region_name='ap-southeast-1',
no_cache=request.param)
def test_get_executor(cli_args):
assume_role_executor = AssumeRoleExecutorFactory.get_executor(cli_args)
if cli_args.no_cache is True:
assert isinstance(assume_role_executor, AssumeRoleNoCacheExecutor) is True
else:
assert isinstance(assume_role_executor, AssumeRoleCacheExecutor) is True
| 37.56 | 82 | 0.757188 | import pytest
from awsassume.assume_role_cache_executor import AssumeRoleCacheExecutor
from awsassume.assume_role_executor_factory import AssumeRoleExecutorFactory
from awsassume.assume_role_no_cache_executor import AssumeRoleNoCacheExecutor
from awsassume.data_models import CliArgs
@pytest.fixture(scope='module', params=[True, False])
def cli_args(request):
return CliArgs(role_arn='arn:aws:iam::123456789012:role/rolename',
role_session_name='sessionname',
command='aws s3 ls',
region_name='ap-southeast-1',
no_cache=request.param)
def test_get_executor(cli_args):
assume_role_executor = AssumeRoleExecutorFactory.get_executor(cli_args)
if cli_args.no_cache is True:
assert isinstance(assume_role_executor, AssumeRoleNoCacheExecutor) is True
else:
assert isinstance(assume_role_executor, AssumeRoleCacheExecutor) is True
| true | true |
f73c85e55e98e970079f707dd2ff12f7bd56b852 | 3,299 | py | Python | tests/cupy_tests/cuda_tests/test_stream.py | mrkwjc/cupy | ae9705dcc8b59ed05a6c91fdfb401b71f7c2b224 | [
"MIT"
] | 1 | 2020-12-19T23:34:41.000Z | 2020-12-19T23:34:41.000Z | tests/cupy_tests/cuda_tests/test_stream.py | mrkwjc/cupy | ae9705dcc8b59ed05a6c91fdfb401b71f7c2b224 | [
"MIT"
] | null | null | null | tests/cupy_tests/cuda_tests/test_stream.py | mrkwjc/cupy | ae9705dcc8b59ed05a6c91fdfb401b71f7c2b224 | [
"MIT"
] | null | null | null | import unittest
from cupy._creation import from_data
from cupy import cuda
from cupy import testing
from cupy.testing import attr
class TestStream(unittest.TestCase):
@attr.gpu
def test_eq(self):
null0 = cuda.Stream.null
null1 = cuda.Stream(True)
null2 = cuda.Stream(True)
null3 = cuda.Stream()
self.assertEqual(null0, null1)
self.assertEqual(null1, null2)
self.assertNotEqual(null2, null3)
def check_del(self, null):
stream = cuda.Stream(null=null).use()
stream_ptr = stream.ptr
x = from_data.array([1, 2, 3])
del stream
self.assertEqual(cuda.Stream.null, cuda.get_current_stream())
# Want to test cudaStreamDestory is issued, but
# runtime.streamQuery(stream_ptr) causes SEGV. We cannot test...
del stream_ptr
del x
@attr.gpu
def test_del(self):
self.check_del(null=False)
@attr.gpu
def test_del_null(self):
self.check_del(null=True)
@attr.gpu
def test_get_and_add_callback(self):
N = 100
cupy_arrays = [testing.shaped_random((2, 3)) for _ in range(N)]
if not cuda.runtime.is_hip:
stream = cuda.Stream.null
else:
# adding callbacks to the null stream in HIP would segfault...
stream = cuda.Stream()
out = []
for i in range(N):
numpy_array = cupy_arrays[i].get(stream=stream)
stream.add_callback(
lambda _, __, t: out.append(t[0]),
(i, numpy_array))
stream.synchronize()
self.assertEqual(out, list(range(N)))
@attr.gpu
def test_with_statement(self):
stream1 = cuda.Stream()
stream2 = cuda.Stream()
self.assertEqual(cuda.Stream.null, cuda.get_current_stream())
with stream1:
self.assertEqual(stream1, cuda.get_current_stream())
with stream2:
self.assertEqual(stream2, cuda.get_current_stream())
self.assertEqual(stream1, cuda.get_current_stream())
self.assertEqual(cuda.Stream.null, cuda.get_current_stream())
@attr.gpu
def test_use(self):
stream1 = cuda.Stream().use()
self.assertEqual(stream1, cuda.get_current_stream())
cuda.Stream.null.use()
self.assertEqual(cuda.Stream.null, cuda.get_current_stream())
class TestExternalStream(unittest.TestCase):
def setUp(self):
self.stream_ptr = cuda.runtime.streamCreate()
self.stream = cuda.ExternalStream(self.stream_ptr)
def tearDown(self):
cuda.runtime.streamDestroy(self.stream_ptr)
@attr.gpu
def test_get_and_add_callback(self):
N = 100
cupy_arrays = [testing.shaped_random((2, 3)) for _ in range(N)]
if not cuda.runtime.is_hip:
stream = cuda.Stream.null
else:
# adding callbacks to the null stream in HIP would segfault...
stream = cuda.Stream()
out = []
for i in range(N):
numpy_array = cupy_arrays[i].get(stream=stream)
stream.add_callback(
lambda _, __, t: out.append(t[0]),
(i, numpy_array))
stream.synchronize()
self.assertEqual(out, list(range(N)))
| 29.720721 | 74 | 0.608366 | import unittest
from cupy._creation import from_data
from cupy import cuda
from cupy import testing
from cupy.testing import attr
class TestStream(unittest.TestCase):
@attr.gpu
def test_eq(self):
null0 = cuda.Stream.null
null1 = cuda.Stream(True)
null2 = cuda.Stream(True)
null3 = cuda.Stream()
self.assertEqual(null0, null1)
self.assertEqual(null1, null2)
self.assertNotEqual(null2, null3)
def check_del(self, null):
stream = cuda.Stream(null=null).use()
stream_ptr = stream.ptr
x = from_data.array([1, 2, 3])
del stream
self.assertEqual(cuda.Stream.null, cuda.get_current_stream())
del stream_ptr
del x
@attr.gpu
def test_del(self):
self.check_del(null=False)
@attr.gpu
def test_del_null(self):
self.check_del(null=True)
@attr.gpu
def test_get_and_add_callback(self):
N = 100
cupy_arrays = [testing.shaped_random((2, 3)) for _ in range(N)]
if not cuda.runtime.is_hip:
stream = cuda.Stream.null
else:
stream = cuda.Stream()
out = []
for i in range(N):
numpy_array = cupy_arrays[i].get(stream=stream)
stream.add_callback(
lambda _, __, t: out.append(t[0]),
(i, numpy_array))
stream.synchronize()
self.assertEqual(out, list(range(N)))
@attr.gpu
def test_with_statement(self):
stream1 = cuda.Stream()
stream2 = cuda.Stream()
self.assertEqual(cuda.Stream.null, cuda.get_current_stream())
with stream1:
self.assertEqual(stream1, cuda.get_current_stream())
with stream2:
self.assertEqual(stream2, cuda.get_current_stream())
self.assertEqual(stream1, cuda.get_current_stream())
self.assertEqual(cuda.Stream.null, cuda.get_current_stream())
@attr.gpu
def test_use(self):
stream1 = cuda.Stream().use()
self.assertEqual(stream1, cuda.get_current_stream())
cuda.Stream.null.use()
self.assertEqual(cuda.Stream.null, cuda.get_current_stream())
class TestExternalStream(unittest.TestCase):
def setUp(self):
self.stream_ptr = cuda.runtime.streamCreate()
self.stream = cuda.ExternalStream(self.stream_ptr)
def tearDown(self):
cuda.runtime.streamDestroy(self.stream_ptr)
@attr.gpu
def test_get_and_add_callback(self):
N = 100
cupy_arrays = [testing.shaped_random((2, 3)) for _ in range(N)]
if not cuda.runtime.is_hip:
stream = cuda.Stream.null
else:
stream = cuda.Stream()
out = []
for i in range(N):
numpy_array = cupy_arrays[i].get(stream=stream)
stream.add_callback(
lambda _, __, t: out.append(t[0]),
(i, numpy_array))
stream.synchronize()
self.assertEqual(out, list(range(N)))
| true | true |
f73c864b7b40a247028a17474ecec1f2a772aefc | 3,540 | py | Python | Lib/turtledemo/penrose.py | Hadron/python | 73137f499ed658169f49273eee46845e3b53e800 | [
"PSF-2.0"
] | 1,872 | 2015-01-02T18:56:47.000Z | 2022-03-31T07:34:39.000Z | Lib/turtledemo/penrose.py | Hadron/python | 73137f499ed658169f49273eee46845e3b53e800 | [
"PSF-2.0"
] | 675 | 2015-02-27T09:01:01.000Z | 2022-03-31T14:03:25.000Z | Lib/turtledemo/penrose.py | Hadron/python | 73137f499ed658169f49273eee46845e3b53e800 | [
"PSF-2.0"
] | 278 | 2015-01-02T03:48:20.000Z | 2022-03-29T20:40:44.000Z | #!/usr/bin/env python3
""" xturtle-example-suite:
xtx_kites_and_darts.py
Constructs two aperiodic penrose-tilings,
consisting of kites and darts, by the method
of inflation in six steps.
Starting points are the patterns "sun"
consisting of five kites and "star"
consisting of five darts.
For more information see:
http://en.wikipedia.org/wiki/Penrose_tiling
-------------------------------------------
"""
from turtle import *
from math import cos, pi
from time import clock, sleep
f = (5**0.5-1)/2.0 # (sqrt(5)-1)/2 -- golden ratio
d = 2 * cos(3*pi/10)
def kite(l):
fl = f * l
lt(36)
fd(l)
rt(108)
fd(fl)
rt(36)
fd(fl)
rt(108)
fd(l)
rt(144)
def dart(l):
fl = f * l
lt(36)
fd(l)
rt(144)
fd(fl)
lt(36)
fd(fl)
rt(144)
fd(l)
rt(144)
def inflatekite(l, n):
if n == 0:
px, py = pos()
h, x, y = int(heading()), round(px,3), round(py,3)
tiledict[(h,x,y)] = True
return
fl = f * l
lt(36)
inflatedart(fl, n-1)
fd(l)
rt(144)
inflatekite(fl, n-1)
lt(18)
fd(l*d)
rt(162)
inflatekite(fl, n-1)
lt(36)
fd(l)
rt(180)
inflatedart(fl, n-1)
lt(36)
def inflatedart(l, n):
if n == 0:
px, py = pos()
h, x, y = int(heading()), round(px,3), round(py,3)
tiledict[(h,x,y)] = False
return
fl = f * l
inflatekite(fl, n-1)
lt(36)
fd(l)
rt(180)
inflatedart(fl, n-1)
lt(54)
fd(l*d)
rt(126)
inflatedart(fl, n-1)
fd(l)
rt(144)
def draw(l, n, th=2):
clear()
l = l * f**n
shapesize(l/100.0, l/100.0, th)
for k in tiledict:
h, x, y = k
setpos(x, y)
setheading(h)
if tiledict[k]:
shape("kite")
color("black", (0, 0.75, 0))
else:
shape("dart")
color("black", (0.75, 0, 0))
stamp()
def sun(l, n):
for i in range(5):
inflatekite(l, n)
lt(72)
def star(l,n):
for i in range(5):
inflatedart(l, n)
lt(72)
def makeshapes():
tracer(0)
begin_poly()
kite(100)
end_poly()
register_shape("kite", get_poly())
begin_poly()
dart(100)
end_poly()
register_shape("dart", get_poly())
tracer(1)
def start():
reset()
ht()
pu()
makeshapes()
resizemode("user")
def test(l=200, n=4, fun=sun, startpos=(0,0), th=2):
global tiledict
goto(startpos)
setheading(0)
tiledict = {}
a = clock()
tracer(0)
fun(l, n)
b = clock()
draw(l, n, th)
tracer(1)
c = clock()
print("Calculation: %7.4f s" % (b - a))
print("Drawing: %7.4f s" % (c - b))
print("Together: %7.4f s" % (c - a))
nk = len([x for x in tiledict if tiledict[x]])
nd = len([x for x in tiledict if not tiledict[x]])
print("%d kites and %d darts = %d pieces." % (nk, nd, nk+nd))
def demo(fun=sun):
start()
for i in range(8):
a = clock()
test(300, i, fun)
b = clock()
t = b - a
if t < 2:
sleep(2 - t)
def main():
#title("Penrose-tiling with kites and darts.")
mode("logo")
bgcolor(0.3, 0.3, 0)
demo(sun)
sleep(2)
demo(star)
pencolor("black")
goto(0,-200)
pencolor(0.7,0.7,1)
write("Please wait...",
align="center", font=('Arial Black', 36, 'bold'))
test(600, 8, startpos=(70, 117))
return "Done"
if __name__ == "__main__":
msg = main()
mainloop()
| 19.450549 | 65 | 0.504802 |
from turtle import *
from math import cos, pi
from time import clock, sleep
f = (5**0.5-1)/2.0
d = 2 * cos(3*pi/10)
def kite(l):
fl = f * l
lt(36)
fd(l)
rt(108)
fd(fl)
rt(36)
fd(fl)
rt(108)
fd(l)
rt(144)
def dart(l):
fl = f * l
lt(36)
fd(l)
rt(144)
fd(fl)
lt(36)
fd(fl)
rt(144)
fd(l)
rt(144)
def inflatekite(l, n):
if n == 0:
px, py = pos()
h, x, y = int(heading()), round(px,3), round(py,3)
tiledict[(h,x,y)] = True
return
fl = f * l
lt(36)
inflatedart(fl, n-1)
fd(l)
rt(144)
inflatekite(fl, n-1)
lt(18)
fd(l*d)
rt(162)
inflatekite(fl, n-1)
lt(36)
fd(l)
rt(180)
inflatedart(fl, n-1)
lt(36)
def inflatedart(l, n):
if n == 0:
px, py = pos()
h, x, y = int(heading()), round(px,3), round(py,3)
tiledict[(h,x,y)] = False
return
fl = f * l
inflatekite(fl, n-1)
lt(36)
fd(l)
rt(180)
inflatedart(fl, n-1)
lt(54)
fd(l*d)
rt(126)
inflatedart(fl, n-1)
fd(l)
rt(144)
def draw(l, n, th=2):
clear()
l = l * f**n
shapesize(l/100.0, l/100.0, th)
for k in tiledict:
h, x, y = k
setpos(x, y)
setheading(h)
if tiledict[k]:
shape("kite")
color("black", (0, 0.75, 0))
else:
shape("dart")
color("black", (0.75, 0, 0))
stamp()
def sun(l, n):
for i in range(5):
inflatekite(l, n)
lt(72)
def star(l,n):
for i in range(5):
inflatedart(l, n)
lt(72)
def makeshapes():
tracer(0)
begin_poly()
kite(100)
end_poly()
register_shape("kite", get_poly())
begin_poly()
dart(100)
end_poly()
register_shape("dart", get_poly())
tracer(1)
def start():
reset()
ht()
pu()
makeshapes()
resizemode("user")
def test(l=200, n=4, fun=sun, startpos=(0,0), th=2):
global tiledict
goto(startpos)
setheading(0)
tiledict = {}
a = clock()
tracer(0)
fun(l, n)
b = clock()
draw(l, n, th)
tracer(1)
c = clock()
print("Calculation: %7.4f s" % (b - a))
print("Drawing: %7.4f s" % (c - b))
print("Together: %7.4f s" % (c - a))
nk = len([x for x in tiledict if tiledict[x]])
nd = len([x for x in tiledict if not tiledict[x]])
print("%d kites and %d darts = %d pieces." % (nk, nd, nk+nd))
def demo(fun=sun):
start()
for i in range(8):
a = clock()
test(300, i, fun)
b = clock()
t = b - a
if t < 2:
sleep(2 - t)
def main():
mode("logo")
bgcolor(0.3, 0.3, 0)
demo(sun)
sleep(2)
demo(star)
pencolor("black")
goto(0,-200)
pencolor(0.7,0.7,1)
write("Please wait...",
align="center", font=('Arial Black', 36, 'bold'))
test(600, 8, startpos=(70, 117))
return "Done"
if __name__ == "__main__":
msg = main()
mainloop()
| true | true |
f73c872994b227cfbf44164f73160c58f1598ca9 | 1,897 | py | Python | mongodb/mongodb-enterprise-kubernetes/samples/kubernetes-clients/python/test_mongodb_kube_client.py | smthkissinger/docker-images | 35e868295d04fa780325ada4168381f1e80e8fe4 | [
"BSD-3-Clause"
] | 63 | 2018-02-04T03:31:22.000Z | 2022-03-07T08:27:39.000Z | mongodb/mongodb-enterprise-kubernetes/samples/kubernetes-clients/python/test_mongodb_kube_client.py | smthkissinger/docker-images | 35e868295d04fa780325ada4168381f1e80e8fe4 | [
"BSD-3-Clause"
] | 3 | 2020-06-15T03:41:03.000Z | 2020-06-15T03:41:04.000Z | mongodb/mongodb-enterprise-kubernetes/samples/kubernetes-clients/python/test_mongodb_kube_client.py | smthkissinger/docker-images | 35e868295d04fa780325ada4168381f1e80e8fe4 | [
"BSD-3-Clause"
] | 40 | 2018-01-22T16:31:16.000Z | 2022-03-08T04:40:42.000Z | #!/usr/bin/env python
from __future__ import print_function
import yaml
from mongodb_kube_client import MongoDBEnterpriseKubeClient
def parse_config_file(path):
'''
Parses the config file in the given path
'''
with open(path, 'r') as parameters:
try:
return yaml.load(parameters)
except yaml.YAMLError as exc:
print("Error when loading environment variables", exc)
def main():
parameters = parse_config_file("mongodb_kube_operator.cfg")
namespace = parameters["kubernetes"]["namespace"]
om_project = parameters["ops_manager"]["project"]
om_base_url = parameters["ops_manager"]["base_url"]
om_api_user = parameters["ops_manager"]["api_user"]
om_api_key = parameters["ops_manager"]["api_key"]
# Instantiate client wrapper
kube_client = MongoDBEnterpriseKubeClient(namespace, om_api_user, om_api_key, om_project, om_base_url)
# Create a secret and config map for project
kube_client.create_secret()
kube_client.create_config_map()
# Create a standalone, replica set and sharded cluster
kube_client.deploy_standalone(mongo_version="4.0.0", name="my-standalone")
kube_client.deploy_replica_set(mongo_version="4.0.0", name="my-replica-set", members=3)
kube_client.deploy_sharded_cluster(mongo_version="4.0.0", name="my-sharded-cluster",
num_mongod_per_shard=3, num_shards=2,
num_cfg_rs_members=3, num_mongos=2)
'''
# Delete the created deployments
kube_client.delete_mongo_process(name="my-standalone", type_plural="mongodbstandalones")
kube_client.delete_mongo_process(name="my-replica-set", type_plural="mongodbreplicasets")
kube_client.delete_mongo_process(name="my-sharded-cluster", type_plural="mongodbshardedclusters")
'''
if __name__ == '__main__':
main()
| 32.706897 | 106 | 0.702688 |
from __future__ import print_function
import yaml
from mongodb_kube_client import MongoDBEnterpriseKubeClient
def parse_config_file(path):
with open(path, 'r') as parameters:
try:
return yaml.load(parameters)
except yaml.YAMLError as exc:
print("Error when loading environment variables", exc)
def main():
parameters = parse_config_file("mongodb_kube_operator.cfg")
namespace = parameters["kubernetes"]["namespace"]
om_project = parameters["ops_manager"]["project"]
om_base_url = parameters["ops_manager"]["base_url"]
om_api_user = parameters["ops_manager"]["api_user"]
om_api_key = parameters["ops_manager"]["api_key"]
kube_client = MongoDBEnterpriseKubeClient(namespace, om_api_user, om_api_key, om_project, om_base_url)
kube_client.create_secret()
kube_client.create_config_map()
kube_client.deploy_standalone(mongo_version="4.0.0", name="my-standalone")
kube_client.deploy_replica_set(mongo_version="4.0.0", name="my-replica-set", members=3)
kube_client.deploy_sharded_cluster(mongo_version="4.0.0", name="my-sharded-cluster",
num_mongod_per_shard=3, num_shards=2,
num_cfg_rs_members=3, num_mongos=2)
if __name__ == '__main__':
main()
| true | true |
f73c8789f46756698a5563a290a28cd0a4e1b770 | 7,911 | py | Python | python_code/vnev/Lib/site-packages/wsproto/events.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 3 | 2020-03-31T10:36:31.000Z | 2020-04-23T12:01:10.000Z | python_code/vnev/Lib/site-packages/wsproto/events.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 6 | 2020-09-05T01:40:23.000Z | 2022-03-12T00:40:58.000Z | python_code/vnev/Lib/site-packages/wsproto/events.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 1 | 2020-07-09T23:13:13.000Z | 2020-07-09T23:13:13.000Z | # -*- coding: utf-8 -*-
"""
wsproto/events
~~~~~~~~~~~~~~
Events that result from processing data on a WebSocket connection.
"""
from abc import ABC
from dataclasses import dataclass, field
from typing import Generic, List, Optional, Sequence, TypeVar, Union
from .extensions import Extension
from .typing import Headers
class Event(ABC):
"""
Base class for wsproto events.
"""
pass # noqa
@dataclass(frozen=True)
class Request(Event):
"""The beginning of a Websocket connection, the HTTP Upgrade request
This event is fired when a SERVER connection receives a WebSocket
handshake request (HTTP with upgrade header).
Fields:
.. attribute:: host
(Required) The hostname, or host header value.
.. attribute:: target
(Required) The request target (path and query string)
.. attribute:: extensions
The proposed extensions.
.. attribute:: extra_headers
The additional request headers, excluding extensions, host, subprotocols,
and version headers.
.. attribute:: subprotocols
A list of the subprotocols proposed in the request, as a list
of strings.
"""
host: str
target: str
extensions: Union[Sequence[Extension], Sequence[str]] = field( # type: ignore
default_factory=list
)
extra_headers: Headers = field(default_factory=list)
subprotocols: List[str] = field(default_factory=list)
@dataclass(frozen=True)
class AcceptConnection(Event):
"""The acceptance of a Websocket upgrade request.
This event is fired when a CLIENT receives an acceptance response
from a server. It is also used to accept an upgrade request when
acting as a SERVER.
Fields:
.. attribute:: extra_headers
Any additional (non websocket related) headers present in the
acceptance response.
.. attribute:: subprotocol
The accepted subprotocol to use.
"""
subprotocol: Optional[str] = None
extensions: List[Extension] = field(default_factory=list)
extra_headers: Headers = field(default_factory=list)
@dataclass(frozen=True)
class RejectConnection(Event):
"""The rejection of a Websocket upgrade request, the HTTP response.
The ``RejectConnection`` event sends the appropriate HTTP headers to
communicate to the peer that the handshake has been rejected. You may also
send an HTTP body by setting the ``has_body`` attribute to ``True`` and then
sending one or more :class:`RejectData` events after this one. When sending
a response body, the caller should set the ``Content-Length``,
``Content-Type``, and/or ``Transfer-Encoding`` headers as appropriate.
When receiving a ``RejectConnection`` event, the ``has_body`` attribute will
in almost all cases be ``True`` (even if the server set it to ``False``) and
will be followed by at least one ``RejectData`` events, even though the data
itself might be just ``b""``. (The only scenario in which the caller
receives a ``RejectConnection`` with ``has_body == False`` is if the peer
violates sends an informational status code (1xx) other than 101.)
The ``has_body`` attribute should only be used when receiving the event. (It
has ) is False the headers must include a
content-length or transfer encoding.
Fields:
.. attribute:: headers (Headers)
The headers to send with the response.
.. attribute:: has_body
This defaults to False, but set to True if there is a body. See
also :class:`~RejectData`.
.. attribute:: status_code
The response status code.
"""
status_code: int = 400
headers: Headers = field(default_factory=list)
has_body: bool = False
@dataclass(frozen=True)
class RejectData(Event):
    """The rejection HTTP response body.

    The caller may send multiple ``RejectData`` events. The final event should
    have the ``body_finished`` attribute set to ``True``.

    Fields:

    .. attribute:: body_finished

       True if this is the final chunk of the body data.

    .. attribute:: data (bytes)

       (Required) The raw body data.
    """

    data: bytes  # one chunk of the HTTP response body
    body_finished: bool = True  # True on the final chunk
@dataclass(frozen=True)
class CloseConnection(Event):
    """The end of a WebSocket connection, represents a closure frame.

    **wsproto does not automatically send a response to a close event.** To
    comply with the RFC you MUST send a close event back to the remote WebSocket
    if you have not already sent one. The :meth:`response` method provides a
    suitable event for this purpose, and you should check if a response needs
    to be sent by checking :func:`wsproto.WSConnection.state`.

    Fields:

    .. attribute:: code

       (Required) The integer close code to indicate why the connection
       has closed.

    .. attribute:: reason

       Additional reasoning for why the connection has closed.
    """

    code: int  # RFC 6455 close code
    reason: Optional[str] = None

    def response(self) -> "CloseConnection":
        """Generate an RFC-compliant close frame to send back to the peer."""
        # Echo the same code/reason back, as required to complete the closing handshake.
        return CloseConnection(code=self.code, reason=self.reason)
T = TypeVar("T", bytes, str)
@dataclass(frozen=True)
class Message(Event, Generic[T]):
    """The websocket data message.

    Fields:

    .. attribute:: data

       (Required) The message data as byte string, can be decoded as UTF-8 for
       TEXT messages. This only represents a single chunk of data and
       not a full WebSocket message. You need to buffer and
       reassemble these chunks to get the full message.

    .. attribute:: frame_finished

       This has no semantic content, but is provided just in case some
       weird edge case user wants to be able to reconstruct the
       fragmentation pattern of the original stream.

    .. attribute:: message_finished

       True if this frame is the last one of this message, False if
       more frames are expected.
    """

    data: T  # bytes or str depending on the concrete subclass
    frame_finished: bool = True
    message_finished: bool = True
@dataclass(frozen=True)
class TextMessage(Message[str]):
    """This event is fired when a data frame with TEXT payload is received.

    Fields:

    .. attribute:: data

       The message data as a string. This only represents a single chunk
       of data and not a full WebSocket message. You need to buffer
       and reassemble these chunks to get the full message.
    """

    # https://github.com/python/mypy/issues/5744
    data: str
@dataclass(frozen=True)
class BytesMessage(Message[bytes]):
    """This event is fired when a data frame with BINARY payload is
    received.

    Fields:

    .. attribute:: data

       The message data as a byte string. This only represents a single
       chunk of data and not a full WebSocket message. You need to
       buffer and reassemble these chunks to get the full message.
    """

    # https://github.com/python/mypy/issues/5744
    data: bytes
@dataclass(frozen=True)
class Ping(Event):
    """The Ping event can be sent to trigger a ping frame and is fired
    when a Ping is received.

    **wsproto does not automatically send a pong response to a ping event.** To
    comply with the RFC you MUST send a pong event as soon as is practical. The
    :meth:`response` method provides a suitable event for this purpose.

    Fields:

    .. attribute:: payload

       An optional payload to emit with the ping frame.
    """

    payload: bytes = b""

    def response(self) -> "Pong":
        """Generate an RFC-compliant :class:`Pong` response to this ping."""
        # A pong must carry the exact payload of the ping it answers.
        return Pong(payload=self.payload)
@dataclass(frozen=True)
class Pong(Event):
    """The Pong event is fired when a Pong is received.

    Fields:

    .. attribute:: payload

       An optional payload to emit with the pong frame.
    """

    payload: bytes = b""  # mirrors the payload of the ping being answered
| 26.636364 | 82 | 0.680571 |
from abc import ABC
from dataclasses import dataclass, field
from typing import Generic, List, Optional, Sequence, TypeVar, Union
from .extensions import Extension
from .typing import Headers
class Event(ABC):
pass
@dataclass(frozen=True)
class Request(Event):
host: str
target: str
extensions: Union[Sequence[Extension], Sequence[str]] = field(
default_factory=list
)
extra_headers: Headers = field(default_factory=list)
subprotocols: List[str] = field(default_factory=list)
@dataclass(frozen=True)
class AcceptConnection(Event):
subprotocol: Optional[str] = None
extensions: List[Extension] = field(default_factory=list)
extra_headers: Headers = field(default_factory=list)
@dataclass(frozen=True)
class RejectConnection(Event):
status_code: int = 400
headers: Headers = field(default_factory=list)
has_body: bool = False
@dataclass(frozen=True)
class RejectData(Event):
data: bytes
body_finished: bool = True
@dataclass(frozen=True)
class CloseConnection(Event):
code: int
reason: Optional[str] = None
def response(self) -> "CloseConnection":
return CloseConnection(code=self.code, reason=self.reason)
T = TypeVar("T", bytes, str)
@dataclass(frozen=True)
class Message(Event, Generic[T]):
data: T
frame_finished: bool = True
message_finished: bool = True
@dataclass(frozen=True)
class TextMessage(Message[str]):
data: str
@dataclass(frozen=True)
class BytesMessage(Message[bytes]):
data: bytes
@dataclass(frozen=True)
class Ping(Event):
payload: bytes = b""
def response(self) -> "Pong":
return Pong(payload=self.payload)
@dataclass(frozen=True)
class Pong(Event):
payload: bytes = b""
| true | true |
f73c87f94ecae75b4b0455ff5a7938003fa96dba | 2,167 | py | Python | homeassistant/components/metoffice/__init__.py | mikan-megane/core | 837220cce40890e296920d33a623adbc11bd15a6 | [
"Apache-2.0"
] | 5 | 2018-10-23T14:15:05.000Z | 2021-11-26T06:38:44.000Z | homeassistant/components/metoffice/__init__.py | mikan-megane/core | 837220cce40890e296920d33a623adbc11bd15a6 | [
"Apache-2.0"
] | 79 | 2020-07-23T07:13:37.000Z | 2022-03-22T06:02:37.000Z | homeassistant/components/metoffice/__init__.py | mikan-megane/core | 837220cce40890e296920d33a623adbc11bd15a6 | [
"Apache-2.0"
] | 2 | 2019-06-11T12:13:14.000Z | 2020-12-24T23:17:53.000Z | """The Met Office integration."""
import logging
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import (
DEFAULT_SCAN_INTERVAL,
DOMAIN,
METOFFICE_COORDINATOR,
METOFFICE_DATA,
METOFFICE_NAME,
)
from .data import MetOfficeData
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["sensor", "weather"]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up a Met Office entry.

    Creates the data wrapper and update coordinator for the configured site,
    stores them in ``hass.data[DOMAIN][entry.entry_id]``, and forwards setup
    to the sensor and weather platforms. Raises ConfigEntryNotReady (so Home
    Assistant retries later) if the site or its initial data cannot be fetched.
    """
    latitude = entry.data[CONF_LATITUDE]
    longitude = entry.data[CONF_LONGITUDE]
    api_key = entry.data[CONF_API_KEY]
    site_name = entry.data[CONF_NAME]

    metoffice_data = MetOfficeData(hass, api_key, latitude, longitude)
    await metoffice_data.async_update_site()
    if metoffice_data.site_name is None:
        # Site lookup failed (e.g. API unreachable) — ask HA to retry setup.
        raise ConfigEntryNotReady()

    metoffice_coordinator = DataUpdateCoordinator(
        hass,
        _LOGGER,
        name=f"MetOffice Coordinator for {site_name}",
        update_method=metoffice_data.async_update,
        update_interval=DEFAULT_SCAN_INTERVAL,
    )

    metoffice_hass_data = hass.data.setdefault(DOMAIN, {})
    metoffice_hass_data[entry.entry_id] = {
        METOFFICE_DATA: metoffice_data,
        METOFFICE_COORDINATOR: metoffice_coordinator,
        METOFFICE_NAME: site_name,
    }

    # Fetch initial data so we have data when entities subscribe
    await metoffice_coordinator.async_refresh()
    if metoffice_data.now is None:
        raise ConfigEntryNotReady()

    hass.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry, returning True on success."""
    unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
    if unload_ok:
        hass.data[DOMAIN].pop(entry.entry_id)
        if not hass.data[DOMAIN]:
            # Drop the domain bucket entirely once the last entry is gone.
            hass.data.pop(DOMAIN)
    return unload_ok
| 30.521127 | 86 | 0.741578 |
import logging
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import (
DEFAULT_SCAN_INTERVAL,
DOMAIN,
METOFFICE_COORDINATOR,
METOFFICE_DATA,
METOFFICE_NAME,
)
from .data import MetOfficeData
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["sensor", "weather"]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
latitude = entry.data[CONF_LATITUDE]
longitude = entry.data[CONF_LONGITUDE]
api_key = entry.data[CONF_API_KEY]
site_name = entry.data[CONF_NAME]
metoffice_data = MetOfficeData(hass, api_key, latitude, longitude)
await metoffice_data.async_update_site()
if metoffice_data.site_name is None:
raise ConfigEntryNotReady()
metoffice_coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name=f"MetOffice Coordinator for {site_name}",
update_method=metoffice_data.async_update,
update_interval=DEFAULT_SCAN_INTERVAL,
)
metoffice_hass_data = hass.data.setdefault(DOMAIN, {})
metoffice_hass_data[entry.entry_id] = {
METOFFICE_DATA: metoffice_data,
METOFFICE_COORDINATOR: metoffice_coordinator,
METOFFICE_NAME: site_name,
}
await metoffice_coordinator.async_refresh()
if metoffice_data.now is None:
raise ConfigEntryNotReady()
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
if not hass.data[DOMAIN]:
hass.data.pop(DOMAIN)
return unload_ok
| true | true |
f73c8971202e7bf69a71d87fa015a6e1846fd431 | 3,202 | py | Python | docs/source/conf.py | stetsonrowles/QSDsan | a74949fcf9e6ff91e9160a75bedaf6fab2191efb | [
"NCSA",
"CNRI-Python",
"FTL"
] | null | null | null | docs/source/conf.py | stetsonrowles/QSDsan | a74949fcf9e6ff91e9160a75bedaf6fab2191efb | [
"NCSA",
"CNRI-Python",
"FTL"
] | null | null | null | docs/source/conf.py | stetsonrowles/QSDsan | a74949fcf9e6ff91e9160a75bedaf6fab2191efb | [
"NCSA",
"CNRI-Python",
"FTL"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os, sys
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../..'))
# NOTE(review): '../../../tmo' and '../../../bst' look like local checkouts of
# thermosteam and biosteam (cf. intersphinx targets below) — confirm paths.
sys.path.insert(0, os.path.abspath('../../../tmo'))
sys.path.insert(0, os.path.abspath('../../../bst'))
del os, sys

# -- Project information -----------------------------------------------------

import time, qsdsan

project = 'QSDsan'
author = 'Quantitative Sustainable Design Group'
copyright = f'2020-{time.gmtime().tm_year}, Quantitative Sustainable Design Group'

# version = qsdsan.__version__
# The full version, including alpha/beta/rc tags.
release = '0.0.1' if not qsdsan.__version__ else qsdsan.__version__

# -- General configuration ---------------------------------------------------

# Sphinx extension modules, as strings. They can be extensions coming with
# Sphinx (named 'sphinx.ext.*') or custom ones.
extensions = [
    'nbsphinx',
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.mathjax',
    'sphinx.ext.napoleon',
]

# Allow exceptions to occur in notebooks
nbsphinx_allow_errors = True

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'manni'

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
html_css_files = ['css/qsdsan.css']

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']

# -- Extension settings -------------------------------------------------------
# napoleon_custom_sections = [
#     'Reference documents']
# -- External mapping -------------------------------------------------------
intersphinx_mapping = {
'biosteam': ('https://biosteam.readthedocs.io/en/latest', None),
'thermosteam': ('https://thermosteam.readthedocs.io/en/latest', None),
'BioSTEAM': ('https://biosteam.readthedocs.io/en/latest', None),
'Thermosteam': ('https://thermosteam.readthedocs.io/en/latest', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'SALib': ('https://salib.readthedocs.io/en/latest', None),
} | 37.670588 | 82 | 0.661462 |
import os, sys
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('../../../tmo'))
sys.path.insert(0, os.path.abspath('../../../bst'))
del os, sys
import time, qsdsan
project = 'QSDsan'
author = 'Quantitative Sustainable Design Group'
copyright = f'2020-{time.gmtime().tm_year}, Quantitative Sustainable Design Group'
release = '0.0.1' if not qsdsan.__version__ else qsdsan.__version__
extensions = [
'nbsphinx',
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
]
nbsphinx_allow_errors = True
templates_path = ['_templates']
exclude_patterns = []
pygments_style = 'manni'
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
html_css_files = ['css/qsdsan.css']
intersphinx_mapping = {
'biosteam': ('https://biosteam.readthedocs.io/en/latest', None),
'thermosteam': ('https://thermosteam.readthedocs.io/en/latest', None),
'BioSTEAM': ('https://biosteam.readthedocs.io/en/latest', None),
'Thermosteam': ('https://thermosteam.readthedocs.io/en/latest', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'SALib': ('https://salib.readthedocs.io/en/latest', None),
} | true | true |
f73c8a03b2f773254414f603801062cfd6e06cc4 | 180 | py | Python | ai_starter/webapp.py | karthikreddykuna/DXC_AISTARTER_LOWCODE_NOCODE | 998564caf5b858bf5b4fd29af7e129a14ec9c61c | [
"Apache-2.0"
] | null | null | null | ai_starter/webapp.py | karthikreddykuna/DXC_AISTARTER_LOWCODE_NOCODE | 998564caf5b858bf5b4fd29af7e129a14ec9c61c | [
"Apache-2.0"
] | null | null | null | ai_starter/webapp.py | karthikreddykuna/DXC_AISTARTER_LOWCODE_NOCODE | 998564caf5b858bf5b4fd29af7e129a14ec9c61c | [
"Apache-2.0"
] | null | null | null | # Entry point for the application.
from . import app # For application discovery by the 'flask' command.
from . import dxc_app # For import side-effects of setting up routes. | 60 | 73 | 0.744444 |
from . import app
from . import dxc_app | true | true |
f73c8a06c353081ad545142699bf109c21051d5f | 88,263 | py | Python | test/functional/feature_taproot.py | widecoin-project/widecoin | 143b190a61f95a4b7d40c5da484cdde8f0c5ac3f | [
"MIT"
] | 8 | 2021-04-17T16:11:50.000Z | 2021-06-23T05:30:39.000Z | test/functional/feature_taproot.py | widecoin-project/widecoin | 143b190a61f95a4b7d40c5da484cdde8f0c5ac3f | [
"MIT"
] | 1 | 2021-04-18T11:57:59.000Z | 2021-04-18T11:57:59.000Z | test/functional/feature_taproot.py | widecoin-project/widecoin | 143b190a61f95a4b7d40c5da484cdde8f0c5ac3f | [
"MIT"
] | 7 | 2021-04-17T16:04:12.000Z | 2021-06-10T00:54:53.000Z | #!/usr/bin/env python3
# Copyright (c) 2019-2020 The Widecoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test Taproot softfork (BIPs 340-342)
from test_framework.blocktools import (
COINBASE_MATURITY,
create_coinbase,
create_block,
add_witness_commitment,
MAX_BLOCK_SIGOPS_WEIGHT,
NORMAL_GBT_REQUEST_PARAMS,
WITNESS_SCALE_FACTOR,
)
from test_framework.messages import (
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
)
from test_framework.script import (
ANNEX_TAG,
CScript,
CScriptNum,
CScriptOp,
LEAF_VERSION_TAPSCRIPT,
LegacySignatureHash,
LOCKTIME_THRESHOLD,
MAX_SCRIPT_ELEMENT_SIZE,
OP_0,
OP_1,
OP_2,
OP_3,
OP_4,
OP_5,
OP_6,
OP_7,
OP_8,
OP_9,
OP_10,
OP_11,
OP_12,
OP_16,
OP_2DROP,
OP_2DUP,
OP_CHECKMULTISIG,
OP_CHECKMULTISIGVERIFY,
OP_CHECKSIG,
OP_CHECKSIGADD,
OP_CHECKSIGVERIFY,
OP_CODESEPARATOR,
OP_DROP,
OP_DUP,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_EQUALVERIFY,
OP_IF,
OP_NOP,
OP_NOT,
OP_NOTIF,
OP_PUSHDATA1,
OP_RETURN,
OP_SWAP,
OP_VERIFY,
SIGHASH_DEFAULT,
SIGHASH_ALL,
SIGHASH_NONE,
SIGHASH_SINGLE,
SIGHASH_ANYONECANPAY,
SegwitV0SignatureHash,
TaprootSignatureHash,
is_op_success,
taproot_construct,
)
from test_framework.script_util import (
key_to_p2wpkh_script,
keyhash_to_p2pkh_script,
script_to_p2sh_script,
script_to_p2wsh_script,
)
from test_framework.test_framework import WidecoinTestFramework
from test_framework.util import assert_raises_rpc_error, assert_equal
from test_framework.key import generate_privkey, compute_xonly_pubkey, sign_schnorr, tweak_add_privkey, ECKey
from test_framework.address import (
hash160,
)
from collections import OrderedDict, namedtuple
from io import BytesIO
import json
import hashlib
import os
import random
# === Framework for building spending transactions. ===
#
# The computation is represented as a "context" dict, whose entries store potentially-unevaluated expressions that
# refer to lower-level ones. By overwriting these expression, many aspects - both high and low level - of the signing
# process can be overridden.
#
# Specifically, a context object is a dict that maps names to compositions of:
# - values
# - lists of values
# - callables which, when fed the context object as argument, produce any of these
#
# The DEFAULT_CONTEXT object specifies a standard signing process, with many overridable knobs.
#
# The get(ctx, name) function can evaluate a name, and cache its result in the context.
# getter(name) can be used to construct a callable that evaluates name. For example:
#
# ctx1 = {**DEFAULT_CONTEXT, inputs=[getter("sign"), b'\x01']}
#
# creates a context where the script inputs are a signature plus the bytes 0x01.
#
# override(expr, name1=expr1, name2=expr2, ...) can be used to cause an expression to be evaluated in a selectively
# modified context. For example:
#
# ctx2 = {**DEFAULT_CONTEXT, sighash=override(default_sighash, hashtype=SIGHASH_DEFAULT)}
#
# creates a context ctx2 where the sighash is modified to use hashtype=SIGHASH_DEFAULT. This differs from
#
# ctx3 = {**DEFAULT_CONTEXT, hashtype=SIGHASH_DEFAULT}
#
# in that ctx3 will globally use hashtype=SIGHASH_DEFAULT (including in the hashtype byte appended to the signature)
# while ctx2 only uses the modified hashtype inside the sighash calculation.
def deep_eval(ctx, expr):
    """Recursively replace any callables c in expr (including inside lists) with c(ctx)."""
    result = expr
    # A callable may itself return a callable; keep applying until settled.
    while callable(result):
        result = result(ctx)
    if isinstance(result, list):
        result = [deep_eval(ctx, item) for item in result]
    return result
# Data type to represent fully-evaluated expressions in a context dict (so we can avoid reevaluating them).
Final = namedtuple("Final", "value")
def get(ctx, name):
    """Evaluate name in context ctx, caching the result in the context."""
    assert name in ctx, "Missing '%s' in context" % name
    entry = ctx[name]
    if isinstance(entry, Final):
        # Already evaluated earlier; reuse the cached value.
        return entry.value
    # Evaluate now and store the result so later lookups are cheap.
    entry = Final(deep_eval(ctx, entry))
    ctx[name] = entry
    return entry.value
def getter(name):
    """Return a callable that evaluates name in its passed context."""
    def evaluate(ctx):
        return get(ctx, name)
    return evaluate
def override(expr, **kwargs):
    """Return a callable that evaluates expr in a selectively modified context."""
    def evaluate(ctx):
        modified = dict(ctx)
        modified.update(kwargs)
        return deep_eval(modified, expr)
    return evaluate
# === Implementations for the various default expressions in DEFAULT_CONTEXT ===
def default_hashtype(ctx):
    """Default expression for "hashtype": SIGHASH_DEFAULT for taproot, SIGHASH_ALL otherwise."""
    if get(ctx, "mode") == "taproot":
        return SIGHASH_DEFAULT
    return SIGHASH_ALL
def default_tapleaf(ctx):
    """Default expression for "tapleaf": the leaf info looked up by name in the tap structure."""
    tap = get(ctx, "tap")
    return tap.leaves[get(ctx, "leaf")]
def default_script_taproot(ctx):
    """Default expression for "script_taproot": tapleaf.script."""
    leaf_info = get(ctx, "tapleaf")
    return leaf_info.script
def default_leafversion(ctx):
    """Default expression for "leafversion": tapleaf.version."""
    leaf_info = get(ctx, "tapleaf")
    return leaf_info.version
def default_negflag(ctx):
    """Default expression for "negflag": tap.negflag."""
    tap = get(ctx, "tap")
    return tap.negflag
def default_pubkey_internal(ctx):
    """Default expression for "pubkey_internal": tap.internal_pubkey."""
    tap = get(ctx, "tap")
    return tap.internal_pubkey
def default_merklebranch(ctx):
    """Default expression for "merklebranch": tapleaf.merklebranch."""
    leaf_info = get(ctx, "tapleaf")
    return leaf_info.merklebranch
def default_controlblock(ctx):
    """Default expression for "controlblock": combine leafversion, negflag, pubkey_internal, merklebranch."""
    # First byte packs the leaf version with the output-key parity bit.
    first_byte = get(ctx, "leafversion") + get(ctx, "negflag")
    return bytes([first_byte]) + get(ctx, "pubkey_internal") + get(ctx, "merklebranch")
def default_sighash(ctx):
    """Default expression for "sighash": depending on mode, compute BIP341, BIP143, or legacy sighash."""
    tx = get(ctx, "tx")
    idx = get(ctx, "idx")
    hashtype = get(ctx, "hashtype_actual")
    mode = get(ctx, "mode")
    if mode == "taproot":
        # BIP341 signature hash
        utxos = get(ctx, "utxos")
        annex = get(ctx, "annex")
        if get(ctx, "leaf") is not None:
            # Script path spend: also commit to the leaf script, its version,
            # and the position of the last executed OP_CODESEPARATOR.
            codeseppos = get(ctx, "codeseppos")
            leaf_ver = get(ctx, "leafversion")
            script = get(ctx, "script_taproot")
            return TaprootSignatureHash(tx, utxos, hashtype, idx, scriptpath=True, script=script, leaf_ver=leaf_ver, codeseparator_pos=codeseppos, annex=annex)
        else:
            # Key path spend.
            return TaprootSignatureHash(tx, utxos, hashtype, idx, scriptpath=False, annex=annex)
    elif mode == "witv0":
        # BIP143 signature hash (commits to the amount of the UTXO being spent)
        scriptcode = get(ctx, "scriptcode")
        utxos = get(ctx, "utxos")
        return SegwitV0SignatureHash(scriptcode, tx, idx, hashtype, utxos[idx].nValue)
    else:
        # Pre-segwit signature hash
        scriptcode = get(ctx, "scriptcode")
        return LegacySignatureHash(scriptcode, tx, idx, hashtype)[0]
def default_tweak(ctx):
    """Default expression for "tweak": None if a leaf is specified, tap.tweak otherwise."""
    if get(ctx, "leaf") is not None:
        # Script path spends sign with the untweaked key.
        return None
    return get(ctx, "tap").tweak
def default_key_tweaked(ctx):
    """Default expression for "key_tweaked": key if tweak is None, tweaked with it otherwise."""
    key = get(ctx, "key")
    tweak = get(ctx, "tweak")
    return key if tweak is None else tweak_add_privkey(key, tweak)
def default_signature(ctx):
    """Default expression for "signature": BIP340 signature or ECDSA signature depending on mode."""
    sighash = get(ctx, "sighash")
    if get(ctx, "mode") != "taproot":
        # Pre-taproot spends sign the hash with ECDSA using the untweaked key.
        return get(ctx, "key").sign_ecdsa(sighash)
    # Taproot spends produce a BIP340 Schnorr signature, optionally with
    # deliberately broken R/P signs for negative testing.
    return sign_schnorr(
        get(ctx, "key_tweaked"),
        sighash,
        flip_r=get(ctx, "flag_flip_r"),
        flip_p=get(ctx, "flag_flip_p"),
    )
def default_hashtype_actual(ctx):
    """Default expression for "hashtype_actual": hashtype, unless mismatching SIGHASH_SINGLE in taproot."""
    hashtype = get(ctx, "hashtype")
    if get(ctx, "mode") != "taproot":
        return hashtype
    idx = get(ctx, "idx")
    tx = get(ctx, "tx")
    # In taproot, SIGHASH_SINGLE with no corresponding output falls back to SIGHASH_NONE.
    if (hashtype & 3) == SIGHASH_SINGLE and idx >= len(tx.vout):
        return (hashtype & ~3) | SIGHASH_NONE
    return hashtype
def default_bytes_hashtype(ctx):
    """Default expression for "bytes_hashtype": bytes([hashtype_actual]) if not 0, b"" otherwise."""
    hashtype = get(ctx, "hashtype_actual")
    # SIGHASH_DEFAULT (0) is encoded as the absence of a hashtype byte.
    return b"" if hashtype == 0 else bytes([hashtype])
def default_sign(ctx):
    """Default expression for "sign": concatenation of signature and bytes_hashtype."""
    signature = get(ctx, "signature")
    return signature + get(ctx, "bytes_hashtype")
def default_inputs_keypath(ctx):
    """Default expression for "inputs_keypath": a single signature."""
    sig = get(ctx, "sign")
    return [sig]
def default_witness_taproot(ctx):
    """Default expression for "witness_taproot", consisting of inputs, script, control block, and annex as needed."""
    annex = get(ctx, "annex")
    suffix_annex = [] if annex is None else [annex]
    if get(ctx, "leaf") is None:
        # Key path spend: just the signature (plus optional annex).
        return get(ctx, "inputs_keypath") + suffix_annex
    # Script path spend: script inputs, leaf script, control block, optional annex.
    stack = list(get(ctx, "inputs"))
    stack.append(bytes(get(ctx, "script_taproot")))
    stack.append(get(ctx, "controlblock"))
    return stack + suffix_annex
def default_witness_witv0(ctx):
    """Default expression for "witness_witv0", consisting of inputs and witness script, as needed."""
    witness_script = get(ctx, "script_witv0")
    stack = get(ctx, "inputs")
    if witness_script is None:
        # P2WPKH: the inputs already form the complete witness stack.
        return stack
    # P2WSH: the witness script is appended as the final stack element.
    return stack + [witness_script]
def default_witness(ctx):
    """Default expression for "witness", delegating to "witness_taproot" or "witness_witv0" as needed."""
    mode = get(ctx, "mode")
    if mode == "taproot":
        return get(ctx, "witness_taproot")
    if mode == "witv0":
        return get(ctx, "witness_witv0")
    # Legacy spends carry no witness data.
    return []
def default_scriptsig(ctx):
    """Default expression for "scriptsig", consisting of inputs and redeemscript, as needed.

    Direct script inputs only appear in the scriptsig for legacy spends; a
    P2SH redeemscript push is appended regardless of spend mode (P2SH can
    wrap segwit outputs too).
    """
    elems = []
    if get(ctx, "mode") == "legacy":
        elems = get(ctx, "inputs")
    redeemscript = get(ctx, "script_p2sh")
    if redeemscript is None:
        return elems
    # Build a fresh list rather than extending in place: the original used
    # `scriptsig += [...]`, which mutated the list cached under ctx["inputs"]
    # (they alias the same object in the legacy branch).
    return elems + [bytes(redeemscript)]
# The default context object: a standard signing process with overridable knobs.
DEFAULT_CONTEXT = {
    # == The main expressions to evaluate. Only override these for unusual or invalid spends. ==

    # The overall witness stack, as a list of bytes objects.
    "witness": default_witness,
    # The overall scriptsig, as a list of CScript objects (to be concatenated) and bytes objects (to be pushed)
    "scriptsig": default_scriptsig,

    # == Expressions you'll generally only override for intentionally invalid spends. ==

    # The witness stack for spending a taproot output.
    "witness_taproot": default_witness_taproot,
    # The witness stack for spending a P2WPKH/P2WSH output.
    "witness_witv0": default_witness_witv0,
    # The script inputs for a taproot key path spend.
    "inputs_keypath": default_inputs_keypath,
    # The actual hashtype to use (usually equal to hashtype, but in taproot SIGHASH_SINGLE is not always allowed).
    "hashtype_actual": default_hashtype_actual,
    # The bytes object for a full signature (including hashtype byte, if needed).
    "bytes_hashtype": default_bytes_hashtype,
    # A full script signature (bytes including hashtype, if needed)
    "sign": default_sign,
    # An ECDSA or Schnorr signature (excluding hashtype byte).
    "signature": default_signature,
    # The 32-byte tweaked key (equal to key for script path spends, or key+tweak for key path spends).
    "key_tweaked": default_key_tweaked,
    # The tweak to use (None for script path spends, the actual tweak for key path spends).
    "tweak": default_tweak,
    # The sighash value (32 bytes)
    "sighash": default_sighash,
    # The information about the chosen script path spend (TaprootLeafInfo object).
    "tapleaf": default_tapleaf,
    # The script to push, and include in the sighash, for a taproot script path spend.
    "script_taproot": default_script_taproot,
    # The internal pubkey for a taproot script path spend (32 bytes).
    "pubkey_internal": default_pubkey_internal,
    # The negation flag of the internal pubkey for a taproot script path spend.
    "negflag": default_negflag,
    # The leaf version to include in the sighash (this does not affect the one in the control block).
    "leafversion": default_leafversion,
    # The Merkle path to include in the control block for a script path spend.
    "merklebranch": default_merklebranch,
    # The control block to push for a taproot script path spend.
    "controlblock": default_controlblock,
    # Whether to produce signatures with invalid P sign (Schnorr signatures only).
    "flag_flip_p": False,
    # Whether to produce signatures with invalid R sign (Schnorr signatures only).
    "flag_flip_r": False,

    # == Parameters that can be changed without invalidating, but do have a default: ==

    # The hashtype (as an integer).
    "hashtype": default_hashtype,
    # The annex (only when mode=="taproot").
    "annex": None,
    # The codeseparator position (only when mode=="taproot").
    "codeseppos": -1,
    # The redeemscript to add to the scriptSig (if P2SH; None implies not P2SH).
    "script_p2sh": None,
    # The script to add to the witness in (if P2WSH; None implies P2WPKH)
    "script_witv0": None,
    # The leaf to use in taproot spends (if script path spend; None implies key path spend).
    "leaf": None,
    # The input arguments to provide to the executed script
    "inputs": [],

    # == Parameters to be set before evaluation: ==
    # - mode: what spending style to use ("taproot", "witv0", or "legacy").
    # - key: the (untweaked) private key to sign with (ECKey object for ECDSA, 32 bytes for Schnorr).
    # - tap: the TaprootInfo object (see taproot_construct; needed in mode=="taproot").
    # - tx: the transaction to sign.
    # - utxos: the UTXOs being spent (needed in mode=="witv0" and mode=="taproot").
    # - idx: the input position being signed.
    # - scriptcode: the scriptcode to include in legacy and witv0 sighashes.
}
def flatten(lst):
    """Return a copy of lst with any nested lists expanded in place, depth-first."""
    result = []
    for elem in lst:
        if isinstance(elem, list):
            result.extend(flatten(elem))
        else:
            result.append(elem)
    return result
def spend(tx, idx, utxos, **kwargs):
    """Sign transaction input idx of tx, provided utxos is the list of outputs being spent.

    Additional arguments may be provided that override any aspect of the signing process.
    See DEFAULT_CONTEXT above for what can be overridden, and what must be provided.

    Returns a (scriptsig, witness_stack) tuple for the input.
    """
    ctx = {**DEFAULT_CONTEXT, "tx":tx, "idx":idx, "utxos":utxos, **kwargs}

    def to_script(elem):
        """If fed a CScript, return it; if fed bytes, return a CScript that pushes it."""
        if isinstance(elem, CScript):
            return elem
        else:
            return CScript([elem])

    scriptsig_list = flatten(get(ctx, "scriptsig"))
    # CScript elements are concatenated as raw script bytes; plain bytes become pushes.
    scriptsig = CScript(b"".join(bytes(to_script(elem)) for elem in scriptsig_list))
    witness_stack = flatten(get(ctx, "witness"))
    return (scriptsig, witness_stack)
# === Spender objects ===
#
# Each spender is a tuple of:
# - A scriptPubKey which is to be spent from (CScript)
# - A comment describing the test (string)
# - Whether the spending (on itself) is expected to be standard (bool)
# - A tx-signing lambda returning (scriptsig, witness_stack), taking as inputs:
# - A transaction to sign (CTransaction)
# - An input position (int)
# - The spent UTXOs by this transaction (list of CTxOut)
# - Whether to produce a valid spend (bool)
# - A string with an expected error message for failure case if known
# - The (pre-taproot) sigops weight consumed by a successful spend
# - Whether this spend cannot fail
# - Whether this test demands being placed in a txin with no corresponding txout (for testing SIGHASH_SINGLE behavior)
Spender = namedtuple("Spender", "script,comment,is_standard,sat_function,err_msg,sigops_weight,no_fail,need_vin_vout_mismatch")
def make_spender(comment, *, tap=None, witv0=False, script=None, pkh=None, p2sh=False, spk_mutate_pre_p2sh=None, failure=None, standard=True, err_msg=None, sigops_weight=0, need_vin_vout_mismatch=False, **kwargs):
    """Helper for constructing Spender objects using the context signing framework.

    * tap: a TaprootInfo object (see taproot_construct), for Taproot spends (cannot be combined with pkh, witv0, or script)
    * witv0: boolean indicating the use of witness v0 spending (needs one of script or pkh)
    * script: the actual script executed (for bare/P2WSH/P2SH spending)
    * pkh: the public key for P2PKH or P2WPKH spending
    * p2sh: whether the output is P2SH wrapper (this is supported even for Taproot, where it makes the output unencumbered)
    * spk_mutate_pre_p2sh: a callable to be applied to the script (before potentially P2SH-wrapping it)
    * failure: a dict of entries to override in the context when intentionally failing to spend (if None, no_fail will be set)
    * standard: whether the (valid version of) spending is expected to be standard
    * err_msg: a string with an expected error message for failure (or None, if not cared about)
    * sigops_weight: the pre-taproot sigops weight consumed by a successful spend
    * need_vin_vout_mismatch: whether this test requires being tested in a transaction input that has no corresponding
      transaction output.

    Any remaining keyword arguments are merged into the signing context (see DEFAULT_CONTEXT).
    """
    conf = dict()
    # Compute scriptPubKey and set useful defaults based on the inputs.
    if witv0:
        # Witness v0 spending: P2WPKH (pkh given) or P2WSH (script given).
        assert tap is None
        conf["mode"] = "witv0"
        if pkh is not None:
            # P2WPKH
            assert script is None
            pubkeyhash = hash160(pkh)
            spk = key_to_p2wpkh_script(pkh)
            conf["scriptcode"] = keyhash_to_p2pkh_script(pubkeyhash)
            conf["script_witv0"] = None
            conf["inputs"] = [getter("sign"), pkh]
        elif script is not None:
            # P2WSH
            spk = script_to_p2wsh_script(script)
            conf["scriptcode"] = script
            conf["script_witv0"] = script
        else:
            assert False
    elif tap is None:
        # Pre-segwit spending: P2PKH (pkh given) or bare script (script given).
        conf["mode"] = "legacy"
        if pkh is not None:
            # P2PKH
            assert script is None
            pubkeyhash = hash160(pkh)
            spk = keyhash_to_p2pkh_script(pubkeyhash)
            conf["scriptcode"] = spk
            conf["inputs"] = [getter("sign"), pkh]
        elif script is not None:
            # bare
            spk = script
            conf["scriptcode"] = script
        else:
            assert False
    else:
        # Taproot spending: the scriptPubKey comes from the TaprootInfo object.
        assert script is None
        conf["mode"] = "taproot"
        conf["tap"] = tap
        spk = tap.scriptPubKey
    if spk_mutate_pre_p2sh is not None:
        spk = spk_mutate_pre_p2sh(spk)
    if p2sh:
        # P2SH wrapper can be combined with anything else
        conf["script_p2sh"] = spk
        spk = script_to_p2sh_script(spk)
    # Caller-supplied context entries override the defaults computed above.
    conf = {**conf, **kwargs}
    def sat_fn(tx, idx, utxos, valid):
        # Satisfaction function: sign with the success context, or with the
        # failure overrides applied when an intentionally-invalid spend is wanted.
        if valid:
            return spend(tx, idx, utxos, **conf)
        else:
            assert failure is not None
            return spend(tx, idx, utxos, **{**conf, **failure})
    return Spender(script=spk, comment=comment, is_standard=standard, sat_function=sat_fn, err_msg=err_msg, sigops_weight=sigops_weight, no_fail=failure is None, need_vin_vout_mismatch=need_vin_vout_mismatch)
def add_spender(spenders, *args, **kwargs):
    """Construct a Spender via make_spender and append it to the spenders list."""
    spender = make_spender(*args, **kwargs)
    spenders.append(spender)
# === Helpers for the test ===
def random_checksig_style(pubkey):
    """Create a random CHECKSIG* tapscript that would succeed with only the valid signature on the witness stack."""
    opcode = random.choice([OP_CHECKSIG, OP_CHECKSIGVERIFY, OP_CHECKSIGADD])
    if opcode == OP_CHECKSIGADD:
        # Pick an accumulator value (including extremes) and check it was incremented by one.
        start = random.choice([0, 0x7fffffff, -0x7fffffff])
        script = CScript([start, pubkey, opcode, start + 1, OP_EQUAL])
    elif opcode == OP_CHECKSIGVERIFY:
        # VERIFY consumes the result, so push an explicit success value afterwards.
        script = CScript([pubkey, opcode, OP_1])
    else:
        script = CScript([pubkey, opcode])
    return bytes(script)
def random_bytes(n):
    """Return a random bytes object of length n."""
    buf = bytearray(n)
    for pos in range(n):
        buf[pos] = random.getrandbits(8)
    return bytes(buf)
def bitflipper(expr):
    """Return a callable that evaluates expr and returns it with a random bitflip."""
    def flipped(ctx):
        data = deep_eval(ctx, expr)
        assert isinstance(data, bytes)
        # Flip one uniformly-chosen bit of the little-endian integer view.
        nbits = len(data) * 8
        as_int = int.from_bytes(data, 'little')
        return (as_int ^ (1 << random.randrange(nbits))).to_bytes(len(data), 'little')
    return flipped
def zero_appender(expr):
    """Return a callable that evaluates expr and returns it with a zero added."""
    def appended(ctx):
        return deep_eval(ctx, expr) + b"\x00"
    return appended
def byte_popper(expr):
    """Return a callable that evaluates expr and returns it with its last byte removed."""
    def popped(ctx):
        return deep_eval(ctx, expr)[:-1]
    return popped
# Expected error strings
# These dicts are splatted into add_spender()/make_spender() calls to declare
# the consensus/policy error message the failing variant of a spend must produce.
ERR_SIG_SIZE = {"err_msg": "Invalid Schnorr signature size"}
ERR_SIG_HASHTYPE = {"err_msg": "Invalid Schnorr signature hash type"}
ERR_SIG_SCHNORR = {"err_msg": "Invalid Schnorr signature"}
ERR_OP_RETURN = {"err_msg": "OP_RETURN was encountered"}
ERR_CONTROLBLOCK_SIZE = {"err_msg": "Invalid Taproot control block size"}
ERR_WITNESS_PROGRAM_MISMATCH = {"err_msg": "Witness program hash mismatch"}
ERR_PUSH_LIMIT = {"err_msg": "Push value size limit exceeded"}
ERR_DISABLED_OPCODE = {"err_msg": "Attempted to use a disabled opcode"}
ERR_TAPSCRIPT_CHECKMULTISIG = {"err_msg": "OP_CHECKMULTISIG(VERIFY) is not available in tapscript"}
ERR_MINIMALIF = {"err_msg": "OP_IF/NOTIF argument must be minimal in tapscript"}
ERR_UNKNOWN_PUBKEY = {"err_msg": "Public key is neither compressed or uncompressed"}
ERR_STACK_SIZE = {"err_msg": "Stack size limit exceeded"}
ERR_CLEANSTACK = {"err_msg": "Stack size must be exactly one after execution"}
ERR_STACK_EMPTY = {"err_msg": "Operation not valid with the current stack size"}
ERR_SIGOPS_RATIO = {"err_msg": "Too much signature validation relative to witness weight"}
ERR_UNDECODABLE = {"err_msg": "Opcode missing or not understood"}
ERR_NO_SUCCESS = {"err_msg": "Script evaluated without error but finished with a false/empty top stack element"}
ERR_EMPTY_WITNESS = {"err_msg": "Witness program was passed an empty witness"}
ERR_CHECKSIGVERIFY = {"err_msg": "Script failed an OP_CHECKSIGVERIFY operation"}
# All sighash modes usable in (pre-taproot) ECDSA signatures.
VALID_SIGHASHES_ECDSA = [
    SIGHASH_ALL,
    SIGHASH_NONE,
    SIGHASH_SINGLE,
    SIGHASH_ANYONECANPAY + SIGHASH_ALL,
    SIGHASH_ANYONECANPAY + SIGHASH_NONE,
    SIGHASH_ANYONECANPAY + SIGHASH_SINGLE
]
# Taproot additionally permits SIGHASH_DEFAULT (0).
VALID_SIGHASHES_TAPROOT = [SIGHASH_DEFAULT] + VALID_SIGHASHES_ECDSA
# Split of the taproot sighashes into those that use SIGHASH_SINGLE semantics and the rest.
VALID_SIGHASHES_TAPROOT_SINGLE = [
    SIGHASH_SINGLE,
    SIGHASH_ANYONECANPAY + SIGHASH_SINGLE
]
VALID_SIGHASHES_TAPROOT_NO_SINGLE = [h for h in VALID_SIGHASHES_TAPROOT if h not in VALID_SIGHASHES_TAPROOT_SINGLE]
# Reusable context-override fragments for constructing failing spend variants.
SIGHASH_BITFLIP = {"failure": {"sighash": bitflipper(default_sighash)}}
SIG_POP_BYTE = {"failure": {"sign": byte_popper(default_sign)}}
SINGLE_SIG = {"inputs": [getter("sign")]}
SIG_ADD_ZERO = {"failure": {"sign": zero_appender(default_sign)}}
# Output amount floor and fee used by the test transactions (satoshis).
DUST_LIMIT = 600
MIN_FEE = 50000
# === Actual test cases ===
def spenders_taproot_active():
"""Return a list of Spenders for testing post-Taproot activation behavior."""
secs = [generate_privkey() for _ in range(8)]
pubs = [compute_xonly_pubkey(sec)[0] for sec in secs]
spenders = []
# == Tests for BIP340 signature validation. ==
# These are primarily tested through the test vectors implemented in libsecp256k1, and in src/tests/key_tests.cpp.
# Some things are tested programmatically as well here.
tap = taproot_construct(pubs[0])
# Test with key with bit flipped.
add_spender(spenders, "sig/key", tap=tap, key=secs[0], failure={"key_tweaked": bitflipper(default_key_tweaked)}, **ERR_SIG_SCHNORR)
# Test with sighash with bit flipped.
add_spender(spenders, "sig/sighash", tap=tap, key=secs[0], failure={"sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)
# Test with invalid R sign.
add_spender(spenders, "sig/flip_r", tap=tap, key=secs[0], failure={"flag_flip_r": True}, **ERR_SIG_SCHNORR)
# Test with invalid P sign.
add_spender(spenders, "sig/flip_p", tap=tap, key=secs[0], failure={"flag_flip_p": True}, **ERR_SIG_SCHNORR)
# Test with signature with bit flipped.
add_spender(spenders, "sig/bitflip", tap=tap, key=secs[0], failure={"signature": bitflipper(default_signature)}, **ERR_SIG_SCHNORR)
# == Tests for signature hashing ==
# Run all tests once with no annex, and once with a valid random annex.
for annex in [None, lambda _: bytes([ANNEX_TAG]) + random_bytes(random.randrange(0, 250))]:
# Non-empty annex is non-standard
no_annex = annex is None
# Sighash mutation tests (test all sighash combinations)
for hashtype in VALID_SIGHASHES_TAPROOT:
common = {"annex": annex, "hashtype": hashtype, "standard": no_annex}
# Pure pubkey
tap = taproot_construct(pubs[0])
add_spender(spenders, "sighash/purepk", tap=tap, key=secs[0], **common, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
# Pubkey/P2PK script combination
scripts = [("s0", CScript(random_checksig_style(pubs[1])))]
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "sighash/keypath_hashtype_%x" % hashtype, tap=tap, key=secs[0], **common, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/scriptpath_hashtype_%x" % hashtype, tap=tap, leaf="s0", key=secs[1], **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
# Test SIGHASH_SINGLE behavior in combination with mismatching outputs
if hashtype in VALID_SIGHASHES_TAPROOT_SINGLE:
add_spender(spenders, "sighash/keypath_hashtype_mis_%x" % hashtype, tap=tap, key=secs[0], annex=annex, standard=no_annex, hashtype_actual=random.choice(VALID_SIGHASHES_TAPROOT_NO_SINGLE), failure={"hashtype_actual": hashtype}, **ERR_SIG_HASHTYPE, need_vin_vout_mismatch=True)
add_spender(spenders, "sighash/scriptpath_hashtype_mis_%x" % hashtype, tap=tap, leaf="s0", key=secs[1], annex=annex, standard=no_annex, hashtype_actual=random.choice(VALID_SIGHASHES_TAPROOT_NO_SINGLE), **SINGLE_SIG, failure={"hashtype_actual": hashtype}, **ERR_SIG_HASHTYPE, need_vin_vout_mismatch=True)
# Test OP_CODESEPARATOR impact on sighashing.
hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)
common = {"annex": annex, "hashtype": hashtype, "standard": no_annex}
scripts = [
("pk_codesep", CScript(random_checksig_style(pubs[1]) + bytes([OP_CODESEPARATOR]))), # codesep after checksig
("codesep_pk", CScript(bytes([OP_CODESEPARATOR]) + random_checksig_style(pubs[1]))), # codesep before checksig
("branched_codesep", CScript([random_bytes(random.randrange(511)), OP_DROP, OP_IF, OP_CODESEPARATOR, pubs[0], OP_ELSE, OP_CODESEPARATOR, pubs[1], OP_ENDIF, OP_CHECKSIG])), # branch dependent codesep
]
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "sighash/pk_codesep", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/codesep_pk", tap=tap, leaf="codesep_pk", key=secs[1], codeseppos=0, **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/branched_codesep/left", tap=tap, leaf="branched_codesep", key=secs[0], codeseppos=3, **common, inputs=[getter("sign"), b'\x01'], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/branched_codesep/right", tap=tap, leaf="branched_codesep", key=secs[1], codeseppos=6, **common, inputs=[getter("sign"), b''], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
# Reusing the scripts above, test that various features affect the sighash.
add_spender(spenders, "sighash/annex", tap=tap, leaf="pk_codesep", key=secs[1], hashtype=hashtype, standard=False, **SINGLE_SIG, annex=bytes([ANNEX_TAG]), failure={"sighash": override(default_sighash, annex=None)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/script", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, script_taproot=tap.leaves["codesep_pk"].script)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/leafver", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, leafversion=random.choice([x & 0xFE for x in range(0x100) if x & 0xFE != 0xC0]))}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, leaf=None)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/keypath", tap=tap, key=secs[0], **common, failure={"sighash": override(default_sighash, leaf="pk_codesep")}, **ERR_SIG_SCHNORR)
# Test that invalid hashtypes don't work, both in key path and script path spends
hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)
for invalid_hashtype in [x for x in range(0x100) if x not in VALID_SIGHASHES_TAPROOT]:
add_spender(spenders, "sighash/keypath_unk_hashtype_%x" % invalid_hashtype, tap=tap, key=secs[0], hashtype=hashtype, failure={"hashtype": invalid_hashtype}, **ERR_SIG_HASHTYPE)
add_spender(spenders, "sighash/scriptpath_unk_hashtype_%x" % invalid_hashtype, tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=hashtype, failure={"hashtype": invalid_hashtype}, **ERR_SIG_HASHTYPE)
# Test that hashtype 0 cannot have a hashtype byte, and 1 must have one.
add_spender(spenders, "sighash/hashtype0_byte_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_DEFAULT])}, **ERR_SIG_HASHTYPE)
add_spender(spenders, "sighash/hashtype0_byte_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_DEFAULT])}, **ERR_SIG_HASHTYPE)
add_spender(spenders, "sighash/hashtype1_byte_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype1_byte_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
# Test that hashtype 0 and hashtype 1 cannot be transmuted into each other.
add_spender(spenders, "sighash/hashtype0to1_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_ALL])}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype0to1_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_ALL])}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype1to0_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype1to0_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
# Test aspects of signatures with unusual lengths
for hashtype in [SIGHASH_DEFAULT, random.choice(VALID_SIGHASHES_TAPROOT)]:
scripts = [
("csv", CScript([pubs[2], OP_CHECKSIGVERIFY, OP_1])),
("cs_pos", CScript([pubs[2], OP_CHECKSIG])),
("csa_pos", CScript([OP_0, pubs[2], OP_CHECKSIGADD, OP_1, OP_EQUAL])),
("cs_neg", CScript([pubs[2], OP_CHECKSIG, OP_NOT])),
("csa_neg", CScript([OP_2, pubs[2], OP_CHECKSIGADD, OP_2, OP_EQUAL]))
]
random.shuffle(scripts)
tap = taproot_construct(pubs[3], scripts)
# Empty signatures
add_spender(spenders, "siglen/empty_keypath", tap=tap, key=secs[3], hashtype=hashtype, failure={"sign": b""}, **ERR_SIG_SIZE)
add_spender(spenders, "siglen/empty_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_CHECKSIGVERIFY)
add_spender(spenders, "siglen/empty_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_NO_SUCCESS)
add_spender(spenders, "siglen/empty_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_NO_SUCCESS)
add_spender(spenders, "siglen/empty_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": lambda _: random_bytes(random.randrange(1, 63))}, **ERR_SIG_SIZE)
add_spender(spenders, "siglen/empty_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": lambda _: random_bytes(random.randrange(66, 100))}, **ERR_SIG_SIZE)
# Appending a zero byte to signatures invalidates them
add_spender(spenders, "siglen/padzero_keypath", tap=tap, key=secs[3], hashtype=hashtype, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
# Removing the last byte from signatures invalidates them
add_spender(spenders, "siglen/popbyte_keypath", tap=tap, key=secs[3], hashtype=hashtype, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
# Verify that an invalid signature is not allowed, not even when the CHECKSIG* is expected to fail.
add_spender(spenders, "siglen/invalid_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": default_sign, "sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "siglen/invalid_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": default_sign, "sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)
# == Test that BIP341 spending only applies to witness version 1, program length 32, no P2SH ==
for p2sh in [False, True]:
for witver in range(1, 17):
for witlen in [20, 31, 32, 33]:
def mutate(spk):
prog = spk[2:]
assert len(prog) == 32
if witlen < 32:
prog = prog[0:witlen]
elif witlen > 32:
prog += bytes([0 for _ in range(witlen - 32)])
return CScript([CScriptOp.encode_op_n(witver), prog])
scripts = [("s0", CScript([pubs[0], OP_CHECKSIG])), ("dummy", CScript([OP_RETURN]))]
tap = taproot_construct(pubs[1], scripts)
if not p2sh and witver == 1 and witlen == 32:
add_spender(spenders, "applic/keypath", p2sh=p2sh, spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[1], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "applic/scriptpath", p2sh=p2sh, leaf="s0", spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[0], **SINGLE_SIG, failure={"leaf": "dummy"}, **ERR_OP_RETURN)
else:
add_spender(spenders, "applic/keypath", p2sh=p2sh, spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[1], standard=False)
add_spender(spenders, "applic/scriptpath", p2sh=p2sh, leaf="s0", spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[0], **SINGLE_SIG, standard=False)
# == Test various aspects of BIP341 spending paths ==
# A set of functions that compute the hashing partner in a Merkle tree, designed to exercise
# edge cases. This relies on the taproot_construct feature that a lambda can be passed in
# instead of a subtree, to compute the partner to be hashed with.
PARTNER_MERKLE_FN = [
# Combine with itself
lambda h: h,
# Combine with hash 0
lambda h: bytes([0 for _ in range(32)]),
# Combine with hash 2^256-1
lambda h: bytes([0xff for _ in range(32)]),
# Combine with itself-1 (BE)
lambda h: (int.from_bytes(h, 'big') - 1).to_bytes(32, 'big'),
# Combine with itself+1 (BE)
lambda h: (int.from_bytes(h, 'big') + 1).to_bytes(32, 'big'),
# Combine with itself-1 (LE)
lambda h: (int.from_bytes(h, 'little') - 1).to_bytes(32, 'big'),
# Combine with itself+1 (LE)
lambda h: (int.from_bytes(h, 'little') + 1).to_bytes(32, 'little'),
# Combine with random bitflipped version of self.
lambda h: (int.from_bytes(h, 'little') ^ (1 << random.randrange(256))).to_bytes(32, 'little')
]
# Start with a tree of that has depth 1 for "128deep" and depth 2 for "129deep".
scripts = [("128deep", CScript([pubs[0], OP_CHECKSIG])), [("129deep", CScript([pubs[0], OP_CHECKSIG])), random.choice(PARTNER_MERKLE_FN)]]
# Add 127 nodes on top of that tree, so that "128deep" and "129deep" end up at their designated depths.
for _ in range(127):
scripts = [scripts, random.choice(PARTNER_MERKLE_FN)]
tap = taproot_construct(pubs[0], scripts)
# Test that spends with a depth of 128 work, but 129 doesn't (even with a tree with weird Merkle branches in it).
add_spender(spenders, "spendpath/merklelimit", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"leaf": "129deep"}, **ERR_CONTROLBLOCK_SIZE)
# Test that flipping the negation bit invalidates spends.
add_spender(spenders, "spendpath/negflag", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"negflag": lambda ctx: 1 - default_negflag(ctx)}, **ERR_WITNESS_PROGRAM_MISMATCH)
# Test that bitflips in the Merkle branch invalidate it.
add_spender(spenders, "spendpath/bitflipmerkle", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"merklebranch": bitflipper(default_merklebranch)}, **ERR_WITNESS_PROGRAM_MISMATCH)
# Test that bitflips in the internal pubkey invalidate it.
add_spender(spenders, "spendpath/bitflippubkey", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"pubkey_internal": bitflipper(default_pubkey_internal)}, **ERR_WITNESS_PROGRAM_MISMATCH)
# Test that empty witnesses are invalid.
add_spender(spenders, "spendpath/emptywit", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"witness": []}, **ERR_EMPTY_WITNESS)
# Test that adding garbage to the control block invalidates it.
add_spender(spenders, "spendpath/padlongcontrol", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_controlblock(ctx) + random_bytes(random.randrange(1, 32))}, **ERR_CONTROLBLOCK_SIZE)
# Test that truncating the control block invalidates it.
add_spender(spenders, "spendpath/trunclongcontrol", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:random.randrange(1, 32)]}, **ERR_CONTROLBLOCK_SIZE)
scripts = [("s", CScript([pubs[0], OP_CHECKSIG]))]
tap = taproot_construct(pubs[1], scripts)
# Test that adding garbage to the control block invalidates it.
add_spender(spenders, "spendpath/padshortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_controlblock(ctx) + random_bytes(random.randrange(1, 32))}, **ERR_CONTROLBLOCK_SIZE)
# Test that truncating the control block invalidates it.
add_spender(spenders, "spendpath/truncshortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:random.randrange(1, 32)]}, **ERR_CONTROLBLOCK_SIZE)
# Test that truncating the control block to 1 byte ("-1 Merkle length") invalidates it
add_spender(spenders, "spendpath/trunc1shortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:1]}, **ERR_CONTROLBLOCK_SIZE)
# == Test BIP342 edge cases ==
csa_low_val = random.randrange(0, 17) # Within range for OP_n
csa_low_result = csa_low_val + 1
csa_high_val = random.randrange(17, 100) if random.getrandbits(1) else random.randrange(-100, -1) # Outside OP_n range
csa_high_result = csa_high_val + 1
OVERSIZE_NUMBER = 2**31
assert_equal(len(CScriptNum.encode(CScriptNum(OVERSIZE_NUMBER))), 6)
assert_equal(len(CScriptNum.encode(CScriptNum(OVERSIZE_NUMBER-1))), 5)
big_choices = []
big_scriptops = []
for i in range(1000):
r = random.randrange(len(pubs))
big_choices.append(r)
big_scriptops += [pubs[r], OP_CHECKSIGVERIFY]
def big_spend_inputs(ctx):
"""Helper function to construct the script input for t33/t34 below."""
# Instead of signing 999 times, precompute signatures for every (key, hashtype) combination
sigs = {}
for ht in VALID_SIGHASHES_TAPROOT:
for k in range(len(pubs)):
sigs[(k, ht)] = override(default_sign, hashtype=ht, key=secs[k])(ctx)
num = get(ctx, "num")
return [sigs[(big_choices[i], random.choice(VALID_SIGHASHES_TAPROOT))] for i in range(num - 1, -1, -1)]
# Various BIP342 features
scripts = [
# 0) drop stack element and OP_CHECKSIG
("t0", CScript([OP_DROP, pubs[1], OP_CHECKSIG])),
# 1) normal OP_CHECKSIG
("t1", CScript([pubs[1], OP_CHECKSIG])),
# 2) normal OP_CHECKSIGVERIFY
("t2", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1])),
# 3) Hypothetical OP_CHECKMULTISIG script that takes a single sig as input
("t3", CScript([OP_0, OP_SWAP, OP_1, pubs[1], OP_1, OP_CHECKMULTISIG])),
# 4) Hypothetical OP_CHECKMULTISIGVERIFY script that takes a single sig as input
("t4", CScript([OP_0, OP_SWAP, OP_1, pubs[1], OP_1, OP_CHECKMULTISIGVERIFY, OP_1])),
# 5) OP_IF script that needs a true input
("t5", CScript([OP_IF, pubs[1], OP_CHECKSIG, OP_ELSE, OP_RETURN, OP_ENDIF])),
# 6) OP_NOTIF script that needs a true input
("t6", CScript([OP_NOTIF, OP_RETURN, OP_ELSE, pubs[1], OP_CHECKSIG, OP_ENDIF])),
# 7) OP_CHECKSIG with an empty key
("t7", CScript([OP_0, OP_CHECKSIG])),
# 8) OP_CHECKSIGVERIFY with an empty key
("t8", CScript([OP_0, OP_CHECKSIGVERIFY, OP_1])),
# 9) normal OP_CHECKSIGADD that also ensures return value is correct
("t9", CScript([csa_low_val, pubs[1], OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),
# 10) OP_CHECKSIGADD with empty key
("t10", CScript([csa_low_val, OP_0, OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),
# 11) OP_CHECKSIGADD with missing counter stack element
("t11", CScript([pubs[1], OP_CHECKSIGADD, OP_1, OP_EQUAL])),
# 12) OP_CHECKSIG that needs invalid signature
("t12", CScript([pubs[1], OP_CHECKSIGVERIFY, pubs[0], OP_CHECKSIG, OP_NOT])),
# 13) OP_CHECKSIG with empty key that needs invalid signature
("t13", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_CHECKSIG, OP_NOT])),
# 14) OP_CHECKSIGADD that needs invalid signature
("t14", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, pubs[0], OP_CHECKSIGADD, OP_NOT])),
# 15) OP_CHECKSIGADD with empty key that needs invalid signature
("t15", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_0, OP_CHECKSIGADD, OP_NOT])),
# 16) OP_CHECKSIG with unknown pubkey type
("t16", CScript([OP_1, OP_CHECKSIG])),
# 17) OP_CHECKSIGADD with unknown pubkey type
("t17", CScript([OP_0, OP_1, OP_CHECKSIGADD])),
# 18) OP_CHECKSIGVERIFY with unknown pubkey type
("t18", CScript([OP_1, OP_CHECKSIGVERIFY, OP_1])),
# 19) script longer than 10000 bytes and over 201 non-push opcodes
("t19", CScript([OP_0, OP_0, OP_2DROP] * 10001 + [pubs[1], OP_CHECKSIG])),
# 20) OP_CHECKSIGVERIFY with empty key
("t20", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_0, OP_CHECKSIGVERIFY, OP_1])),
# 21) Script that grows the stack to 1000 elements
("t21", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1] + [OP_DUP] * 999 + [OP_DROP] * 999)),
# 22) Script that grows the stack to 1001 elements
("t22", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1] + [OP_DUP] * 1000 + [OP_DROP] * 1000)),
# 23) Script that expects an input stack of 1000 elements
("t23", CScript([OP_DROP] * 999 + [pubs[1], OP_CHECKSIG])),
# 24) Script that expects an input stack of 1001 elements
("t24", CScript([OP_DROP] * 1000 + [pubs[1], OP_CHECKSIG])),
# 25) Script that pushes a MAX_SCRIPT_ELEMENT_SIZE-bytes element
("t25", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE), OP_DROP, pubs[1], OP_CHECKSIG])),
# 26) Script that pushes a (MAX_SCRIPT_ELEMENT_SIZE+1)-bytes element
("t26", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, pubs[1], OP_CHECKSIG])),
# 27) CHECKSIGADD that must fail because numeric argument number is >4 bytes
("t27", CScript([CScriptNum(OVERSIZE_NUMBER), pubs[1], OP_CHECKSIGADD])),
# 28) Pushes random CScriptNum value, checks OP_CHECKSIGADD result
("t28", CScript([csa_high_val, pubs[1], OP_CHECKSIGADD, csa_high_result, OP_EQUAL])),
# 29) CHECKSIGADD that succeeds with proper sig because numeric argument number is <=4 bytes
("t29", CScript([CScriptNum(OVERSIZE_NUMBER-1), pubs[1], OP_CHECKSIGADD])),
# 30) Variant of t1 with "normal" 33-byte pubkey
("t30", CScript([b'\x03' + pubs[1], OP_CHECKSIG])),
# 31) Variant of t2 with "normal" 33-byte pubkey
("t31", CScript([b'\x02' + pubs[1], OP_CHECKSIGVERIFY, OP_1])),
# 32) Variant of t28 with "normal" 33-byte pubkey
("t32", CScript([csa_high_val, b'\x03' + pubs[1], OP_CHECKSIGADD, csa_high_result, OP_EQUAL])),
# 33) 999-of-999 multisig
("t33", CScript(big_scriptops[:1998] + [OP_1])),
# 34) 1000-of-1000 multisig
("t34", CScript(big_scriptops[:2000] + [OP_1])),
# 35) Variant of t9 that uses a non-minimally encoded input arg
("t35", CScript([bytes([csa_low_val]), pubs[1], OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),
# 36) Empty script
("t36", CScript([])),
]
# Add many dummies to test huge trees
for j in range(100000):
scripts.append((None, CScript([OP_RETURN, random.randrange(100000)])))
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
common = {
"hashtype": hashtype,
"key": secs[1],
"tap": tap,
}
# Test that MAX_SCRIPT_ELEMENT_SIZE byte stack element inputs are valid, but not one more (and 80 bytes is standard but 81 is not).
add_spender(spenders, "tapscript/inputmaxlimit", leaf="t0", **common, standard=False, inputs=[getter("sign"), random_bytes(MAX_SCRIPT_ELEMENT_SIZE)], failure={"inputs": [getter("sign"), random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1)]}, **ERR_PUSH_LIMIT)
add_spender(spenders, "tapscript/input80limit", leaf="t0", **common, inputs=[getter("sign"), random_bytes(80)])
add_spender(spenders, "tapscript/input81limit", leaf="t0", **common, standard=False, inputs=[getter("sign"), random_bytes(81)])
# Test that OP_CHECKMULTISIG and OP_CHECKMULTISIGVERIFY cause failure, but OP_CHECKSIG and OP_CHECKSIGVERIFY work.
add_spender(spenders, "tapscript/disabled_checkmultisig", leaf="t1", **common, **SINGLE_SIG, failure={"leaf": "t3"}, **ERR_TAPSCRIPT_CHECKMULTISIG)
add_spender(spenders, "tapscript/disabled_checkmultisigverify", leaf="t2", **common, **SINGLE_SIG, failure={"leaf": "t4"}, **ERR_TAPSCRIPT_CHECKMULTISIG)
# Test that OP_IF and OP_NOTIF do not accept non-0x01 as truth value (the MINIMALIF rule is consensus in Tapscript)
add_spender(spenders, "tapscript/minimalif", leaf="t5", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x02']}, **ERR_MINIMALIF)
add_spender(spenders, "tapscript/minimalnotif", leaf="t6", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x03']}, **ERR_MINIMALIF)
add_spender(spenders, "tapscript/minimalif", leaf="t5", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x0001']}, **ERR_MINIMALIF)
add_spender(spenders, "tapscript/minimalnotif", leaf="t6", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x0100']}, **ERR_MINIMALIF)
# Test that 1-byte public keys (which are unknown) are acceptable but nonstandard with unrelated signatures, but 0-byte public keys are not valid.
add_spender(spenders, "tapscript/unkpk/checksig", leaf="t16", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t7"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/unkpk/checksigadd", leaf="t17", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/unkpk/checksigverify", leaf="t18", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t8"}, **ERR_UNKNOWN_PUBKEY)
# Test that 33-byte public keys (which are unknown) are acceptable but nonstandard with valid signatures, but normal pubkeys are not valid in that case.
add_spender(spenders, "tapscript/oldpk/checksig", leaf="t30", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t1"}, **ERR_SIG_SCHNORR)
add_spender(spenders, "tapscript/oldpk/checksigadd", leaf="t31", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t2"}, **ERR_SIG_SCHNORR)
add_spender(spenders, "tapscript/oldpk/checksigverify", leaf="t32", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t28"}, **ERR_SIG_SCHNORR)
# Test that 0-byte public keys are not acceptable.
add_spender(spenders, "tapscript/emptypk/checksig", leaf="t1", **SINGLE_SIG, **common, failure={"leaf": "t7"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptypk/checksigverify", leaf="t2", **SINGLE_SIG, **common, failure={"leaf": "t8"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptypk/checksigadd", leaf="t9", **SINGLE_SIG, **common, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptypk/checksigadd", leaf="t35", standard=False, **SINGLE_SIG, **common, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)
# Test that OP_CHECKSIGADD results are as expected
add_spender(spenders, "tapscript/checksigaddresults", leaf="t28", **SINGLE_SIG, **common, failure={"leaf": "t27"}, err_msg="unknown error")
add_spender(spenders, "tapscript/checksigaddoversize", leaf="t29", **SINGLE_SIG, **common, failure={"leaf": "t27"}, err_msg="unknown error")
# Test that OP_CHECKSIGADD requires 3 stack elements.
add_spender(spenders, "tapscript/checksigadd3args", leaf="t9", **SINGLE_SIG, **common, failure={"leaf": "t11"}, **ERR_STACK_EMPTY)
# Test that empty signatures do not cause script failure in OP_CHECKSIG and OP_CHECKSIGADD (but do fail with empty pubkey, and do fail OP_CHECKSIGVERIFY)
add_spender(spenders, "tapscript/emptysigs/checksig", leaf="t12", **common, inputs=[b'', getter("sign")], failure={"leaf": "t13"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptysigs/nochecksigverify", leaf="t12", **common, inputs=[b'', getter("sign")], failure={"leaf": "t20"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptysigs/checksigadd", leaf="t14", **common, inputs=[b'', getter("sign")], failure={"leaf": "t15"}, **ERR_UNKNOWN_PUBKEY)
# Test that scripts over 10000 bytes (and over 201 non-push ops) are acceptable.
add_spender(spenders, "tapscript/no10000limit", leaf="t19", **SINGLE_SIG, **common)
# Test that a stack size of 1000 elements is permitted, but 1001 isn't.
add_spender(spenders, "tapscript/1000stack", leaf="t21", **SINGLE_SIG, **common, failure={"leaf": "t22"}, **ERR_STACK_SIZE)
# Test that an input stack size of 1000 elements is permitted, but 1001 isn't.
add_spender(spenders, "tapscript/1000inputs", leaf="t23", **common, inputs=[getter("sign")] + [b'' for _ in range(999)], failure={"leaf": "t24", "inputs": [getter("sign")] + [b'' for _ in range(1000)]}, **ERR_STACK_SIZE)
# Test that pushing a MAX_SCRIPT_ELEMENT_SIZE byte stack element is valid, but one longer is not.
add_spender(spenders, "tapscript/pushmaxlimit", leaf="t25", **common, **SINGLE_SIG, failure={"leaf": "t26"}, **ERR_PUSH_LIMIT)
# Test that 999-of-999 multisig works (but 1000-of-1000 triggers stack size limits)
add_spender(spenders, "tapscript/bigmulti", leaf="t33", **common, inputs=big_spend_inputs, num=999, failure={"leaf": "t34", "num": 1000}, **ERR_STACK_SIZE)
# Test that the CLEANSTACK rule is consensus critical in tapscript
add_spender(spenders, "tapscript/cleanstack", leaf="t36", tap=tap, inputs=[b'\x01'], failure={"inputs": [b'\x01', b'\x01']}, **ERR_CLEANSTACK)
# == Test for sigops ratio limit ==
# Given a number n, and a public key pk, functions that produce a (CScript, sigops). Each script takes as
# input a valid signature with the passed pk followed by a dummy push of bytes that are to be dropped, and
# will execute sigops signature checks.
SIGOPS_RATIO_SCRIPTS = [
# n OP_CHECKSIGVERFIYs and 1 OP_CHECKSIG.
lambda n, pk: (CScript([OP_DROP, pk] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_CHECKSIG]), n + 1),
# n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIGVERIFY.
lambda n, pk: (CScript([OP_DROP, pk, OP_0, OP_IF, OP_2DUP, OP_CHECKSIGVERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_2, OP_SWAP, OP_CHECKSIGADD, OP_3, OP_EQUAL]), n + 1),
# n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIG.
lambda n, pk: (CScript([random_bytes(220), OP_2DROP, pk, OP_1, OP_NOTIF, OP_2DUP, OP_CHECKSIG, OP_VERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_4, OP_SWAP, OP_CHECKSIGADD, OP_5, OP_EQUAL]), n + 1),
# n OP_CHECKSIGVERFIYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIGADD.
lambda n, pk: (CScript([OP_DROP, pk, OP_1, OP_IF, OP_ELSE, OP_2DUP, OP_6, OP_SWAP, OP_CHECKSIGADD, OP_7, OP_EQUALVERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_8, OP_SWAP, OP_CHECKSIGADD, OP_9, OP_EQUAL]), n + 1),
# n+1 OP_CHECKSIGs, but also one OP_CHECKSIG with an empty signature.
lambda n, pk: (CScript([OP_DROP, OP_0, pk, OP_CHECKSIG, OP_NOT, OP_VERIFY, pk] + [OP_2DUP, OP_CHECKSIG, OP_VERIFY] * n + [OP_CHECKSIG]), n + 1),
# n OP_CHECKSIGADDs and 1 OP_CHECKSIG, but also an OP_CHECKSIGADD with an empty signature.
lambda n, pk: (CScript([OP_DROP, OP_0, OP_10, pk, OP_CHECKSIGADD, OP_10, OP_EQUALVERIFY, pk] + [OP_2DUP, OP_16, OP_SWAP, OP_CHECKSIGADD, b'\x11', OP_EQUALVERIFY] * n + [OP_CHECKSIG]), n + 1),
]
for annex in [None, bytes([ANNEX_TAG]) + random_bytes(random.randrange(1000))]:
for hashtype in [SIGHASH_DEFAULT, SIGHASH_ALL]:
for pubkey in [pubs[1], random_bytes(random.choice([x for x in range(2, 81) if x != 32]))]:
for fn_num, fn in enumerate(SIGOPS_RATIO_SCRIPTS):
merkledepth = random.randrange(129)
def predict_sigops_ratio(n, dummy_size):
"""Predict whether spending fn(n, pubkey) with dummy_size will pass the ratio test."""
script, sigops = fn(n, pubkey)
# Predict the size of the witness for a given choice of n
stacklen_size = 1
sig_size = 64 + (hashtype != SIGHASH_DEFAULT)
siglen_size = 1
dummylen_size = 1 + 2 * (dummy_size >= 253)
script_size = len(script)
scriptlen_size = 1 + 2 * (script_size >= 253)
control_size = 33 + 32 * merkledepth
controllen_size = 1 + 2 * (control_size >= 253)
annex_size = 0 if annex is None else len(annex)
annexlen_size = 0 if annex is None else 1 + 2 * (annex_size >= 253)
witsize = stacklen_size + sig_size + siglen_size + dummy_size + dummylen_size + script_size + scriptlen_size + control_size + controllen_size + annex_size + annexlen_size
# sigops ratio test
return witsize + 50 >= 50 * sigops
# Make sure n is high enough that with empty dummy, the script is not valid
n = 0
while predict_sigops_ratio(n, 0):
n += 1
# But allow picking a bit higher still
n += random.randrange(5)
# Now pick dummy size *just* large enough that the overall construction passes
dummylen = 0
while not predict_sigops_ratio(n, dummylen):
dummylen += 1
scripts = [("s", fn(n, pubkey)[0])]
for _ in range(merkledepth):
scripts = [scripts, random.choice(PARTNER_MERKLE_FN)]
tap = taproot_construct(pubs[0], scripts)
standard = annex is None and dummylen <= 80 and len(pubkey) == 32
add_spender(spenders, "tapscript/sigopsratio_%i" % fn_num, tap=tap, leaf="s", annex=annex, hashtype=hashtype, key=secs[1], inputs=[getter("sign"), random_bytes(dummylen)], standard=standard, failure={"inputs": [getter("sign"), random_bytes(dummylen - 1)]}, **ERR_SIGOPS_RATIO)
# Future leaf versions
for leafver in range(0, 0x100, 2):
if leafver == LEAF_VERSION_TAPSCRIPT or leafver == ANNEX_TAG:
# Skip the defined LEAF_VERSION_TAPSCRIPT, and the ANNEX_TAG which is not usable as leaf version
continue
scripts = [
("bare_c0", CScript([OP_NOP])),
("bare_unkver", CScript([OP_NOP]), leafver),
("return_c0", CScript([OP_RETURN])),
("return_unkver", CScript([OP_RETURN]), leafver),
("undecodable_c0", CScript([OP_PUSHDATA1])),
("undecodable_unkver", CScript([OP_PUSHDATA1]), leafver),
("bigpush_c0", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP])),
("bigpush_unkver", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP]), leafver),
("1001push_c0", CScript([OP_0] * 1001)),
("1001push_unkver", CScript([OP_0] * 1001), leafver),
]
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "unkver/bare", standard=False, tap=tap, leaf="bare_unkver", failure={"leaf": "bare_c0"}, **ERR_CLEANSTACK)
add_spender(spenders, "unkver/return", standard=False, tap=tap, leaf="return_unkver", failure={"leaf": "return_c0"}, **ERR_OP_RETURN)
add_spender(spenders, "unkver/undecodable", standard=False, tap=tap, leaf="undecodable_unkver", failure={"leaf": "undecodable_c0"}, **ERR_UNDECODABLE)
add_spender(spenders, "unkver/bigpush", standard=False, tap=tap, leaf="bigpush_unkver", failure={"leaf": "bigpush_c0"}, **ERR_PUSH_LIMIT)
add_spender(spenders, "unkver/1001push", standard=False, tap=tap, leaf="1001push_unkver", failure={"leaf": "1001push_c0"}, **ERR_STACK_SIZE)
add_spender(spenders, "unkver/1001inputs", standard=False, tap=tap, leaf="bare_unkver", inputs=[b'']*1001, failure={"leaf": "bare_c0"}, **ERR_STACK_SIZE)
# OP_SUCCESSx tests.
hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)
for opval in range(76, 0x100):
opcode = CScriptOp(opval)
if not is_op_success(opcode):
continue
scripts = [
("bare_success", CScript([opcode])),
("bare_nop", CScript([OP_NOP])),
("unexecif_success", CScript([OP_0, OP_IF, opcode, OP_ENDIF])),
("unexecif_nop", CScript([OP_0, OP_IF, OP_NOP, OP_ENDIF])),
("return_success", CScript([OP_RETURN, opcode])),
("return_nop", CScript([OP_RETURN, OP_NOP])),
("undecodable_success", CScript([opcode, OP_PUSHDATA1])),
("undecodable_nop", CScript([OP_NOP, OP_PUSHDATA1])),
("undecodable_bypassed_success", CScript([OP_PUSHDATA1, OP_2, opcode])),
("bigpush_success", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, opcode])),
("bigpush_nop", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, OP_NOP])),
("1001push_success", CScript([OP_0] * 1001 + [opcode])),
("1001push_nop", CScript([OP_0] * 1001 + [OP_NOP])),
]
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "opsuccess/bare", standard=False, tap=tap, leaf="bare_success", failure={"leaf": "bare_nop"}, **ERR_CLEANSTACK)
add_spender(spenders, "opsuccess/unexecif", standard=False, tap=tap, leaf="unexecif_success", failure={"leaf": "unexecif_nop"}, **ERR_CLEANSTACK)
add_spender(spenders, "opsuccess/return", standard=False, tap=tap, leaf="return_success", failure={"leaf": "return_nop"}, **ERR_OP_RETURN)
add_spender(spenders, "opsuccess/undecodable", standard=False, tap=tap, leaf="undecodable_success", failure={"leaf": "undecodable_nop"}, **ERR_UNDECODABLE)
add_spender(spenders, "opsuccess/undecodable_bypass", standard=False, tap=tap, leaf="undecodable_success", failure={"leaf": "undecodable_bypassed_success"}, **ERR_UNDECODABLE)
add_spender(spenders, "opsuccess/bigpush", standard=False, tap=tap, leaf="bigpush_success", failure={"leaf": "bigpush_nop"}, **ERR_PUSH_LIMIT)
add_spender(spenders, "opsuccess/1001push", standard=False, tap=tap, leaf="1001push_success", failure={"leaf": "1001push_nop"}, **ERR_STACK_SIZE)
add_spender(spenders, "opsuccess/1001inputs", standard=False, tap=tap, leaf="bare_success", inputs=[b'']*1001, failure={"leaf": "bare_nop"}, **ERR_STACK_SIZE)
# Non-OP_SUCCESSx (verify that those aren't accidentally treated as OP_SUCCESSx)
for opval in range(0, 0x100):
opcode = CScriptOp(opval)
if is_op_success(opcode):
continue
scripts = [
("normal", CScript([OP_RETURN, opcode] + [OP_NOP] * 75)),
("op_success", CScript([OP_RETURN, CScriptOp(0x50)]))
]
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "alwaysvalid/notsuccessx", tap=tap, leaf="op_success", inputs=[], standard=False, failure={"leaf": "normal"}) # err_msg differs based on opcode
# == Legacy tests ==
# Also add a few legacy spends into the mix, so that transactions which combine taproot and pre-taproot spends get tested too.
for compressed in [False, True]:
eckey1 = ECKey()
eckey1.set(generate_privkey(), compressed)
pubkey1 = eckey1.get_pubkey().get_bytes()
eckey2 = ECKey()
eckey2.set(generate_privkey(), compressed)
for p2sh in [False, True]:
for witv0 in [False, True]:
for hashtype in VALID_SIGHASHES_ECDSA + [random.randrange(0x04, 0x80), random.randrange(0x84, 0x100)]:
standard = (hashtype in VALID_SIGHASHES_ECDSA) and (compressed or not witv0)
add_spender(spenders, "legacy/pk-wrongkey", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, script=CScript([pubkey1, OP_CHECKSIG]), **SINGLE_SIG, key=eckey1, failure={"key": eckey2}, sigops_weight=4-3*witv0, **ERR_NO_SUCCESS)
add_spender(spenders, "legacy/pkh-sighashflip", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, pkh=pubkey1, key=eckey1, **SIGHASH_BITFLIP, sigops_weight=4-3*witv0, **ERR_NO_SUCCESS)
# Verify that OP_CHECKSIGADD wasn't accidentally added to pre-taproot validation logic.
for p2sh in [False, True]:
for witv0 in [False, True]:
for hashtype in VALID_SIGHASHES_ECDSA + [random.randrange(0x04, 0x80), random.randrange(0x84, 0x100)]:
standard = hashtype in VALID_SIGHASHES_ECDSA and (p2sh or witv0)
add_spender(spenders, "compat/nocsa", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, script=CScript([OP_IF, OP_11, pubkey1, OP_CHECKSIGADD, OP_12, OP_EQUAL, OP_ELSE, pubkey1, OP_CHECKSIG, OP_ENDIF]), key=eckey1, sigops_weight=4-3*witv0, inputs=[getter("sign"), b''], failure={"inputs": [getter("sign"), b'\x01']}, **ERR_UNDECODABLE)
return spenders
def spenders_taproot_inactive():
    """Spenders for testing that pre-activation Taproot rules don't apply.

    All spends are marked non-standard; validity of signatures/control blocks
    must not matter before activation, which is what these cases verify.
    """
    spenders = []

    sec = generate_privkey()
    pub, _ = compute_xonly_pubkey(sec)
    scripts = [
        ("pk", CScript([pub, OP_CHECKSIG])),
        ("future_leaf", CScript([pub, OP_CHECKSIG]), 0xc2),
        ("op_success", CScript([pub, OP_CHECKSIG, OP_0, OP_IF, CScriptOp(0x50), OP_ENDIF])),
    ]
    tap = taproot_construct(pub, scripts)

    # Test that keypath spending is valid & non-standard, regardless of validity.
    add_spender(spenders, "inactive/keypath_valid", key=sec, tap=tap, standard=False)
    add_spender(spenders, "inactive/keypath_invalidsig", key=sec, tap=tap, standard=False, sighash=bitflipper(default_sighash))
    add_spender(spenders, "inactive/keypath_empty", key=sec, tap=tap, standard=False, witness=[])

    # Same for scriptpath spending (and features like annex, leaf versions, or OP_SUCCESS don't change this)
    add_spender(spenders, "inactive/scriptpath_valid", key=sec, tap=tap, leaf="pk", standard=False, inputs=[getter("sign")])
    add_spender(spenders, "inactive/scriptpath_invalidsig", key=sec, tap=tap, leaf="pk", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash))
    add_spender(spenders, "inactive/scriptpath_invalidcb", key=sec, tap=tap, leaf="pk", standard=False, inputs=[getter("sign")], controlblock=bitflipper(default_controlblock))
    add_spender(spenders, "inactive/scriptpath_valid_unkleaf", key=sec, tap=tap, leaf="future_leaf", standard=False, inputs=[getter("sign")])
    add_spender(spenders, "inactive/scriptpath_invalid_unkleaf", key=sec, tap=tap, leaf="future_leaf", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash))
    add_spender(spenders, "inactive/scriptpath_valid_opsuccess", key=sec, tap=tap, leaf="op_success", standard=False, inputs=[getter("sign")])
    # BUGFIX: this case previously reused the "inactive/scriptpath_valid_opsuccess"
    # label even though it spends with a bit-flipped (invalid) signature; give it
    # a distinct name so the two cases can be told apart in logs and dumps.
    add_spender(spenders, "inactive/scriptpath_invalid_opsuccess", key=sec, tap=tap, leaf="op_success", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash))

    return spenders
# Consensus validation flags to use in dumps for tests with "legacy/" or "inactive/" prefix.
# (These tests must validate without the TAPROOT flag, as they exercise pre-activation rules.)
LEGACY_FLAGS = "P2SH,DERSIG,CHECKLOCKTIMEVERIFY,CHECKSEQUENCEVERIFY,WITNESS,NULLDUMMY"
# Consensus validation flags to use in dumps for all other tests.
TAPROOT_FLAGS = "P2SH,DERSIG,CHECKLOCKTIMEVERIFY,CHECKSEQUENCEVERIFY,WITNESS,NULLDUMMY,TAPROOT"
def dump_json_test(tx, input_utxos, idx, success, failure):
    """Write a JSON dump describing how input *idx* of *tx* can (and cannot) be spent.

    The dump is one JSON object followed by ",\\n", written to
    $TEST_DUMP_DIR/<h>/<sha1> where <sha1> is the SHA1 of the dump text and <h>
    its first hex digit (a naming scheme compatible with fuzzing infrastructure).
    """
    spender = input_utxos[idx].spender
    # Determine flags to dump: pre-activation tests validate without TAPROOT.
    is_pre_taproot = spender.comment.startswith("legacy/") or spender.comment.startswith("inactive/")
    flags = LEGACY_FLAGS if is_pre_taproot else TAPROOT_FLAGS

    fields = [
        ("tx", tx.serialize().hex()),
        ("prevouts", [utxo.output.serialize().hex() for utxo in input_utxos]),
        ("index", idx),
        ("flags", flags),
        ("comment", spender.comment)
    ]

    # The "final" field indicates that a spend should be always valid, even with more validation flags enabled
    # than the listed ones. Use standardness as a proxy for this (which gives a conservative underestimate).
    if spender.is_standard:
        fields.append(("final", True))

    def witness_to_dict(wit):
        # wit is a (scriptSig, witness-stack) pair.
        return OrderedDict([("scriptSig", wit[0].hex()), ("witness", [elem.hex() for elem in wit[1]])])

    if success is not None:
        fields.append(("success", witness_to_dict(success)))
    if failure is not None:
        fields.append(("failure", witness_to_dict(failure)))

    # Write the dump to $TEST_DUMP_DIR/x/xyz... where x,y,z,... are the SHA1 sum of the dump (which makes the
    # file naming scheme compatible with fuzzing infrastructure).
    dump = json.dumps(OrderedDict(fields)) + ",\n"
    sha1 = hashlib.sha1(dump.encode("utf-8")).hexdigest()
    dirname = os.environ.get("TEST_DUMP_DIR", ".") + ("/%s" % sha1[0])
    os.makedirs(dirname, exist_ok=True)
    with open(dirname + ("/%s" % sha1), 'w', encoding="utf8") as f:
        f.write(dump)
# Data type to keep track of UTXOs, where they were created, and how to spend them.
# outpoint: COutPoint locating the UTXO; output: the CTxOut itself; spender: the
# Spender test case that knows how to (in)validly spend it.
UTXOData = namedtuple('UTXOData', 'outpoint,output,spender')
class TaprootTest(WidecoinTestFramework):
    """Functional test driving the randomized Taproot "spender" test cases.

    Node 0 runs with Taproot inactive, node 1 with Taproot active; the
    post-activation cases run on node 1 first, then the pre-activation
    cases on node 0.
    """

    def add_options(self, parser):
        # Test-specific command-line options.
        parser.add_argument("--dumptests", dest="dump_tests", default=False, action="store_true",
                            help="Dump generated test cases to directory set by TEST_DUMP_DIR environment variable")
        parser.add_argument("--previous_release", dest="previous_release", default=False, action="store_true",
                            help="Use a previous release as taproot-inactive node")

    def skip_test_if_missing_module(self):
        # A wallet is needed to fund the test transactions.
        self.skip_if_no_wallet()
        if self.options.previous_release:
            self.skip_if_no_previous_releases()

    def set_test_params(self):
        self.num_nodes = 2
        self.setup_clean_chain = True
        # Node 0 has Taproot inactive, Node 1 active.
        self.extra_args = [["-par=1"], ["-par=1"]]
        if self.options.previous_release:
            # Old binaries cannot host the default wallet on node 0.
            self.wallet_names = [None, self.default_wallet_name]
        else:
            # Keep Taproot from activating on node 0 via versionbits parameters.
            self.extra_args[0].append("-vbparams=taproot:1:1")

    def setup_nodes(self):
        # 200100 selects the previous-release binary for the taproot-inactive node
        # when --previous_release is given; None means the current binary.
        self.add_nodes(self.num_nodes, self.extra_args, versions=[
            200100 if self.options.previous_release else None,
            None,
        ])
        self.start_nodes()
        self.import_deterministic_coinbase_privkeys()

    def block_submit(self, node, txs, msg, err_msg, cb_pubkey=None, fees=0, sigops_weight=0, witness=False, accept=False):
        """Mine a block containing txs on the tracked tip and submit it to node.

        If accept is True the block must become the new best block (and the
        tracked tip/height/time state is advanced); otherwise it must be
        rejected. If err_msg is not None it must appear in the submitblock
        response. msg is used to label assertion failures.
        """
        # Deplete block of any non-tapscript sigops using a single additional 0-value coinbase output.
        # It is not impossible to fit enough tapscript sigops to hit the old 80k limit without
        # busting txin-level limits. We simply have to account for the p2pk outputs in all
        # transactions.
        extra_output_script = CScript([OP_CHECKSIG]*((MAX_BLOCK_SIGOPS_WEIGHT - sigops_weight) // WITNESS_SCALE_FACTOR))
        block = create_block(self.tip, create_coinbase(self.lastblockheight + 1, pubkey=cb_pubkey, extra_output_script=extra_output_script, fees=fees), self.lastblocktime + 1)
        block.nVersion = 4
        for tx in txs:
            tx.rehash()
            block.vtx.append(tx)
        block.hashMerkleRoot = block.calc_merkle_root()
        # Only add the witness commitment when requested by the caller.
        witness and add_witness_commitment(block)
        block.rehash()
        block.solve()
        block_response = node.submitblock(block.serialize().hex())
        if err_msg is not None:
            assert block_response is not None and err_msg in block_response, "Missing error message '%s' from block response '%s': %s" % (err_msg, "(None)" if block_response is None else block_response, msg)
        if accept:
            assert node.getbestblockhash() == block.hash, "Failed to accept: %s (response: %s)" % (msg, block_response)
            self.tip = block.sha256
            self.lastblockhash = block.hash
            self.lastblocktime += 1
            self.lastblockheight += 1
        else:
            assert node.getbestblockhash() == self.lastblockhash, "Failed to reject: " + msg

    def test_spenders(self, node, spenders, input_counts):
        """Run randomized tests with a number of "spenders".

        Steps:
            1) Generate an appropriate UTXO for each spender to test spend conditions
            2) Generate 100 random addresses of all wallet types: pkh/sh_wpkh/wpkh
            3) Select random number of inputs from (1)
            4) Select random number of addresses from (2) as outputs

        Each spender embodies a test; in a large randomized test, it is verified
        that toggling the valid argument to each lambda toggles the validity of
        the transaction. This is accomplished by constructing transactions consisting
        of all valid inputs, except one invalid one.
        """
        # Construct a bunch of sPKs that send coins back to the host wallet
        self.log.info("- Constructing addresses for returning coins")
        host_spks = []
        host_pubkeys = []
        for i in range(16):
            addr = node.getnewaddress(address_type=random.choice(["legacy", "p2sh-segwit", "bech32"]))
            info = node.getaddressinfo(addr)
            spk = bytes.fromhex(info['scriptPubKey'])
            host_spks.append(spk)
            host_pubkeys.append(bytes.fromhex(info['pubkey']))

        # Initialize variables used by block_submit().
        self.lastblockhash = node.getbestblockhash()
        self.tip = int(self.lastblockhash, 16)
        block = node.getblock(self.lastblockhash)
        self.lastblockheight = block['height']
        self.lastblocktime = block['time']

        # Create transactions spending up to 50 of the wallet's inputs, with one output for each spender, and
        # one change output at the end. The transaction is constructed on the Python side to enable
        # having multiple outputs to the same address and outputs with no assigned address. The wallet
        # is then asked to sign it through signrawtransactionwithwallet, and then added to a block on the
        # Python side (to bypass standardness rules).
        self.log.info("- Creating test UTXOs...")
        random.shuffle(spenders)
        normal_utxos = []
        mismatching_utxos = []  # UTXOs with input that requires mismatching output position
        done = 0
        while done < len(spenders):
            # Compute how many UTXOs to create with this transaction
            count_this_tx = min(len(spenders) - done, (len(spenders) + 4) // 5, 10000)
            fund_tx = CTransaction()
            # Add the 50 highest-value inputs
            unspents = node.listunspent()
            random.shuffle(unspents)
            unspents.sort(key=lambda x: int(x["amount"] * 100000000), reverse=True)
            if len(unspents) > 50:
                unspents = unspents[:50]
            random.shuffle(unspents)
            balance = 0
            for unspent in unspents:
                balance += int(unspent["amount"] * 100000000)
                txid = int(unspent["txid"], 16)
                fund_tx.vin.append(CTxIn(COutPoint(txid, int(unspent["vout"])), CScript()))
            # Add outputs
            cur_progress = done / len(spenders)
            next_progress = (done + count_this_tx) / len(spenders)
            change_goal = (1.0 - 0.6 * next_progress) / (1.0 - 0.6 * cur_progress) * balance
            self.log.debug("Create %i UTXOs in a transaction spending %i inputs worth %.8f (sending ~%.8f to change)" % (count_this_tx, len(unspents), balance * 0.00000001, change_goal * 0.00000001))
            for i in range(count_this_tx):
                # Distribute the non-change balance over the remaining outputs, +/-15% jitter.
                avg = (balance - change_goal) / (count_this_tx - i)
                amount = int(random.randrange(int(avg*0.85 + 0.5), int(avg*1.15 + 0.5)) + 0.5)
                balance -= amount
                fund_tx.vout.append(CTxOut(amount, spenders[done + i].script))
            # Add change
            fund_tx.vout.append(CTxOut(balance - 10000, random.choice(host_spks)))
            # Ask the wallet to sign
            ss = BytesIO(bytes.fromhex(node.signrawtransactionwithwallet(fund_tx.serialize().hex())["hex"]))
            fund_tx.deserialize(ss)
            # Construct UTXOData entries
            fund_tx.rehash()
            for i in range(count_this_tx):
                utxodata = UTXOData(outpoint=COutPoint(fund_tx.sha256, i), output=fund_tx.vout[i], spender=spenders[done])
                if utxodata.spender.need_vin_vout_mismatch:
                    mismatching_utxos.append(utxodata)
                else:
                    normal_utxos.append(utxodata)
                done += 1
            # Mine into a block
            self.block_submit(node, [fund_tx], "Funding tx", None, random.choice(host_pubkeys), 10000, MAX_BLOCK_SIGOPS_WEIGHT, True, True)

        # Consume groups of choice(input_coins) from utxos in a tx, testing the spenders.
        self.log.info("- Running %i spending tests" % done)
        random.shuffle(normal_utxos)
        random.shuffle(mismatching_utxos)
        assert done == len(normal_utxos) + len(mismatching_utxos)

        left = done
        while left:
            # Construct CTransaction with random nVersion, nLocktime
            tx = CTransaction()
            tx.nVersion = random.choice([1, 2, random.randint(-0x80000000, 0x7fffffff)])
            min_sequence = (tx.nVersion != 1 and tx.nVersion != 0) * 0x80000000  # The minimum sequence number to disable relative locktime
            if random.choice([True, False]):
                tx.nLockTime = random.randrange(LOCKTIME_THRESHOLD, self.lastblocktime - 7200)  # all absolute locktimes in the past
            else:
                tx.nLockTime = random.randrange(self.lastblockheight + 1)  # all block heights in the past

            # Decide how many UTXOs to test with.
            acceptable = [n for n in input_counts if n <= left and (left - n > max(input_counts) or (left - n) in [0] + input_counts)]
            num_inputs = random.choice(acceptable)

            # If we have UTXOs that require mismatching inputs/outputs left, include exactly one of those
            # unless there is only one normal UTXO left (as tests with mismatching UTXOs require at least one
            # normal UTXO to go in the first position), and we don't want to run out of normal UTXOs.
            input_utxos = []
            while len(mismatching_utxos) and (len(input_utxos) == 0 or len(normal_utxos) == 1):
                input_utxos.append(mismatching_utxos.pop())
                left -= 1
            # Top up until we hit num_inputs (but include at least one normal UTXO always).
            for _ in range(max(1, num_inputs - len(input_utxos))):
                input_utxos.append(normal_utxos.pop())
                left -= 1

            # The first input cannot require a mismatching output (as there is at least one output).
            while True:
                random.shuffle(input_utxos)
                if not input_utxos[0].spender.need_vin_vout_mismatch:
                    break
            first_mismatch_input = None
            for i in range(len(input_utxos)):
                if input_utxos[i].spender.need_vin_vout_mismatch:
                    first_mismatch_input = i
            assert first_mismatch_input is None or first_mismatch_input > 0

            # Decide fee, and add CTxIns to tx.
            amount = sum(utxo.output.nValue for utxo in input_utxos)
            fee = min(random.randrange(MIN_FEE * 2, MIN_FEE * 4), amount - DUST_LIMIT)  # 10000-20000 sat fee
            in_value = amount - fee
            tx.vin = [CTxIn(outpoint=utxo.outpoint, nSequence=random.randint(min_sequence, 0xffffffff)) for utxo in input_utxos]
            tx.wit.vtxinwit = [CTxInWitness() for _ in range(len(input_utxos))]
            sigops_weight = sum(utxo.spender.sigops_weight for utxo in input_utxos)
            self.log.debug("Test: %s" % (", ".join(utxo.spender.comment for utxo in input_utxos)))

            # Add 1 to 4 random outputs (but constrained by inputs that require mismatching outputs)
            num_outputs = random.choice(range(1, 1 + min(4, 4 if first_mismatch_input is None else first_mismatch_input)))
            assert in_value >= 0 and fee - num_outputs * DUST_LIMIT >= MIN_FEE
            for i in range(num_outputs):
                tx.vout.append(CTxOut())
                if in_value <= DUST_LIMIT:
                    tx.vout[-1].nValue = DUST_LIMIT
                elif i < num_outputs - 1:
                    tx.vout[-1].nValue = in_value
                else:
                    tx.vout[-1].nValue = random.randint(DUST_LIMIT, in_value)
                in_value -= tx.vout[-1].nValue
                tx.vout[-1].scriptPubKey = random.choice(host_spks)
                sigops_weight += CScript(tx.vout[-1].scriptPubKey).GetSigOpCount(False) * WITNESS_SCALE_FACTOR
            fee += in_value
            assert fee >= 0

            # Select coinbase pubkey
            cb_pubkey = random.choice(host_pubkeys)
            sigops_weight += 1 * WITNESS_SCALE_FACTOR

            # Precompute one satisfying and one failing scriptSig/witness for each input.
            input_data = []
            for i in range(len(input_utxos)):
                fn = input_utxos[i].spender.sat_function
                fail = None
                success = fn(tx, i, [utxo.output for utxo in input_utxos], True)
                if not input_utxos[i].spender.no_fail:
                    fail = fn(tx, i, [utxo.output for utxo in input_utxos], False)
                input_data.append((fail, success))
                if self.options.dump_tests:
                    dump_json_test(tx, input_utxos, i, success, fail)

            # Sign each input incorrectly once on each complete signing pass, except the very last.
            for fail_input in list(range(len(input_utxos))) + [None]:
                # Skip trying to fail at spending something that can't be made to fail.
                if fail_input is not None and input_utxos[fail_input].spender.no_fail:
                    continue
                # Expected message with each input failure, may be None(which is ignored)
                expected_fail_msg = None if fail_input is None else input_utxos[fail_input].spender.err_msg
                # Fill inputs/witnesses
                for i in range(len(input_utxos)):
                    tx.vin[i].scriptSig = input_data[i][i != fail_input][0]
                    tx.wit.vtxinwit[i].scriptWitness.stack = input_data[i][i != fail_input][1]
                # Submit to mempool to check standardness
                is_standard_tx = fail_input is None and all(utxo.spender.is_standard for utxo in input_utxos) and tx.nVersion >= 1 and tx.nVersion <= 2
                tx.rehash()
                msg = ','.join(utxo.spender.comment + ("*" if n == fail_input else "") for n, utxo in enumerate(input_utxos))
                if is_standard_tx:
                    node.sendrawtransaction(tx.serialize().hex(), 0)
                    assert node.getmempoolentry(tx.hash) is not None, "Failed to accept into mempool: " + msg
                else:
                    assert_raises_rpc_error(-26, None, node.sendrawtransaction, tx.serialize().hex(), 0)
                # Submit in a block
                self.block_submit(node, [tx], msg, witness=True, accept=fail_input is None, cb_pubkey=cb_pubkey, fees=fee, sigops_weight=sigops_weight, err_msg=expected_fail_msg)

            if (len(spenders) - left) // 200 > (len(spenders) - left - len(input_utxos)) // 200:
                self.log.info(" - %i tests done" % (len(spenders) - left))

        assert left == 0
        assert len(normal_utxos) == 0
        assert len(mismatching_utxos) == 0
        self.log.info(" - Done")

    def run_test(self):
        # Post-taproot activation tests go first (pre-taproot tests' blocks are invalid post-taproot).
        self.log.info("Post-activation tests...")
        self.nodes[1].generate(COINBASE_MATURITY + 1)
        self.test_spenders(self.nodes[1], spenders_taproot_active(), input_counts=[1, 2, 2, 2, 2, 3])

        # Re-connect nodes in case they have been disconnected
        self.disconnect_nodes(0, 1)
        self.connect_nodes(0, 1)

        # Transfer value of the largest 500 coins to pre-taproot node.
        addr = self.nodes[0].getnewaddress()

        unsp = self.nodes[1].listunspent()
        unsp = sorted(unsp, key=lambda i: i['amount'], reverse=True)
        unsp = unsp[:500]

        rawtx = self.nodes[1].createrawtransaction(
            inputs=[{
                'txid': i['txid'],
                'vout': i['vout']
            } for i in unsp],
            outputs={addr: sum(i['amount'] for i in unsp)}
        )
        rawtx = self.nodes[1].signrawtransactionwithwallet(rawtx)['hex']

        # Mine a block with the transaction
        block = create_block(tmpl=self.nodes[1].getblocktemplate(NORMAL_GBT_REQUEST_PARAMS), txlist=[rawtx])
        add_witness_commitment(block)
        block.rehash()
        block.solve()
        assert_equal(None, self.nodes[1].submitblock(block.serialize().hex()))
        self.sync_blocks()

        # Pre-taproot activation tests.
        self.log.info("Pre-activation tests...")
        # Run each test twice; once in isolation, and once combined with others. Testing in isolation
        # means that the standardness is verified in every test (as combined transactions are only standard
        # when all their inputs are standard).
        self.test_spenders(self.nodes[0], spenders_taproot_inactive(), input_counts=[1])
        self.test_spenders(self.nodes[0], spenders_taproot_inactive(), input_counts=[2, 3])
# Script entry point: run the test via the functional test framework.
if __name__ == '__main__':
    TaprootTest().main()
| 58.60757 | 363 | 0.670587 |
from test_framework.blocktools import (
COINBASE_MATURITY,
create_coinbase,
create_block,
add_witness_commitment,
MAX_BLOCK_SIGOPS_WEIGHT,
NORMAL_GBT_REQUEST_PARAMS,
WITNESS_SCALE_FACTOR,
)
from test_framework.messages import (
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
)
from test_framework.script import (
ANNEX_TAG,
CScript,
CScriptNum,
CScriptOp,
LEAF_VERSION_TAPSCRIPT,
LegacySignatureHash,
LOCKTIME_THRESHOLD,
MAX_SCRIPT_ELEMENT_SIZE,
OP_0,
OP_1,
OP_2,
OP_3,
OP_4,
OP_5,
OP_6,
OP_7,
OP_8,
OP_9,
OP_10,
OP_11,
OP_12,
OP_16,
OP_2DROP,
OP_2DUP,
OP_CHECKMULTISIG,
OP_CHECKMULTISIGVERIFY,
OP_CHECKSIG,
OP_CHECKSIGADD,
OP_CHECKSIGVERIFY,
OP_CODESEPARATOR,
OP_DROP,
OP_DUP,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_EQUALVERIFY,
OP_IF,
OP_NOP,
OP_NOT,
OP_NOTIF,
OP_PUSHDATA1,
OP_RETURN,
OP_SWAP,
OP_VERIFY,
SIGHASH_DEFAULT,
SIGHASH_ALL,
SIGHASH_NONE,
SIGHASH_SINGLE,
SIGHASH_ANYONECANPAY,
SegwitV0SignatureHash,
TaprootSignatureHash,
is_op_success,
taproot_construct,
)
from test_framework.script_util import (
key_to_p2wpkh_script,
keyhash_to_p2pkh_script,
script_to_p2sh_script,
script_to_p2wsh_script,
)
from test_framework.test_framework import WidecoinTestFramework
from test_framework.util import assert_raises_rpc_error, assert_equal
from test_framework.key import generate_privkey, compute_xonly_pubkey, sign_schnorr, tweak_add_privkey, ECKey
from test_framework.address import (
hash160,
)
from collections import OrderedDict, namedtuple
from io import BytesIO
import json
import hashlib
import os
import random
def deep_eval(ctx, expr):
    """Fully evaluate a lazy expression against a context dict.

    Callables are invoked (repeatedly, while the result remains callable) with
    *ctx* as their only argument; lists are evaluated element-wise. Any other
    value is returned unchanged.
    """
    value = expr
    while callable(value):
        value = value(ctx)
    if isinstance(value, list):
        return [deep_eval(ctx, item) for item in value]
    return value
Final = namedtuple("Final", "value")
def get(ctx, name):
    """Look up *name* in *ctx*, lazily evaluating and memoizing it.

    On first access the entry is passed through deep_eval and the result is
    written back wrapped in Final, so later lookups hit the cache.
    """
    assert name in ctx, "Missing '%s' in context" % name
    entry = ctx[name]
    if isinstance(entry, Final):
        return entry.value
    entry = Final(deep_eval(ctx, entry))
    ctx[name] = entry
    return entry.value
def getter(name):
    """Return a lazy accessor that fetches *name* from whatever context it is evaluated in."""
    def accessor(ctx):
        return get(ctx, name)
    return accessor
def override(expr, **kwargs):
    """Return a lazy expression evaluating *expr* in a context with *kwargs* overridden."""
    def evaluate(ctx):
        merged = {**ctx, **kwargs}
        return deep_eval(merged, expr)
    return evaluate
def default_hashtype(ctx):
    """Default sighash type: SIGHASH_DEFAULT in taproot mode, SIGHASH_ALL otherwise."""
    return SIGHASH_DEFAULT if get(ctx, "mode") == "taproot" else SIGHASH_ALL
def default_tapleaf(ctx):
    """The leaf info object for the taproot leaf selected by ctx["leaf"]."""
    leaves = get(ctx, "tap").leaves
    return leaves[get(ctx, "leaf")]
def default_script_taproot(ctx):
    """The script of the taproot leaf being spent."""
    leaf_info = get(ctx, "tapleaf")
    return leaf_info.script
def default_leafversion(ctx):
    """The leaf version of the taproot leaf being spent."""
    leaf_info = get(ctx, "tapleaf")
    return leaf_info.version
def default_negflag(ctx):
    """The negation flag of the internal pubkey, taken from the TaprootInfo."""
    tap_info = get(ctx, "tap")
    return tap_info.negflag
def default_pubkey_internal(ctx):
    """The internal pubkey, taken from the TaprootInfo."""
    tap_info = get(ctx, "tap")
    return tap_info.internal_pubkey
def default_merklebranch(ctx):
    """The Merkle branch for the taproot leaf being spent."""
    leaf_info = get(ctx, "tapleaf")
    return leaf_info.merklebranch
def default_controlblock(ctx):
    """Assemble the control block: one byte (leafversion + negflag), then the
    internal pubkey, then the Merkle branch."""
    first_byte = bytes([get(ctx, "leafversion") + get(ctx, "negflag")])
    return first_byte + get(ctx, "pubkey_internal") + get(ctx, "merklebranch")
def default_sighash(ctx):
    """Compute the signature message for the current spend.

    Dispatches on ctx["mode"]: taproot (key path or script path, depending on
    whether a leaf is selected), witv0 (BIP143), or legacy. Reads tx/idx and
    the already-resolved hashtype from the context.
    """
    tx = get(ctx, "tx")
    idx = get(ctx, "idx")
    hashtype = get(ctx, "hashtype_actual")
    mode = get(ctx, "mode")
    if mode == "taproot":
        utxos = get(ctx, "utxos")
        annex = get(ctx, "annex")
        if get(ctx, "leaf") is not None:
            # Script path spend: the sighash also commits to the leaf script,
            # leaf version, and last OP_CODESEPARATOR position.
            codeseppos = get(ctx, "codeseppos")
            leaf_ver = get(ctx, "leafversion")
            script = get(ctx, "script_taproot")
            return TaprootSignatureHash(tx, utxos, hashtype, idx, scriptpath=True, script=script, leaf_ver=leaf_ver, codeseparator_pos=codeseppos, annex=annex)
        else:
            # Key path spend.
            return TaprootSignatureHash(tx, utxos, hashtype, idx, scriptpath=False, annex=annex)
    elif mode == "witv0":
        scriptcode = get(ctx, "scriptcode")
        utxos = get(ctx, "utxos")
        return SegwitV0SignatureHash(scriptcode, tx, idx, hashtype, utxos[idx].nValue)
    else:
        scriptcode = get(ctx, "scriptcode")
        # LegacySignatureHash returns a (hash, err) pair; only the hash is needed.
        return LegacySignatureHash(scriptcode, tx, idx, hashtype)[0]
def default_tweak(ctx):
    """Key path spends sign with the tweaked key; script path spends use no tweak."""
    if get(ctx, "leaf") is not None:
        return None
    return get(ctx, "tap").tweak
def default_key_tweaked(ctx):
    """The signing key, with the taproot tweak applied when one is set."""
    base_key = get(ctx, "key")
    tweak = get(ctx, "tweak")
    return base_key if tweak is None else tweak_add_privkey(base_key, tweak)
def default_signature(ctx):
    """Sign the sighash: Schnorr for taproot mode, ECDSA otherwise.

    The flag_flip_r / flag_flip_p context entries let tests deliberately
    produce Schnorr signatures with an invalid R or P sign.
    """
    msg = get(ctx, "sighash")
    if get(ctx, "mode") != "taproot":
        return get(ctx, "key").sign_ecdsa(msg)
    return sign_schnorr(
        get(ctx, "key_tweaked"),
        msg,
        flip_r=get(ctx, "flag_flip_r"),
        flip_p=get(ctx, "flag_flip_p"),
    )
def default_hashtype_actual(ctx):
    """The hashtype that is actually used for signing.

    In taproot mode, a SIGHASH_SINGLE request whose input has no matching
    output is replaced by the same flags with SIGHASH_NONE; everywhere else the
    requested hashtype passes through unchanged.
    """
    requested = get(ctx, "hashtype")
    if get(ctx, "mode") != "taproot":
        return requested
    idx = get(ctx, "idx")
    tx = get(ctx, "tx")
    if (requested & 3) == SIGHASH_SINGLE and idx >= len(tx.vout):
        return (requested & ~3) | SIGHASH_NONE
    return requested
def default_bytes_hashtype(ctx):
    """The hashtype byte to append to the signature; empty when it is 0 (SIGHASH_DEFAULT)."""
    hashtype = get(ctx, "hashtype_actual")
    return b"" if hashtype == 0 else bytes([hashtype])
def default_sign(ctx):
    """The complete signature blob: raw signature followed by the hashtype byte (if any)."""
    return b"".join([get(ctx, "signature"), get(ctx, "bytes_hashtype")])
def default_inputs_keypath(ctx):
    """A taproot key path spend's script inputs: just the signature."""
    sig = get(ctx, "sign")
    return [sig]
def default_witness_taproot(ctx):
    """Witness stack for a taproot spend.

    Key path: the key path inputs. Script path: the script inputs, the leaf
    script, and the control block. Either way the annex (if any) goes last.
    """
    annex = get(ctx, "annex")
    tail = [] if annex is None else [annex]
    if get(ctx, "leaf") is None:
        return get(ctx, "inputs_keypath") + tail
    stack = list(get(ctx, "inputs"))
    stack.append(bytes(get(ctx, "script_taproot")))
    stack.append(get(ctx, "controlblock"))
    return stack + tail
def default_witness_witv0(ctx):
    """Witness stack for a v0 segwit spend: the inputs, plus the witness script for P2WSH."""
    wscript = get(ctx, "script_witv0")
    stack = get(ctx, "inputs")
    return stack if wscript is None else stack + [wscript]
def default_witness(ctx):
    """The witness stack for the spend; legacy (non-witness) spends get an empty stack."""
    mode = get(ctx, "mode")
    if mode == "taproot":
        return get(ctx, "witness_taproot")
    if mode == "witv0":
        return get(ctx, "witness_witv0")
    return []
def default_scriptsig(ctx):
    """scriptSig for the spend.

    Legacy spends place the script inputs in the scriptSig; in all modes the
    serialized redeemscript is appended when spending a P2SH output.
    """
    parts = get(ctx, "inputs") if get(ctx, "mode") == "legacy" else []
    redeemscript = get(ctx, "script_p2sh")
    if redeemscript is not None:
        parts = parts + [bytes(redeemscript)]
    return parts
# The lazy-evaluation context used by spend(): each entry is either a concrete
# value or a callable/expression resolved on demand via get()/deep_eval.
DEFAULT_CONTEXT = {
    # == The two top-level expressions spend() evaluates: ==
    # The overall witness stack for the input being signed.
    "witness": default_witness,
    # The overall scriptSig for the input being signed.
    "scriptsig": default_scriptsig,
    # The witness stack for spending a taproot output.
    "witness_taproot": default_witness_taproot,
    # The witness stack for spending a P2WPKH/P2WSH output.
    "witness_witv0": default_witness_witv0,
    # The script inputs for a taproot key path spend.
    "inputs_keypath": default_inputs_keypath,
    # The actual hashtype to use (usually equal to hashtype, but in taproot SIGHASH_SINGLE is not always allowed).
    "hashtype_actual": default_hashtype_actual,
    # The bytes object for a full signature (including hashtype byte, if needed).
    "bytes_hashtype": default_bytes_hashtype,
    # A full script signature (bytes including hashtype, if needed)
    "sign": default_sign,
    # An ECDSA or Schnorr signature (excluding hashtype byte).
    "signature": default_signature,
    # The 32-byte tweaked key (equal to key for script path spends, or key+tweak for key path spends).
    "key_tweaked": default_key_tweaked,
    # The tweak to use (None for script path spends, the actual tweak for key path spends).
    "tweak": default_tweak,
    # The sighash value (32 bytes)
    "sighash": default_sighash,
    # The information about the chosen script path spend (TaprootLeafInfo object).
    "tapleaf": default_tapleaf,
    # The script to push, and include in the sighash, for a taproot script path spend.
    "script_taproot": default_script_taproot,
    # The internal pubkey for a taproot script path spend (32 bytes).
    "pubkey_internal": default_pubkey_internal,
    # The negation flag of the internal pubkey for a taproot script path spend.
    "negflag": default_negflag,
    # The leaf version to include in the sighash (this does not affect the one in the control block).
    "leafversion": default_leafversion,
    # The Merkle path to include in the control block for a script path spend.
    "merklebranch": default_merklebranch,
    # The control block to push for a taproot script path spend.
    "controlblock": default_controlblock,
    # Whether to produce signatures with invalid P sign (Schnorr signatures only).
    "flag_flip_p": False,
    # Whether to produce signatures with invalid R sign (Schnorr signatures only).
    "flag_flip_r": False,
    # == Parameters that can be changed without invalidating, but do have a default: ==
    # The hashtype (as an integer).
    "hashtype": default_hashtype,
    # The annex (only when mode=="taproot").
    "annex": None,
    # The codeseparator position (only when mode=="taproot").
    "codeseppos": -1,
    # The redeemscript to add to the scriptSig (if P2SH; None implies not P2SH).
    "script_p2sh": None,
    # The script to add to the witness in (if P2WSH; None implies P2WPKH)
    "script_witv0": None,
    # The leaf to use in taproot spends (if script path spend; None implies key path spend).
    "leaf": None,
    # The input arguments to provide to the executed script
    "inputs": [],
    # == Parameters to be set before evaluation: ==
    # - mode: what spending style to use ("taproot", "witv0", or "legacy").
    # - key: the (untweaked) private key to sign with (ECKey object for ECDSA, 32 bytes for Schnorr).
    # - tap: the TaprootInfo object (see taproot_construct; needed in mode=="taproot").
    # - tx: the transaction to sign.
    # - utxos: the UTXOs being spent (needed in mode=="witv0" and mode=="taproot").
    # - idx: the input position being signed.
    # - scriptcode: the scriptcode to include in legacy and witv0 sighashes.
}
def flatten(lst):
    """Recursively flatten nested lists into one flat list (non-list items kept as-is)."""
    out = []
    for item in lst:
        if isinstance(item, list):
            out.extend(flatten(item))
        else:
            out.append(item)
    return out
def spend(tx, idx, utxos, **kwargs):
    """Sign input *idx* of *tx*, returning (scriptsig, witness_stack).

    Evaluation is driven by DEFAULT_CONTEXT with tx/idx/utxos and any keyword
    overrides merged in last (so kwargs win).
    """
    ctx = dict(DEFAULT_CONTEXT)
    ctx.update(tx=tx, idx=idx, utxos=utxos)
    ctx.update(kwargs)

    def as_script(elem):
        # CScript elements are concatenated verbatim; anything else becomes a push.
        return elem if isinstance(elem, CScript) else CScript([elem])

    sig_elems = flatten(get(ctx, "scriptsig"))
    scriptsig = CScript(b"".join(bytes(as_script(e)) for e in sig_elems))
    witness_stack = flatten(get(ctx, "witness"))
    return scriptsig, witness_stack
# === Spender objects ===
#
# Each spender is a tuple of:
# - A scriptPubKey which is to be spent from (CScript)
# - A comment describing the test (string)
# - Whether the spending (on itself) is expected to be standard (bool)
# - A tx-signing lambda returning (scriptsig, witness_stack), taking as inputs:
# - A transaction to sign (CTransaction)
# - An input position (int)
# - The spent UTXOs by this transaction (list of CTxOut)
# - Whether to produce a valid spend (bool)
# - A string with an expected error message for failure case if known
# - The (pre-taproot) sigops weight consumed by a successful spend
# - Whether this spend cannot fail
# - Whether this test demands being placed in a txin with no corresponding txout (for testing SIGHASH_SINGLE behavior)
Spender = namedtuple("Spender", "script,comment,is_standard,sat_function,err_msg,sigops_weight,no_fail,need_vin_vout_mismatch")
def make_spender(comment, *, tap=None, witv0=False, script=None, pkh=None, p2sh=False, spk_mutate_pre_p2sh=None, failure=None, standard=True, err_msg=None, sigops_weight=0, need_vin_vout_mismatch=False, **kwargs):
    """Construct a Spender for one output type / spending style combination.

    comment: test description. tap: TaprootInfo (taproot mode). witv0: use
    v0 segwit. script/pkh: spend a script or a pubkey(hash) output. p2sh:
    wrap the scriptPubKey in P2SH. spk_mutate_pre_p2sh: optional mutation of
    the scriptPubKey (applied before P2SH wrapping). failure: context
    overrides producing an invalid spend. standard/err_msg/sigops_weight/
    need_vin_vout_mismatch: passed through to the Spender. Remaining kwargs
    become context overrides for the valid spend.
    """
    conf = dict()
    # Compute scriptPubKey and set useful defaults based on the inputs.
    if witv0:
        assert tap is None
        conf["mode"] = "witv0"
        if pkh is not None:
            # P2WPKH
            assert script is None
            pubkeyhash = hash160(pkh)
            spk = key_to_p2wpkh_script(pkh)
            conf["scriptcode"] = keyhash_to_p2pkh_script(pubkeyhash)
            conf["script_witv0"] = None
            conf["inputs"] = [getter("sign"), pkh]
        elif script is not None:
            # P2WSH
            spk = script_to_p2wsh_script(script)
            conf["scriptcode"] = script
            conf["script_witv0"] = script
        else:
            assert False
    elif tap is None:
        conf["mode"] = "legacy"
        if pkh is not None:
            # P2PKH
            assert script is None
            pubkeyhash = hash160(pkh)
            spk = keyhash_to_p2pkh_script(pubkeyhash)
            conf["scriptcode"] = spk
            conf["inputs"] = [getter("sign"), pkh]
        elif script is not None:
            # bare
            spk = script
            conf["scriptcode"] = script
        else:
            assert False
    else:
        assert script is None
        conf["mode"] = "taproot"
        conf["tap"] = tap
        spk = tap.scriptPubKey
    if spk_mutate_pre_p2sh is not None:
        spk = spk_mutate_pre_p2sh(spk)
    if p2sh:
        # P2SH wrapper can be combined with anything else
        conf["script_p2sh"] = spk
        spk = script_to_p2sh_script(spk)
    conf = {**conf, **kwargs}
    def sat_fn(tx, idx, utxos, valid):
        # Satisfier: build a valid spend from conf, or an invalid one by
        # layering the failure overrides on top of it.
        if valid:
            return spend(tx, idx, utxos, **conf)
        else:
            assert failure is not None
            return spend(tx, idx, utxos, **{**conf, **failure})
    return Spender(script=spk, comment=comment, is_standard=standard, sat_function=sat_fn, err_msg=err_msg, sigops_weight=sigops_weight, no_fail=failure is None, need_vin_vout_mismatch=need_vin_vout_mismatch)
def add_spender(spenders, *args, **kwargs):
    """Construct a Spender via make_spender and append it to *spenders*."""
    new_spender = make_spender(*args, **kwargs)
    spenders.append(new_spender)
# === Helpers for the test ===
def random_checksig_style(pubkey):
    """Return a bytes script checking a signature for *pubkey*, using a random
    one of OP_CHECKSIG, OP_CHECKSIGVERIFY, or OP_CHECKSIGADD."""
    opcode = random.choice([OP_CHECKSIG, OP_CHECKSIGVERIFY, OP_CHECKSIGADD])
    if opcode == OP_CHECKSIGVERIFY:
        # VERIFY consumes the result, so leave an explicit true behind.
        script = CScript([pubkey, opcode, OP_1])
    elif opcode == OP_CHECKSIGADD:
        # Pick a counter value and check that it is incremented by exactly one.
        num = random.choice([0, 0x7fffffff, -0x7fffffff])
        script = CScript([num, pubkey, opcode, num + 1, OP_EQUAL])
    else:
        script = CScript([pubkey, opcode])
    return bytes(script)
def random_bytes(n):
    """Return *n* bytes drawn from the random module's PRNG (one getrandbits(8) per byte)."""
    values = [random.getrandbits(8) for _ in range(n)]
    return bytes(values)
def bitflipper(expr):
    """Return a lazy expression that evaluates *expr* (must yield bytes) and flips one random bit."""
    def flipped(ctx):
        data = deep_eval(ctx, expr)
        assert isinstance(data, bytes)
        mask = 1 << random.randrange(len(data) * 8)
        return (int.from_bytes(data, 'little') ^ mask).to_bytes(len(data), 'little')
    return flipped
def zero_appender(expr):
    """Return a lazy expression that appends a zero byte to *expr*'s value."""
    def appended(ctx):
        return deep_eval(ctx, expr) + b"\x00"
    return appended
def byte_popper(expr):
    """Return a lazy expression that drops the final byte of *expr*'s value."""
    def popped(ctx):
        return deep_eval(ctx, expr)[:-1]
    return popped
# Expected error strings
# Each maps to the err_msg keyword of make_spender; the text must match the
# node's script/consensus error output for the failing spend.
ERR_SIG_SIZE = {"err_msg": "Invalid Schnorr signature size"}
ERR_SIG_HASHTYPE = {"err_msg": "Invalid Schnorr signature hash type"}
ERR_SIG_SCHNORR = {"err_msg": "Invalid Schnorr signature"}
ERR_OP_RETURN = {"err_msg": "OP_RETURN was encountered"}
ERR_CONTROLBLOCK_SIZE = {"err_msg": "Invalid Taproot control block size"}
ERR_WITNESS_PROGRAM_MISMATCH = {"err_msg": "Witness program hash mismatch"}
ERR_PUSH_LIMIT = {"err_msg": "Push value size limit exceeded"}
ERR_DISABLED_OPCODE = {"err_msg": "Attempted to use a disabled opcode"}
ERR_TAPSCRIPT_CHECKMULTISIG = {"err_msg": "OP_CHECKMULTISIG(VERIFY) is not available in tapscript"}
ERR_MINIMALIF = {"err_msg": "OP_IF/NOTIF argument must be minimal in tapscript"}
ERR_UNKNOWN_PUBKEY = {"err_msg": "Public key is neither compressed or uncompressed"}
ERR_STACK_SIZE = {"err_msg": "Stack size limit exceeded"}
ERR_CLEANSTACK = {"err_msg": "Stack size must be exactly one after execution"}
ERR_STACK_EMPTY = {"err_msg": "Operation not valid with the current stack size"}
ERR_SIGOPS_RATIO = {"err_msg": "Too much signature validation relative to witness weight"}
ERR_UNDECODABLE = {"err_msg": "Opcode missing or not understood"}
ERR_NO_SUCCESS = {"err_msg": "Script evaluated without error but finished with a false/empty top stack element"}
ERR_EMPTY_WITNESS = {"err_msg": "Witness program was passed an empty witness"}
ERR_CHECKSIGVERIFY = {"err_msg": "Script failed an OP_CHECKSIGVERIFY operation"}
# Sighash types valid for ECDSA (legacy and v0 segwit) signatures.
VALID_SIGHASHES_ECDSA = [
    SIGHASH_ALL,
    SIGHASH_NONE,
    SIGHASH_SINGLE,
    SIGHASH_ANYONECANPAY + SIGHASH_ALL,
    SIGHASH_ANYONECANPAY + SIGHASH_NONE,
    SIGHASH_ANYONECANPAY + SIGHASH_SINGLE
]
# Taproot additionally permits SIGHASH_DEFAULT (0).
VALID_SIGHASHES_TAPROOT = [SIGHASH_DEFAULT] + VALID_SIGHASHES_ECDSA
VALID_SIGHASHES_TAPROOT_SINGLE = [
    SIGHASH_SINGLE,
    SIGHASH_ANYONECANPAY + SIGHASH_SINGLE
]
VALID_SIGHASHES_TAPROOT_NO_SINGLE = [h for h in VALID_SIGHASHES_TAPROOT if h not in VALID_SIGHASHES_TAPROOT_SINGLE]
# Commonly used make_spender keyword bundles.
SIGHASH_BITFLIP = {"failure": {"sighash": bitflipper(default_sighash)}}
SIG_POP_BYTE = {"failure": {"sign": byte_popper(default_sign)}}
SINGLE_SIG = {"inputs": [getter("sign")]}
SIG_ADD_ZERO = {"failure": {"sign": zero_appender(default_sign)}}
# Minimum output value and fee (in satoshis) used when building test transactions.
DUST_LIMIT = 600
MIN_FEE = 50000
# === Actual test cases ===
def spenders_taproot_active():
secs = [generate_privkey() for _ in range(8)]
pubs = [compute_xonly_pubkey(sec)[0] for sec in secs]
spenders = []
# == Tests for BIP340 signature validation. ==
# These are primarily tested through the test vectors implemented in libsecp256k1, and in src/tests/key_tests.cpp.
# Some things are tested programmatically as well here.
tap = taproot_construct(pubs[0])
# Test with key with bit flipped.
add_spender(spenders, "sig/key", tap=tap, key=secs[0], failure={"key_tweaked": bitflipper(default_key_tweaked)}, **ERR_SIG_SCHNORR)
# Test with sighash with bit flipped.
add_spender(spenders, "sig/sighash", tap=tap, key=secs[0], failure={"sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)
# Test with invalid R sign.
add_spender(spenders, "sig/flip_r", tap=tap, key=secs[0], failure={"flag_flip_r": True}, **ERR_SIG_SCHNORR)
# Test with invalid P sign.
add_spender(spenders, "sig/flip_p", tap=tap, key=secs[0], failure={"flag_flip_p": True}, **ERR_SIG_SCHNORR)
# Test with signature with bit flipped.
add_spender(spenders, "sig/bitflip", tap=tap, key=secs[0], failure={"signature": bitflipper(default_signature)}, **ERR_SIG_SCHNORR)
# == Tests for signature hashing ==
# Run all tests once with no annex, and once with a valid random annex.
for annex in [None, lambda _: bytes([ANNEX_TAG]) + random_bytes(random.randrange(0, 250))]:
# Non-empty annex is non-standard
no_annex = annex is None
# Sighash mutation tests (test all sighash combinations)
for hashtype in VALID_SIGHASHES_TAPROOT:
common = {"annex": annex, "hashtype": hashtype, "standard": no_annex}
# Pure pubkey
tap = taproot_construct(pubs[0])
add_spender(spenders, "sighash/purepk", tap=tap, key=secs[0], **common, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
# Pubkey/P2PK script combination
scripts = [("s0", CScript(random_checksig_style(pubs[1])))]
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "sighash/keypath_hashtype_%x" % hashtype, tap=tap, key=secs[0], **common, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/scriptpath_hashtype_%x" % hashtype, tap=tap, leaf="s0", key=secs[1], **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
# Test SIGHASH_SINGLE behavior in combination with mismatching outputs
if hashtype in VALID_SIGHASHES_TAPROOT_SINGLE:
add_spender(spenders, "sighash/keypath_hashtype_mis_%x" % hashtype, tap=tap, key=secs[0], annex=annex, standard=no_annex, hashtype_actual=random.choice(VALID_SIGHASHES_TAPROOT_NO_SINGLE), failure={"hashtype_actual": hashtype}, **ERR_SIG_HASHTYPE, need_vin_vout_mismatch=True)
add_spender(spenders, "sighash/scriptpath_hashtype_mis_%x" % hashtype, tap=tap, leaf="s0", key=secs[1], annex=annex, standard=no_annex, hashtype_actual=random.choice(VALID_SIGHASHES_TAPROOT_NO_SINGLE), **SINGLE_SIG, failure={"hashtype_actual": hashtype}, **ERR_SIG_HASHTYPE, need_vin_vout_mismatch=True)
# Test OP_CODESEPARATOR impact on sighashing.
hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)
common = {"annex": annex, "hashtype": hashtype, "standard": no_annex}
scripts = [
("pk_codesep", CScript(random_checksig_style(pubs[1]) + bytes([OP_CODESEPARATOR]))), # codesep after checksig
("codesep_pk", CScript(bytes([OP_CODESEPARATOR]) + random_checksig_style(pubs[1]))), # codesep before checksig
("branched_codesep", CScript([random_bytes(random.randrange(511)), OP_DROP, OP_IF, OP_CODESEPARATOR, pubs[0], OP_ELSE, OP_CODESEPARATOR, pubs[1], OP_ENDIF, OP_CHECKSIG])), # branch dependent codesep
]
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "sighash/pk_codesep", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/codesep_pk", tap=tap, leaf="codesep_pk", key=secs[1], codeseppos=0, **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/branched_codesep/left", tap=tap, leaf="branched_codesep", key=secs[0], codeseppos=3, **common, inputs=[getter("sign"), b'\x01'], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/branched_codesep/right", tap=tap, leaf="branched_codesep", key=secs[1], codeseppos=6, **common, inputs=[getter("sign"), b''], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
# Reusing the scripts above, test that various features affect the sighash.
add_spender(spenders, "sighash/annex", tap=tap, leaf="pk_codesep", key=secs[1], hashtype=hashtype, standard=False, **SINGLE_SIG, annex=bytes([ANNEX_TAG]), failure={"sighash": override(default_sighash, annex=None)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/script", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, script_taproot=tap.leaves["codesep_pk"].script)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/leafver", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, leafversion=random.choice([x & 0xFE for x in range(0x100) if x & 0xFE != 0xC0]))}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, leaf=None)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/keypath", tap=tap, key=secs[0], **common, failure={"sighash": override(default_sighash, leaf="pk_codesep")}, **ERR_SIG_SCHNORR)
# Test that invalid hashtypes don't work, both in key path and script path spends
hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)
for invalid_hashtype in [x for x in range(0x100) if x not in VALID_SIGHASHES_TAPROOT]:
add_spender(spenders, "sighash/keypath_unk_hashtype_%x" % invalid_hashtype, tap=tap, key=secs[0], hashtype=hashtype, failure={"hashtype": invalid_hashtype}, **ERR_SIG_HASHTYPE)
add_spender(spenders, "sighash/scriptpath_unk_hashtype_%x" % invalid_hashtype, tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=hashtype, failure={"hashtype": invalid_hashtype}, **ERR_SIG_HASHTYPE)
add_spender(spenders, "sighash/hashtype0_byte_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_DEFAULT])}, **ERR_SIG_HASHTYPE)
add_spender(spenders, "sighash/hashtype0_byte_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_DEFAULT])}, **ERR_SIG_HASHTYPE)
add_spender(spenders, "sighash/hashtype1_byte_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype1_byte_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype0to1_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_ALL])}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype0to1_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_ALL])}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype1to0_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype1to0_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
for hashtype in [SIGHASH_DEFAULT, random.choice(VALID_SIGHASHES_TAPROOT)]:
scripts = [
("csv", CScript([pubs[2], OP_CHECKSIGVERIFY, OP_1])),
("cs_pos", CScript([pubs[2], OP_CHECKSIG])),
("csa_pos", CScript([OP_0, pubs[2], OP_CHECKSIGADD, OP_1, OP_EQUAL])),
("cs_neg", CScript([pubs[2], OP_CHECKSIG, OP_NOT])),
("csa_neg", CScript([OP_2, pubs[2], OP_CHECKSIGADD, OP_2, OP_EQUAL]))
]
random.shuffle(scripts)
tap = taproot_construct(pubs[3], scripts)
add_spender(spenders, "siglen/empty_keypath", tap=tap, key=secs[3], hashtype=hashtype, failure={"sign": b""}, **ERR_SIG_SIZE)
add_spender(spenders, "siglen/empty_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_CHECKSIGVERIFY)
add_spender(spenders, "siglen/empty_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_NO_SUCCESS)
add_spender(spenders, "siglen/empty_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_NO_SUCCESS)
add_spender(spenders, "siglen/empty_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": lambda _: random_bytes(random.randrange(1, 63))}, **ERR_SIG_SIZE)
add_spender(spenders, "siglen/empty_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": lambda _: random_bytes(random.randrange(66, 100))}, **ERR_SIG_SIZE)
add_spender(spenders, "siglen/padzero_keypath", tap=tap, key=secs[3], hashtype=hashtype, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/popbyte_keypath", tap=tap, key=secs[3], hashtype=hashtype, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/invalid_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": default_sign, "sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "siglen/invalid_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": default_sign, "sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)
for p2sh in [False, True]:
for witver in range(1, 17):
for witlen in [20, 31, 32, 33]:
def mutate(spk):
prog = spk[2:]
assert len(prog) == 32
if witlen < 32:
prog = prog[0:witlen]
elif witlen > 32:
prog += bytes([0 for _ in range(witlen - 32)])
return CScript([CScriptOp.encode_op_n(witver), prog])
scripts = [("s0", CScript([pubs[0], OP_CHECKSIG])), ("dummy", CScript([OP_RETURN]))]
tap = taproot_construct(pubs[1], scripts)
if not p2sh and witver == 1 and witlen == 32:
add_spender(spenders, "applic/keypath", p2sh=p2sh, spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[1], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "applic/scriptpath", p2sh=p2sh, leaf="s0", spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[0], **SINGLE_SIG, failure={"leaf": "dummy"}, **ERR_OP_RETURN)
else:
add_spender(spenders, "applic/keypath", p2sh=p2sh, spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[1], standard=False)
add_spender(spenders, "applic/scriptpath", p2sh=p2sh, leaf="s0", spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[0], **SINGLE_SIG, standard=False)
PARTNER_MERKLE_FN = [
lambda h: h,
lambda h: bytes([0 for _ in range(32)]),
lambda h: bytes([0xff for _ in range(32)]),
lambda h: (int.from_bytes(h, 'big') - 1).to_bytes(32, 'big'),
lambda h: (int.from_bytes(h, 'big') + 1).to_bytes(32, 'big'),
lambda h: (int.from_bytes(h, 'little') - 1).to_bytes(32, 'big'),
lambda h: (int.from_bytes(h, 'little') + 1).to_bytes(32, 'little'),
lambda h: (int.from_bytes(h, 'little') ^ (1 << random.randrange(256))).to_bytes(32, 'little')
]
scripts = [("128deep", CScript([pubs[0], OP_CHECKSIG])), [("129deep", CScript([pubs[0], OP_CHECKSIG])), random.choice(PARTNER_MERKLE_FN)]]
for _ in range(127):
scripts = [scripts, random.choice(PARTNER_MERKLE_FN)]
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "spendpath/merklelimit", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"leaf": "129deep"}, **ERR_CONTROLBLOCK_SIZE)
# Test that flipping the negation bit invalidates spends.
add_spender(spenders, "spendpath/negflag", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"negflag": lambda ctx: 1 - default_negflag(ctx)}, **ERR_WITNESS_PROGRAM_MISMATCH)
# Test that bitflips in the Merkle branch invalidate it.
add_spender(spenders, "spendpath/bitflipmerkle", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"merklebranch": bitflipper(default_merklebranch)}, **ERR_WITNESS_PROGRAM_MISMATCH)
# Test that bitflips in the internal pubkey invalidate it.
add_spender(spenders, "spendpath/bitflippubkey", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"pubkey_internal": bitflipper(default_pubkey_internal)}, **ERR_WITNESS_PROGRAM_MISMATCH)
# Test that empty witnesses are invalid.
add_spender(spenders, "spendpath/emptywit", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"witness": []}, **ERR_EMPTY_WITNESS)
# Test that adding garbage to the control block invalidates it.
add_spender(spenders, "spendpath/padlongcontrol", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_controlblock(ctx) + random_bytes(random.randrange(1, 32))}, **ERR_CONTROLBLOCK_SIZE)
# Test that truncating the control block invalidates it.
add_spender(spenders, "spendpath/trunclongcontrol", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:random.randrange(1, 32)]}, **ERR_CONTROLBLOCK_SIZE)
scripts = [("s", CScript([pubs[0], OP_CHECKSIG]))]
tap = taproot_construct(pubs[1], scripts)
# Test that adding garbage to the control block invalidates it.
add_spender(spenders, "spendpath/padshortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_controlblock(ctx) + random_bytes(random.randrange(1, 32))}, **ERR_CONTROLBLOCK_SIZE)
# Test that truncating the control block invalidates it.
add_spender(spenders, "spendpath/truncshortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:random.randrange(1, 32)]}, **ERR_CONTROLBLOCK_SIZE)
# Test that truncating the control block to 1 byte ("-1 Merkle length") invalidates it
add_spender(spenders, "spendpath/trunc1shortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:1]}, **ERR_CONTROLBLOCK_SIZE)
# == Test BIP342 edge cases ==
csa_low_val = random.randrange(0, 17) # Within range for OP_n
csa_low_result = csa_low_val + 1
csa_high_val = random.randrange(17, 100) if random.getrandbits(1) else random.randrange(-100, -1) # Outside OP_n range
csa_high_result = csa_high_val + 1
OVERSIZE_NUMBER = 2**31
assert_equal(len(CScriptNum.encode(CScriptNum(OVERSIZE_NUMBER))), 6)
assert_equal(len(CScriptNum.encode(CScriptNum(OVERSIZE_NUMBER-1))), 5)
big_choices = []
big_scriptops = []
for i in range(1000):
r = random.randrange(len(pubs))
big_choices.append(r)
big_scriptops += [pubs[r], OP_CHECKSIGVERIFY]
def big_spend_inputs(ctx):
# Instead of signing 999 times, precompute signatures for every (key, hashtype) combination
sigs = {}
for ht in VALID_SIGHASHES_TAPROOT:
for k in range(len(pubs)):
sigs[(k, ht)] = override(default_sign, hashtype=ht, key=secs[k])(ctx)
num = get(ctx, "num")
return [sigs[(big_choices[i], random.choice(VALID_SIGHASHES_TAPROOT))] for i in range(num - 1, -1, -1)]
# Various BIP342 features
scripts = [
# 0) drop stack element and OP_CHECKSIG
("t0", CScript([OP_DROP, pubs[1], OP_CHECKSIG])),
# 1) normal OP_CHECKSIG
("t1", CScript([pubs[1], OP_CHECKSIG])),
# 2) normal OP_CHECKSIGVERIFY
("t2", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1])),
# 3) Hypothetical OP_CHECKMULTISIG script that takes a single sig as input
("t3", CScript([OP_0, OP_SWAP, OP_1, pubs[1], OP_1, OP_CHECKMULTISIG])),
# 4) Hypothetical OP_CHECKMULTISIGVERIFY script that takes a single sig as input
("t4", CScript([OP_0, OP_SWAP, OP_1, pubs[1], OP_1, OP_CHECKMULTISIGVERIFY, OP_1])),
# 5) OP_IF script that needs a true input
("t5", CScript([OP_IF, pubs[1], OP_CHECKSIG, OP_ELSE, OP_RETURN, OP_ENDIF])),
# 6) OP_NOTIF script that needs a true input
("t6", CScript([OP_NOTIF, OP_RETURN, OP_ELSE, pubs[1], OP_CHECKSIG, OP_ENDIF])),
# 7) OP_CHECKSIG with an empty key
("t7", CScript([OP_0, OP_CHECKSIG])),
# 8) OP_CHECKSIGVERIFY with an empty key
("t8", CScript([OP_0, OP_CHECKSIGVERIFY, OP_1])),
# 9) normal OP_CHECKSIGADD that also ensures return value is correct
("t9", CScript([csa_low_val, pubs[1], OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),
# 10) OP_CHECKSIGADD with empty key
("t10", CScript([csa_low_val, OP_0, OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),
# 11) OP_CHECKSIGADD with missing counter stack element
("t11", CScript([pubs[1], OP_CHECKSIGADD, OP_1, OP_EQUAL])),
# 12) OP_CHECKSIG that needs invalid signature
("t12", CScript([pubs[1], OP_CHECKSIGVERIFY, pubs[0], OP_CHECKSIG, OP_NOT])),
# 13) OP_CHECKSIG with empty key that needs invalid signature
("t13", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_CHECKSIG, OP_NOT])),
# 14) OP_CHECKSIGADD that needs invalid signature
("t14", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, pubs[0], OP_CHECKSIGADD, OP_NOT])),
# 15) OP_CHECKSIGADD with empty key that needs invalid signature
("t15", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_0, OP_CHECKSIGADD, OP_NOT])),
# 16) OP_CHECKSIG with unknown pubkey type
("t16", CScript([OP_1, OP_CHECKSIG])),
# 17) OP_CHECKSIGADD with unknown pubkey type
("t17", CScript([OP_0, OP_1, OP_CHECKSIGADD])),
# 18) OP_CHECKSIGVERIFY with unknown pubkey type
("t18", CScript([OP_1, OP_CHECKSIGVERIFY, OP_1])),
# 19) script longer than 10000 bytes and over 201 non-push opcodes
("t19", CScript([OP_0, OP_0, OP_2DROP] * 10001 + [pubs[1], OP_CHECKSIG])),
# 20) OP_CHECKSIGVERIFY with empty key
("t20", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_0, OP_CHECKSIGVERIFY, OP_1])),
# 21) Script that grows the stack to 1000 elements
("t21", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1] + [OP_DUP] * 999 + [OP_DROP] * 999)),
# 22) Script that grows the stack to 1001 elements
("t22", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1] + [OP_DUP] * 1000 + [OP_DROP] * 1000)),
# 23) Script that expects an input stack of 1000 elements
("t23", CScript([OP_DROP] * 999 + [pubs[1], OP_CHECKSIG])),
# 24) Script that expects an input stack of 1001 elements
("t24", CScript([OP_DROP] * 1000 + [pubs[1], OP_CHECKSIG])),
# 25) Script that pushes a MAX_SCRIPT_ELEMENT_SIZE-bytes element
("t25", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE), OP_DROP, pubs[1], OP_CHECKSIG])),
# 26) Script that pushes a (MAX_SCRIPT_ELEMENT_SIZE+1)-bytes element
("t26", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, pubs[1], OP_CHECKSIG])),
# 27) CHECKSIGADD that must fail because numeric argument number is >4 bytes
("t27", CScript([CScriptNum(OVERSIZE_NUMBER), pubs[1], OP_CHECKSIGADD])),
# 28) Pushes random CScriptNum value, checks OP_CHECKSIGADD result
("t28", CScript([csa_high_val, pubs[1], OP_CHECKSIGADD, csa_high_result, OP_EQUAL])),
# 29) CHECKSIGADD that succeeds with proper sig because numeric argument number is <=4 bytes
("t29", CScript([CScriptNum(OVERSIZE_NUMBER-1), pubs[1], OP_CHECKSIGADD])),
# 30) Variant of t1 with "normal" 33-byte pubkey
("t30", CScript([b'\x03' + pubs[1], OP_CHECKSIG])),
# 31) Variant of t2 with "normal" 33-byte pubkey
("t31", CScript([b'\x02' + pubs[1], OP_CHECKSIGVERIFY, OP_1])),
# 32) Variant of t28 with "normal" 33-byte pubkey
("t32", CScript([csa_high_val, b'\x03' + pubs[1], OP_CHECKSIGADD, csa_high_result, OP_EQUAL])),
# 33) 999-of-999 multisig
("t33", CScript(big_scriptops[:1998] + [OP_1])),
# 34) 1000-of-1000 multisig
("t34", CScript(big_scriptops[:2000] + [OP_1])),
# 35) Variant of t9 that uses a non-minimally encoded input arg
("t35", CScript([bytes([csa_low_val]), pubs[1], OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),
# 36) Empty script
("t36", CScript([])),
]
# Add many dummies to test huge trees
for j in range(100000):
scripts.append((None, CScript([OP_RETURN, random.randrange(100000)])))
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
common = {
"hashtype": hashtype,
"key": secs[1],
"tap": tap,
}
# Test that MAX_SCRIPT_ELEMENT_SIZE byte stack element inputs are valid, but not one more (and 80 bytes is standard but 81 is not).
add_spender(spenders, "tapscript/inputmaxlimit", leaf="t0", **common, standard=False, inputs=[getter("sign"), random_bytes(MAX_SCRIPT_ELEMENT_SIZE)], failure={"inputs": [getter("sign"), random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1)]}, **ERR_PUSH_LIMIT)
add_spender(spenders, "tapscript/input80limit", leaf="t0", **common, inputs=[getter("sign"), random_bytes(80)])
add_spender(spenders, "tapscript/input81limit", leaf="t0", **common, standard=False, inputs=[getter("sign"), random_bytes(81)])
# Test that OP_CHECKMULTISIG and OP_CHECKMULTISIGVERIFY cause failure, but OP_CHECKSIG and OP_CHECKSIGVERIFY work.
add_spender(spenders, "tapscript/disabled_checkmultisig", leaf="t1", **common, **SINGLE_SIG, failure={"leaf": "t3"}, **ERR_TAPSCRIPT_CHECKMULTISIG)
add_spender(spenders, "tapscript/disabled_checkmultisigverify", leaf="t2", **common, **SINGLE_SIG, failure={"leaf": "t4"}, **ERR_TAPSCRIPT_CHECKMULTISIG)
# Test that OP_IF and OP_NOTIF do not accept non-0x01 as truth value (the MINIMALIF rule is consensus in Tapscript)
add_spender(spenders, "tapscript/minimalif", leaf="t5", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x02']}, **ERR_MINIMALIF)
add_spender(spenders, "tapscript/minimalnotif", leaf="t6", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x03']}, **ERR_MINIMALIF)
add_spender(spenders, "tapscript/minimalif", leaf="t5", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x0001']}, **ERR_MINIMALIF)
add_spender(spenders, "tapscript/minimalnotif", leaf="t6", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x0100']}, **ERR_MINIMALIF)
# Test that 1-byte public keys (which are unknown) are acceptable but nonstandard with unrelated signatures, but 0-byte public keys are not valid.
add_spender(spenders, "tapscript/unkpk/checksig", leaf="t16", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t7"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/unkpk/checksigadd", leaf="t17", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/unkpk/checksigverify", leaf="t18", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t8"}, **ERR_UNKNOWN_PUBKEY)
# Test that 33-byte public keys (which are unknown) are acceptable but nonstandard with valid signatures, but normal pubkeys are not valid in that case.
add_spender(spenders, "tapscript/oldpk/checksig", leaf="t30", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t1"}, **ERR_SIG_SCHNORR)
add_spender(spenders, "tapscript/oldpk/checksigadd", leaf="t31", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t2"}, **ERR_SIG_SCHNORR)
add_spender(spenders, "tapscript/oldpk/checksigverify", leaf="t32", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t28"}, **ERR_SIG_SCHNORR)
# Test that 0-byte public keys are not acceptable.
add_spender(spenders, "tapscript/emptypk/checksig", leaf="t1", **SINGLE_SIG, **common, failure={"leaf": "t7"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptypk/checksigverify", leaf="t2", **SINGLE_SIG, **common, failure={"leaf": "t8"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptypk/checksigadd", leaf="t9", **SINGLE_SIG, **common, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptypk/checksigadd", leaf="t35", standard=False, **SINGLE_SIG, **common, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)
# Test that OP_CHECKSIGADD results are as expected
add_spender(spenders, "tapscript/checksigaddresults", leaf="t28", **SINGLE_SIG, **common, failure={"leaf": "t27"}, err_msg="unknown error")
add_spender(spenders, "tapscript/checksigaddoversize", leaf="t29", **SINGLE_SIG, **common, failure={"leaf": "t27"}, err_msg="unknown error")
# Test that OP_CHECKSIGADD requires 3 stack elements.
add_spender(spenders, "tapscript/checksigadd3args", leaf="t9", **SINGLE_SIG, **common, failure={"leaf": "t11"}, **ERR_STACK_EMPTY)
# Test that empty signatures do not cause script failure in OP_CHECKSIG and OP_CHECKSIGADD (but do fail with empty pubkey, and do fail OP_CHECKSIGVERIFY)
add_spender(spenders, "tapscript/emptysigs/checksig", leaf="t12", **common, inputs=[b'', getter("sign")], failure={"leaf": "t13"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptysigs/nochecksigverify", leaf="t12", **common, inputs=[b'', getter("sign")], failure={"leaf": "t20"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptysigs/checksigadd", leaf="t14", **common, inputs=[b'', getter("sign")], failure={"leaf": "t15"}, **ERR_UNKNOWN_PUBKEY)
# Test that scripts over 10000 bytes (and over 201 non-push ops) are acceptable.
add_spender(spenders, "tapscript/no10000limit", leaf="t19", **SINGLE_SIG, **common)
# Test that a stack size of 1000 elements is permitted, but 1001 isn't.
add_spender(spenders, "tapscript/1000stack", leaf="t21", **SINGLE_SIG, **common, failure={"leaf": "t22"}, **ERR_STACK_SIZE)
add_spender(spenders, "tapscript/1000inputs", leaf="t23", **common, inputs=[getter("sign")] + [b'' for _ in range(999)], failure={"leaf": "t24", "inputs": [getter("sign")] + [b'' for _ in range(1000)]}, **ERR_STACK_SIZE)
# Test that pushing a MAX_SCRIPT_ELEMENT_SIZE byte stack element is valid, but one longer is not.
add_spender(spenders, "tapscript/pushmaxlimit", leaf="t25", **common, **SINGLE_SIG, failure={"leaf": "t26"}, **ERR_PUSH_LIMIT)
# Test that 999-of-999 multisig works (but 1000-of-1000 triggers stack size limits)
add_spender(spenders, "tapscript/bigmulti", leaf="t33", **common, inputs=big_spend_inputs, num=999, failure={"leaf": "t34", "num": 1000}, **ERR_STACK_SIZE)
# Test that the CLEANSTACK rule is consensus critical in tapscript
add_spender(spenders, "tapscript/cleanstack", leaf="t36", tap=tap, inputs=[b'\x01'], failure={"inputs": [b'\x01', b'\x01']}, **ERR_CLEANSTACK)
# == Test for sigops ratio limit ==
# Given a number n, and a public key pk, functions that produce a (CScript, sigops). Each script takes as
# input a valid signature with the passed pk followed by a dummy push of bytes that are to be dropped, and
# will execute sigops signature checks.
SIGOPS_RATIO_SCRIPTS = [
# n OP_CHECKSIGVERFIYs and 1 OP_CHECKSIG.
lambda n, pk: (CScript([OP_DROP, pk] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_CHECKSIG]), n + 1),
# n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIGVERIFY.
lambda n, pk: (CScript([OP_DROP, pk, OP_0, OP_IF, OP_2DUP, OP_CHECKSIGVERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_2, OP_SWAP, OP_CHECKSIGADD, OP_3, OP_EQUAL]), n + 1),
# n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIG.
lambda n, pk: (CScript([random_bytes(220), OP_2DROP, pk, OP_1, OP_NOTIF, OP_2DUP, OP_CHECKSIG, OP_VERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_4, OP_SWAP, OP_CHECKSIGADD, OP_5, OP_EQUAL]), n + 1),
# n OP_CHECKSIGVERFIYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIGADD.
lambda n, pk: (CScript([OP_DROP, pk, OP_1, OP_IF, OP_ELSE, OP_2DUP, OP_6, OP_SWAP, OP_CHECKSIGADD, OP_7, OP_EQUALVERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_8, OP_SWAP, OP_CHECKSIGADD, OP_9, OP_EQUAL]), n + 1),
# n+1 OP_CHECKSIGs, but also one OP_CHECKSIG with an empty signature.
lambda n, pk: (CScript([OP_DROP, OP_0, pk, OP_CHECKSIG, OP_NOT, OP_VERIFY, pk] + [OP_2DUP, OP_CHECKSIG, OP_VERIFY] * n + [OP_CHECKSIG]), n + 1),
# n OP_CHECKSIGADDs and 1 OP_CHECKSIG, but also an OP_CHECKSIGADD with an empty signature.
lambda n, pk: (CScript([OP_DROP, OP_0, OP_10, pk, OP_CHECKSIGADD, OP_10, OP_EQUALVERIFY, pk] + [OP_2DUP, OP_16, OP_SWAP, OP_CHECKSIGADD, b'\x11', OP_EQUALVERIFY] * n + [OP_CHECKSIG]), n + 1),
]
for annex in [None, bytes([ANNEX_TAG]) + random_bytes(random.randrange(1000))]:
for hashtype in [SIGHASH_DEFAULT, SIGHASH_ALL]:
for pubkey in [pubs[1], random_bytes(random.choice([x for x in range(2, 81) if x != 32]))]:
for fn_num, fn in enumerate(SIGOPS_RATIO_SCRIPTS):
merkledepth = random.randrange(129)
def predict_sigops_ratio(n, dummy_size):
script, sigops = fn(n, pubkey)
# Predict the size of the witness for a given choice of n
stacklen_size = 1
sig_size = 64 + (hashtype != SIGHASH_DEFAULT)
siglen_size = 1
dummylen_size = 1 + 2 * (dummy_size >= 253)
script_size = len(script)
scriptlen_size = 1 + 2 * (script_size >= 253)
control_size = 33 + 32 * merkledepth
controllen_size = 1 + 2 * (control_size >= 253)
annex_size = 0 if annex is None else len(annex)
annexlen_size = 0 if annex is None else 1 + 2 * (annex_size >= 253)
witsize = stacklen_size + sig_size + siglen_size + dummy_size + dummylen_size + script_size + scriptlen_size + control_size + controllen_size + annex_size + annexlen_size
# sigops ratio test
return witsize + 50 >= 50 * sigops
# Make sure n is high enough that with empty dummy, the script is not valid
n = 0
while predict_sigops_ratio(n, 0):
n += 1
# But allow picking a bit higher still
n += random.randrange(5)
# Now pick dummy size *just* large enough that the overall construction passes
dummylen = 0
while not predict_sigops_ratio(n, dummylen):
dummylen += 1
scripts = [("s", fn(n, pubkey)[0])]
for _ in range(merkledepth):
scripts = [scripts, random.choice(PARTNER_MERKLE_FN)]
tap = taproot_construct(pubs[0], scripts)
standard = annex is None and dummylen <= 80 and len(pubkey) == 32
add_spender(spenders, "tapscript/sigopsratio_%i" % fn_num, tap=tap, leaf="s", annex=annex, hashtype=hashtype, key=secs[1], inputs=[getter("sign"), random_bytes(dummylen)], standard=standard, failure={"inputs": [getter("sign"), random_bytes(dummylen - 1)]}, **ERR_SIGOPS_RATIO)
# Future leaf versions
for leafver in range(0, 0x100, 2):
if leafver == LEAF_VERSION_TAPSCRIPT or leafver == ANNEX_TAG:
# Skip the defined LEAF_VERSION_TAPSCRIPT, and the ANNEX_TAG which is not usable as leaf version
continue
scripts = [
("bare_c0", CScript([OP_NOP])),
("bare_unkver", CScript([OP_NOP]), leafver),
("return_c0", CScript([OP_RETURN])),
("return_unkver", CScript([OP_RETURN]), leafver),
("undecodable_c0", CScript([OP_PUSHDATA1])),
("undecodable_unkver", CScript([OP_PUSHDATA1]), leafver),
("bigpush_c0", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP])),
("bigpush_unkver", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP]), leafver),
("1001push_c0", CScript([OP_0] * 1001)),
("1001push_unkver", CScript([OP_0] * 1001), leafver),
]
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "unkver/bare", standard=False, tap=tap, leaf="bare_unkver", failure={"leaf": "bare_c0"}, **ERR_CLEANSTACK)
add_spender(spenders, "unkver/return", standard=False, tap=tap, leaf="return_unkver", failure={"leaf": "return_c0"}, **ERR_OP_RETURN)
add_spender(spenders, "unkver/undecodable", standard=False, tap=tap, leaf="undecodable_unkver", failure={"leaf": "undecodable_c0"}, **ERR_UNDECODABLE)
add_spender(spenders, "unkver/bigpush", standard=False, tap=tap, leaf="bigpush_unkver", failure={"leaf": "bigpush_c0"}, **ERR_PUSH_LIMIT)
add_spender(spenders, "unkver/1001push", standard=False, tap=tap, leaf="1001push_unkver", failure={"leaf": "1001push_c0"}, **ERR_STACK_SIZE)
add_spender(spenders, "unkver/1001inputs", standard=False, tap=tap, leaf="bare_unkver", inputs=[b'']*1001, failure={"leaf": "bare_c0"}, **ERR_STACK_SIZE)
# OP_SUCCESSx tests.
hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)
for opval in range(76, 0x100):
opcode = CScriptOp(opval)
if not is_op_success(opcode):
continue
scripts = [
("bare_success", CScript([opcode])),
("bare_nop", CScript([OP_NOP])),
("unexecif_success", CScript([OP_0, OP_IF, opcode, OP_ENDIF])),
("unexecif_nop", CScript([OP_0, OP_IF, OP_NOP, OP_ENDIF])),
("return_success", CScript([OP_RETURN, opcode])),
("return_nop", CScript([OP_RETURN, OP_NOP])),
("undecodable_success", CScript([opcode, OP_PUSHDATA1])),
("undecodable_nop", CScript([OP_NOP, OP_PUSHDATA1])),
("undecodable_bypassed_success", CScript([OP_PUSHDATA1, OP_2, opcode])),
("bigpush_success", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, opcode])),
("bigpush_nop", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, OP_NOP])),
("1001push_success", CScript([OP_0] * 1001 + [opcode])),
("1001push_nop", CScript([OP_0] * 1001 + [OP_NOP])),
]
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "opsuccess/bare", standard=False, tap=tap, leaf="bare_success", failure={"leaf": "bare_nop"}, **ERR_CLEANSTACK)
add_spender(spenders, "opsuccess/unexecif", standard=False, tap=tap, leaf="unexecif_success", failure={"leaf": "unexecif_nop"}, **ERR_CLEANSTACK)
add_spender(spenders, "opsuccess/return", standard=False, tap=tap, leaf="return_success", failure={"leaf": "return_nop"}, **ERR_OP_RETURN)
add_spender(spenders, "opsuccess/undecodable", standard=False, tap=tap, leaf="undecodable_success", failure={"leaf": "undecodable_nop"}, **ERR_UNDECODABLE)
add_spender(spenders, "opsuccess/undecodable_bypass", standard=False, tap=tap, leaf="undecodable_success", failure={"leaf": "undecodable_bypassed_success"}, **ERR_UNDECODABLE)
add_spender(spenders, "opsuccess/bigpush", standard=False, tap=tap, leaf="bigpush_success", failure={"leaf": "bigpush_nop"}, **ERR_PUSH_LIMIT)
add_spender(spenders, "opsuccess/1001push", standard=False, tap=tap, leaf="1001push_success", failure={"leaf": "1001push_nop"}, **ERR_STACK_SIZE)
add_spender(spenders, "opsuccess/1001inputs", standard=False, tap=tap, leaf="bare_success", inputs=[b'']*1001, failure={"leaf": "bare_nop"}, **ERR_STACK_SIZE)
# Non-OP_SUCCESSx (verify that those aren't accidentally treated as OP_SUCCESSx)
for opval in range(0, 0x100):
opcode = CScriptOp(opval)
if is_op_success(opcode):
continue
scripts = [
("normal", CScript([OP_RETURN, opcode] + [OP_NOP] * 75)),
("op_success", CScript([OP_RETURN, CScriptOp(0x50)]))
]
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "alwaysvalid/notsuccessx", tap=tap, leaf="op_success", inputs=[], standard=False, failure={"leaf": "normal"})
for compressed in [False, True]:
eckey1 = ECKey()
eckey1.set(generate_privkey(), compressed)
pubkey1 = eckey1.get_pubkey().get_bytes()
eckey2 = ECKey()
eckey2.set(generate_privkey(), compressed)
for p2sh in [False, True]:
for witv0 in [False, True]:
for hashtype in VALID_SIGHASHES_ECDSA + [random.randrange(0x04, 0x80), random.randrange(0x84, 0x100)]:
standard = (hashtype in VALID_SIGHASHES_ECDSA) and (compressed or not witv0)
add_spender(spenders, "legacy/pk-wrongkey", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, script=CScript([pubkey1, OP_CHECKSIG]), **SINGLE_SIG, key=eckey1, failure={"key": eckey2}, sigops_weight=4-3*witv0, **ERR_NO_SUCCESS)
add_spender(spenders, "legacy/pkh-sighashflip", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, pkh=pubkey1, key=eckey1, **SIGHASH_BITFLIP, sigops_weight=4-3*witv0, **ERR_NO_SUCCESS)
for p2sh in [False, True]:
for witv0 in [False, True]:
for hashtype in VALID_SIGHASHES_ECDSA + [random.randrange(0x04, 0x80), random.randrange(0x84, 0x100)]:
standard = hashtype in VALID_SIGHASHES_ECDSA and (p2sh or witv0)
add_spender(spenders, "compat/nocsa", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, script=CScript([OP_IF, OP_11, pubkey1, OP_CHECKSIGADD, OP_12, OP_EQUAL, OP_ELSE, pubkey1, OP_CHECKSIG, OP_ENDIF]), key=eckey1, sigops_weight=4-3*witv0, inputs=[getter("sign"), b''], failure={"inputs": [getter("sign"), b'\x01']}, **ERR_UNDECODABLE)
return spenders
def spenders_taproot_inactive():
    """Produce a list of spenders for signature hash tests while taproot is not active.

    Builds a single taproot output with a key path, a normal "pk" leaf, a leaf
    with an unknown future leaf version (0xc2), and a leaf containing an
    unexecuted OP_SUCCESS opcode. Every spender is marked standard=False:
    pre-activation, these spends must be non-standard regardless of whether the
    signature, control block, leaf version, or opcode would be valid or invalid
    under taproot rules.

    Returns:
        list: the constructed spender objects.
    """
    spenders = []

    sec = generate_privkey()
    pub, _ = compute_xonly_pubkey(sec)
    scripts = [
        ("pk", CScript([pub, OP_CHECKSIG])),
        # 0xc2 is a leaf version not defined by BIP342.
        ("future_leaf", CScript([pub, OP_CHECKSIG]), 0xc2),
        # CScriptOp(0x50) is an OP_SUCCESSx opcode, hidden in an unexecuted branch.
        ("op_success", CScript([pub, OP_CHECKSIG, OP_0, OP_IF, CScriptOp(0x50), OP_ENDIF])),
    ]
    tap = taproot_construct(pub, scripts)

    # Test that keypath spending is valid & non-standard, regardless of validity.
    add_spender(spenders, "inactive/keypath_valid", key=sec, tap=tap, standard=False)
    add_spender(spenders, "inactive/keypath_invalidsig", key=sec, tap=tap, standard=False, sighash=bitflipper(default_sighash))
    add_spender(spenders, "inactive/keypath_empty", key=sec, tap=tap, standard=False, witness=[])

    # Same for scriptpath spending (and features like annex, leaf versions, or OP_SUCCESS don't change this)
    add_spender(spenders, "inactive/scriptpath_valid", key=sec, tap=tap, leaf="pk", standard=False, inputs=[getter("sign")])
    add_spender(spenders, "inactive/scriptpath_invalidsig", key=sec, tap=tap, leaf="pk", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash))
    add_spender(spenders, "inactive/scriptpath_invalidcb", key=sec, tap=tap, leaf="pk", standard=False, inputs=[getter("sign")], controlblock=bitflipper(default_controlblock))
    add_spender(spenders, "inactive/scriptpath_valid_unkleaf", key=sec, tap=tap, leaf="future_leaf", standard=False, inputs=[getter("sign")])
    add_spender(spenders, "inactive/scriptpath_invalid_unkleaf", key=sec, tap=tap, leaf="future_leaf", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash))
    add_spender(spenders, "inactive/scriptpath_valid_opsuccess", key=sec, tap=tap, leaf="op_success", standard=False, inputs=[getter("sign")])
    # Fix: this spender previously reused the comment "inactive/scriptpath_valid_opsuccess"
    # from the line above even though it uses a bit-flipped (invalid) sighash. Rename to
    # follow the valid/invalid pairing used by the other spenders and keep labels unique.
    add_spender(spenders, "inactive/scriptpath_invalid_opsuccess", key=sec, tap=tap, leaf="op_success", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash))

    return spenders
# Script verification flag sets used in the dumped JSON test vectors.
LEGACY_FLAGS = "P2SH,DERSIG,CHECKLOCKTIMEVERIFY,CHECKSEQUENCEVERIFY,WITNESS,NULLDUMMY"
TAPROOT_FLAGS = "P2SH,DERSIG,CHECKLOCKTIMEVERIFY,CHECKSEQUENCEVERIFY,WITNESS,NULLDUMMY,TAPROOT"

def dump_json_test(tx, input_utxos, idx, success, failure):
    """Write one JSON test vector for spending input *idx* of *tx*.

    The vector records the serialized transaction, all prevouts, the input
    index, the applicable verification flags, the spender's comment, and the
    success/failure scriptSig+witness pairs (when given). It is written to
    TEST_DUMP_DIR (default ".") under a two-level path derived from the SHA1
    of the JSON line, which also deduplicates identical vectors.
    """
    spender = input_utxos[idx].spender
    # "legacy/" and "inactive/" spenders are validated without the TAPROOT flag.
    pre_taproot = spender.comment.startswith(("legacy/", "inactive/"))
    flags = LEGACY_FLAGS if pre_taproot else TAPROOT_FLAGS

    def witness_dict(wit):
        # wit is a (scriptSig, witness stack) pair.
        scriptsig, stack = wit[0], wit[1]
        return OrderedDict([("scriptSig", scriptsig.hex()), ("witness", [elem.hex() for elem in stack])])

    entries = [
        ("tx", tx.serialize().hex()),
        ("prevouts", [utxo.output.serialize().hex() for utxo in input_utxos]),
        ("index", idx),
        ("flags", flags),
        ("comment", spender.comment),
    ]
    if spender.is_standard:
        entries.append(("final", True))
    if success is not None:
        entries.append(("success", witness_dict(success)))
    if failure is not None:
        entries.append(("failure", witness_dict(failure)))

    dump = json.dumps(OrderedDict(entries)) + ",\n"
    sha1 = hashlib.sha1(dump.encode("utf-8")).hexdigest()
    dirname = os.environ.get("TEST_DUMP_DIR", ".") + ("/%s" % sha1[0])
    os.makedirs(dirname, exist_ok=True)
    with open(dirname + ("/%s" % sha1), 'w', encoding="utf8") as f:
        f.write(dump)
UTXOData = namedtuple('UTXOData', 'outpoint,output,spender')
class TaprootTest(WidecoinTestFramework):
def add_options(self, parser):
    """Register the taproot-test-specific command line options."""
    parser.add_argument(
        "--dumptests",
        dest="dump_tests",
        action="store_true",
        default=False,
        help="Dump generated test cases to directory set by TEST_DUMP_DIR environment variable",
    )
    parser.add_argument(
        "--previous_release",
        dest="previous_release",
        action="store_true",
        default=False,
        help="Use a previous release as taproot-inactive node",
    )
def skip_test_if_missing_module(self):
    """Skip the test when required components are not available."""
    # A wallet is needed to fund the test transactions.
    self.skip_if_no_wallet()
    if self.options.previous_release:
        # --previous_release requires previously-released node binaries on disk.
        self.skip_if_no_previous_releases()
def set_test_params(self):
    """Configure a two-node setup: node 0 taproot-inactive, node 1 active."""
    self.num_nodes = 2
    self.setup_clean_chain = True
    # Single-threaded script validation on both nodes.
    self.extra_args = [["-par=1"] for _ in range(2)]
    if not self.options.previous_release:
        # Keep taproot from ever activating on node 0 via version bits params.
        self.extra_args[0].append("-vbparams=taproot:1:1")
    else:
        # A previous-release binary serves as the taproot-inactive node;
        # only node 1 gets a wallet in that case.
        self.wallet_names = [None, self.default_wallet_name]
def setup_nodes(self):
    """Add and start both nodes, optionally running node 0 as an old release."""
    node0_version = 200100 if self.options.previous_release else None
    self.add_nodes(self.num_nodes, self.extra_args, versions=[node0_version, None])
    self.start_nodes()
    self.import_deterministic_coinbase_privkeys()
def block_submit(self, node, txs, msg, err_msg, cb_pubkey=None, fees=0, sigops_weight=0, witness=False, accept=False):
    """Mine *txs* into a block on top of self.tip and submit it to *node*.

    The coinbase gets an extra output script padded with OP_CHECKSIGs sized
    from the unused sigops budget (MAX_BLOCK_SIGOPS_WEIGHT - sigops_weight),
    so blocks exercise the sigops limit consistently.

    Args:
        node: node to submit the block to.
        txs: transactions to include (rehashed before inclusion).
        msg: description used in assertion messages.
        err_msg: substring expected in the submitblock response; None skips the check.
        cb_pubkey: pubkey for the coinbase output (optional).
        fees: fee amount to claim in the coinbase.
        sigops_weight: sigops weight already consumed by *txs*.
        witness: whether to add a witness commitment to the block.
        accept: whether the block is expected to become the new best block.
    """
    extra_output_script = CScript([OP_CHECKSIG]*((MAX_BLOCK_SIGOPS_WEIGHT - sigops_weight) // WITNESS_SCALE_FACTOR))
    block = create_block(self.tip, create_coinbase(self.lastblockheight + 1, pubkey=cb_pubkey, extra_output_script=extra_output_script, fees=fees), self.lastblocktime + 1)
    block.nVersion = 4
    for tx in txs:
        tx.rehash()
        block.vtx.append(tx)
    block.hashMerkleRoot = block.calc_merkle_root()
    # Replaced the side-effect-only "witness and add_witness_commitment(block)"
    # expression with an explicit conditional (identical behavior, clearer intent).
    if witness:
        add_witness_commitment(block)
    block.rehash()
    block.solve()
    block_response = node.submitblock(block.serialize().hex())
    if err_msg is not None:
        assert block_response is not None and err_msg in block_response, "Missing error message '%s' from block response '%s': %s" % (err_msg, "(None)" if block_response is None else block_response, msg)
    if accept:
        assert node.getbestblockhash() == block.hash, "Failed to accept: %s (response: %s)" % (msg, block_response)
        # Advance the bookkeeping used to build the next block.
        self.tip = block.sha256
        self.lastblockhash = block.hash
        self.lastblocktime += 1
        self.lastblockheight += 1
    else:
        assert node.getbestblockhash() == self.lastblockhash, "Failed to reject: " + msg
def test_spenders(self, node, spenders, input_counts):
    """Run randomized spending tests for a list of spenders.

    First creates one UTXO per spender (funded by the wallet, mined via
    block_submit), then spends them in randomly-grouped transactions.
    For each transaction, every input is tried once with a failing witness
    (unless the spender is no-fail) and once with all-valid witnesses,
    checking both mempool standardness and block-level consensus validity.

    Args:
        node: the node to run the tests against.
        spenders: spender objects (script, signing function, standardness and
            expected-error metadata); the list is shuffled in place.
        input_counts: allowed numbers of inputs per spending transaction.
    """
    # Build wallet addresses/pubkeys used for change outputs and coinbases.
    self.log.info("- Constructing addresses for returning coins")
    host_spks = []
    host_pubkeys = []
    for i in range(16):
        addr = node.getnewaddress(address_type=random.choice(["legacy", "p2sh-segwit", "bech32"]))
        info = node.getaddressinfo(addr)
        spk = bytes.fromhex(info['scriptPubKey'])
        host_spks.append(spk)
        host_pubkeys.append(bytes.fromhex(info['pubkey']))

    # Initialize the chain-tip bookkeeping consumed by block_submit.
    self.lastblockhash = node.getbestblockhash()
    self.tip = int(self.lastblockhash, 16)
    block = node.getblock(self.lastblockhash)
    self.lastblockheight = block['height']
    self.lastblocktime = block['time']

    # Create transactions spending up to 50 of the wallet's inputs, with one output per spender, and
    # one change output at the end. The transaction is constructed on the Python side to enable
    # having multiple outputs to the same address and outputs with no assigned address. The wallet
    # is then asked to sign it through signrawtransactionwithwallet, and then added to a block on the
    # Python side (to bypass standardness rules).
    self.log.info("- Creating test UTXOs...")
    random.shuffle(spenders)
    normal_utxos = []
    mismatching_utxos = []  # UTXOs with input that requires mismatching output position
    done = 0
    while done < len(spenders):
        # Compute how many UTXOs to create with this transaction
        count_this_tx = min(len(spenders) - done, (len(spenders) + 4) // 5, 10000)
        fund_tx = CTransaction()
        # Add the 50 highest-value inputs
        unspents = node.listunspent()
        random.shuffle(unspents)
        unspents.sort(key=lambda x: int(x["amount"] * 100000000), reverse=True)
        if len(unspents) > 50:
            unspents = unspents[:50]
        random.shuffle(unspents)
        balance = 0
        for unspent in unspents:
            balance += int(unspent["amount"] * 100000000)
            txid = int(unspent["txid"], 16)
            fund_tx.vin.append(CTxIn(COutPoint(txid, int(unspent["vout"])), CScript()))
        # Add outputs
        cur_progress = done / len(spenders)
        next_progress = (done + count_this_tx) / len(spenders)
        change_goal = (1.0 - 0.6 * next_progress) / (1.0 - 0.6 * cur_progress) * balance
        self.log.debug("Create %i UTXOs in a transaction spending %i inputs worth %.8f (sending ~%.8f to change)" % (count_this_tx, len(unspents), balance * 0.00000001, change_goal * 0.00000001))
        for i in range(count_this_tx):
            # Pick a random amount near the even split of the non-change balance.
            avg = (balance - change_goal) / (count_this_tx - i)
            amount = int(random.randrange(int(avg*0.85 + 0.5), int(avg*1.15 + 0.5)) + 0.5)
            balance -= amount
            fund_tx.vout.append(CTxOut(amount, spenders[done + i].script))
        # Add change
        fund_tx.vout.append(CTxOut(balance - 10000, random.choice(host_spks)))
        # Ask the wallet to sign
        ss = BytesIO(bytes.fromhex(node.signrawtransactionwithwallet(fund_tx.serialize().hex())["hex"]))
        fund_tx.deserialize(ss)
        # Construct UTXOData entries
        fund_tx.rehash()
        for i in range(count_this_tx):
            utxodata = UTXOData(outpoint=COutPoint(fund_tx.sha256, i), output=fund_tx.vout[i], spender=spenders[done])
            if utxodata.spender.need_vin_vout_mismatch:
                mismatching_utxos.append(utxodata)
            else:
                normal_utxos.append(utxodata)
            done += 1
        # Mine into a block
        self.block_submit(node, [fund_tx], "Funding tx", None, random.choice(host_pubkeys), 10000, MAX_BLOCK_SIGOPS_WEIGHT, True, True)

    # Consume groups of choice(input_coins) from utxos in a tx, testing the spenders.
    self.log.info("- Running %i spending tests" % done)
    random.shuffle(normal_utxos)
    random.shuffle(mismatching_utxos)
    assert done == len(normal_utxos) + len(mismatching_utxos)
    left = done
    while left:
        # Construct CTransaction with random nVersion, nLocktime
        tx = CTransaction()
        tx.nVersion = random.choice([1, 2, random.randint(-0x80000000, 0x7fffffff)])
        min_sequence = (tx.nVersion != 1 and tx.nVersion != 0) * 0x80000000  # The minimum sequence number to disable relative locktime
        if random.choice([True, False]):
            tx.nLockTime = random.randrange(LOCKTIME_THRESHOLD, self.lastblocktime - 7200)  # all absolute locktimes in the past
        else:
            tx.nLockTime = random.randrange(self.lastblockheight + 1)  # all block heights in the past
        # Decide how many UTXOs to test with.
        acceptable = [n for n in input_counts if n <= left and (left - n > max(input_counts) or (left - n) in [0] + input_counts)]
        num_inputs = random.choice(acceptable)
        # If we have UTXOs that require mismatching inputs/outputs left, include exactly one of those
        # unless there is only one normal UTXO left (as tests with mismatching UTXOs require at least one
        # normal UTXO to go in the first position), and we don't want to run out of normal UTXOs.
        input_utxos = []
        while len(mismatching_utxos) and (len(input_utxos) == 0 or len(normal_utxos) == 1):
            input_utxos.append(mismatching_utxos.pop())
            left -= 1
        # Top up with normal UTXOs (at least one is always included).
        for _ in range(max(1, num_inputs - len(input_utxos))):
            input_utxos.append(normal_utxos.pop())
            left -= 1
        # Reshuffle until a normal UTXO occupies input position 0.
        while True:
            random.shuffle(input_utxos)
            if not input_utxos[0].spender.need_vin_vout_mismatch:
                break
        first_mismatch_input = None
        for i in range(len(input_utxos)):
            if input_utxos[i].spender.need_vin_vout_mismatch:
                first_mismatch_input = i
        assert first_mismatch_input is None or first_mismatch_input > 0
        # Compute fee and input value, and fill in inputs with random nSequence values.
        amount = sum(utxo.output.nValue for utxo in input_utxos)
        fee = min(random.randrange(MIN_FEE * 2, MIN_FEE * 4), amount - DUST_LIMIT)
        in_value = amount - fee
        tx.vin = [CTxIn(outpoint=utxo.outpoint, nSequence=random.randint(min_sequence, 0xffffffff)) for utxo in input_utxos]
        tx.wit.vtxinwit = [CTxInWitness() for _ in range(len(input_utxos))]
        sigops_weight = sum(utxo.spender.sigops_weight for utxo in input_utxos)
        self.log.debug("Test: %s" % (", ".join(utxo.spender.comment for utxo in input_utxos)))
        # Pick the number of outputs; capped at first_mismatch_input so that
        # mismatching inputs get no output at the same position.
        num_outputs = random.choice(range(1, 1 + min(4, 4 if first_mismatch_input is None else first_mismatch_input)))
        assert in_value >= 0 and fee - num_outputs * DUST_LIMIT >= MIN_FEE
        for i in range(num_outputs):
            tx.vout.append(CTxOut())
            if in_value <= DUST_LIMIT:
                tx.vout[-1].nValue = DUST_LIMIT
            elif i < num_outputs - 1:
                tx.vout[-1].nValue = in_value
            else:
                tx.vout[-1].nValue = random.randint(DUST_LIMIT, in_value)
            in_value -= tx.vout[-1].nValue
            tx.vout[-1].scriptPubKey = random.choice(host_spks)
            sigops_weight += CScript(tx.vout[-1].scriptPubKey).GetSigOpCount(False) * WITNESS_SCALE_FACTOR
        # Whatever input value remains is added to the fee.
        fee += in_value
        assert fee >= 0
        # Account for the sigop of the coinbase output that claims the fee.
        cb_pubkey = random.choice(host_pubkeys)
        sigops_weight += 1 * WITNESS_SCALE_FACTOR
        # Precompute a success witness and (unless no-fail) a failure witness per input.
        input_data = []
        for i in range(len(input_utxos)):
            fn = input_utxos[i].spender.sat_function
            fail = None
            success = fn(tx, i, [utxo.output for utxo in input_utxos], True)
            if not input_utxos[i].spender.no_fail:
                fail = fn(tx, i, [utxo.output for utxo in input_utxos], False)
            input_data.append((fail, success))
            if self.options.dump_tests:
                dump_json_test(tx, input_utxos, i, success, fail)
        # Try the transaction with each input once set to its failure witness
        # (skipping no-fail spenders), and finally fully valid (fail_input None).
        for fail_input in list(range(len(input_utxos))) + [None]:
            if fail_input is not None and input_utxos[fail_input].spender.no_fail:
                continue
            # Expected message with each input failure, may be None (which is ignored)
            expected_fail_msg = None if fail_input is None else input_utxos[fail_input].spender.err_msg
            # Fill inputs/witnesses
            for i in range(len(input_utxos)):
                tx.vin[i].scriptSig = input_data[i][i != fail_input][0]
                tx.wit.vtxinwit[i].scriptWitness.stack = input_data[i][i != fail_input][1]
            # Submit to mempool to check standardness
            is_standard_tx = fail_input is None and all(utxo.spender.is_standard for utxo in input_utxos) and tx.nVersion >= 1 and tx.nVersion <= 2
            tx.rehash()
            msg = ','.join(utxo.spender.comment + ("*" if n == fail_input else "") for n, utxo in enumerate(input_utxos))
            if is_standard_tx:
                node.sendrawtransaction(tx.serialize().hex(), 0)
                assert node.getmempoolentry(tx.hash) is not None, "Failed to accept into mempool: " + msg
            else:
                assert_raises_rpc_error(-26, None, node.sendrawtransaction, tx.serialize().hex(), 0)
            # Submit in a block
            self.block_submit(node, [tx], msg, witness=True, accept=fail_input is None, cb_pubkey=cb_pubkey, fees=fee, sigops_weight=sigops_weight, err_msg=expected_fail_msg)

        # Log progress every ~200 completed tests.
        if (len(spenders) - left) // 200 > (len(spenders) - left - len(input_utxos)) // 200:
            self.log.info(" - %i tests done" % (len(spenders) - left))

    assert left == 0
    assert len(normal_utxos) == 0
    assert len(mismatching_utxos) == 0
    self.log.info(" - Done")
    def run_test(self):
        """Run all taproot spender tests: post-activation first, then pre-activation."""
        # Post-taproot activation tests go first (pre-taproot tests' blocks are invalid post-taproot).
        self.log.info("Post-activation tests...")
        # Mine enough blocks on node 1 so its coinbase outputs mature and are spendable.
        self.nodes[1].generate(COINBASE_MATURITY + 1)
        self.test_spenders(self.nodes[1], spenders_taproot_active(), input_counts=[1, 2, 2, 2, 2, 3])
        # NOTE(review): disconnect/reconnect cycle -- presumably to reset the p2p
        # connection between the two nodes after the spender tests; confirm intent.
        self.disconnect_nodes(0, 1)
        self.connect_nodes(0, 1)
        # Sweep the 500 largest UTXOs on node 1 into a single output paying an
        # address owned by node 0, so node 0 has funds for the pre-activation tests.
        addr = self.nodes[0].getnewaddress()
        unsp = self.nodes[1].listunspent()
        unsp = sorted(unsp, key=lambda i: i['amount'], reverse=True)
        unsp = unsp[:500]
        rawtx = self.nodes[1].createrawtransaction(
            inputs=[{
                'txid': i['txid'],
                'vout': i['vout']
            } for i in unsp],
            outputs={addr: sum(i['amount'] for i in unsp)}
        )
        rawtx = self.nodes[1].signrawtransactionwithwallet(rawtx)['hex']
        # Mine the sweep transaction directly in a locally-built block and submit it.
        block = create_block(tmpl=self.nodes[1].getblocktemplate(NORMAL_GBT_REQUEST_PARAMS), txlist=[rawtx])
        add_witness_commitment(block)
        block.rehash()
        block.solve()
        # submitblock returns None on success.
        assert_equal(None, self.nodes[1].submitblock(block.serialize().hex()))
        self.sync_blocks()
        self.log.info("Pre-activation tests...")
        self.test_spenders(self.nodes[0], spenders_taproot_inactive(), input_counts=[1])
        self.test_spenders(self.nodes[0], spenders_taproot_inactive(), input_counts=[2, 3])
# Script entry point: run the functional test through the test framework's main().
if __name__ == '__main__':
    TaprootTest().main()
| true | true |
f73c8ad4f6373cfbe5fad4e70b20d0bb9d5eed2c | 897 | py | Python | fastapi-api/src/tests/test_taxon.py | dataforgoodfr/batch8_ceebios | 4b2b2920c2f852e3424e05a53ef242c0f5b1f708 | [
"MIT"
] | 1 | 2021-02-25T09:44:46.000Z | 2021-02-25T09:44:46.000Z | fastapi-api/src/tests/test_taxon.py | dataforgoodfr/batch8_ceebios | 4b2b2920c2f852e3424e05a53ef242c0f5b1f708 | [
"MIT"
] | null | null | null | fastapi-api/src/tests/test_taxon.py | dataforgoodfr/batch8_ceebios | 4b2b2920c2f852e3424e05a53ef242c0f5b1f708 | [
"MIT"
] | 3 | 2020-09-30T17:58:27.000Z | 2020-12-09T16:59:44.000Z | import pytest
from app.routers import taxons
from fastapi.testclient import TestClient
# Payload used when creating a brand-new taxon via POST.
TEST_JSON = {"gbif_id": 15, "canonical_name": "test", "rank": "class"}
# Taxon assumed to already exist in the backing store with id 0
# (used by the read test and the duplicate-creation conflict test).
TEST_JSON_0 = {
    "gbif_id": 0,
    "canonical_name": "Canis Lupus Familiaris",
    "rank": "subspecies",
}
# Test client mounted directly on the taxons router (no full app instance needed).
client = TestClient(taxons.router)
def test_read_taxon():
    """GET /taxon/0 returns the pre-existing taxon payload."""
    resp = client.get("/taxon/0")
    assert resp.status_code == 200
    assert resp.json() == TEST_JSON_0
def test_post_taxon():
    """POST /taxon creates a taxon and echoes it back."""
    resp = client.post("/taxon", json=TEST_JSON)
    assert resp.status_code == 200
    assert resp.json() == TEST_JSON
def test_existing_species():
    # Posting a taxon that already exists should be rejected with 409.
    # NOTE(review): the status/json assertions are inside the pytest.raises
    # block, so they never execute when client.post raises as expected; and if
    # no exception is raised, pytest.raises fails before they matter. Consider
    # either dropping the raises context and asserting on the response, or
    # asserting on the captured exception `e` instead -- confirm intended API
    # behavior (raise vs. 409 response) before changing.
    with pytest.raises(Exception) as e:
        response = client.post("/taxon", json=TEST_JSON_0)
        assert response.status_code == 409
        assert response.json() == {"msg": "species already exists"}
| 25.628571 | 71 | 0.647715 | import pytest
from app.routers import taxons
from fastapi.testclient import TestClient
TEST_JSON = {"gbif_id": 15, "canonical_name": "test", "rank": "class"}
TEST_JSON_0 = {
"gbif_id": 0,
"canonical_name": "Canis Lupus Familiaris",
"rank": "subspecies",
}
client = TestClient(taxons.router)
def test_read_taxon():
response = client.get(
"/taxon/0",
)
assert response.status_code == 200
assert response.json() == TEST_JSON_0
def test_post_taxon():
response = client.post("/taxon", json=TEST_JSON)
assert response.status_code == 200
assert response.json() == TEST_JSON
def test_existing_species():
with pytest.raises(Exception) as e:
response = client.post("/taxon", json=TEST_JSON_0)
assert response.status_code == 409
assert response.json() == {"msg": "species already exists"}
| true | true |
f73c8bb84783e010fba58c16673e44bb4d95fadd | 10,126 | py | Python | voxseg/run_cnnlstm.py | NickWilkinson37/voxseg | 6402a67c0b4ee68115070b6aa870199d1f43c5a2 | [
"MIT"
] | 31 | 2021-03-11T11:32:36.000Z | 2022-03-22T11:54:35.000Z | voxseg/run_cnnlstm.py | parkitny/voxseg | 6402a67c0b4ee68115070b6aa870199d1f43c5a2 | [
"MIT"
] | 6 | 2021-05-21T08:42:36.000Z | 2021-11-19T11:36:43.000Z | voxseg/run_cnnlstm.py | parkitny/voxseg | 6402a67c0b4ee68115070b6aa870199d1f43c5a2 | [
"MIT"
] | 2 | 2021-06-10T05:55:12.000Z | 2021-12-08T11:19:44.000Z | # Module for running CNN-BiLSTM vad model,
# may also be run directly as a script
# Author: Nick Wilkinson 2021
import argparse
import os
import numpy as np
import pandas as pd
import tensorflow as tf
from typing import Tuple
from tensorflow.keras import models
from voxseg import utils
from scipy.signal import medfilt
# Pin TensorFlow to a single GPU (decoding is fast enough on one device) and
# cap the CPU thread pools so the process does not oversubscribe the machine.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    # Restrict TensorFlow to only use the first GPU, quick enough for decoding
    try:
        tf.config.experimental.set_visible_devices(gpus[0], 'GPU')
    except RuntimeError as e:
        # Visible devices must be set before GPUs have been initialized
        print(e)
# Limit both intra-op and inter-op parallelism to 10 threads each.
session_conf = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=10,inter_op_parallelism_threads=10)
sess = tf.compat.v1.Session(config=session_conf)
def decode(targets: pd.DataFrame, speech_thresh: float = 0.5, speech_w_music_thresh: float = 0.5, filt: int = 1) -> pd.DataFrame:
    '''Convert predicted frame-level target sequences in a pd.DataFrame to segment endpoints.

    Args:
        targets: A pd.DataFrame containing predicted targets (in array form) and metadata.
        speech_thresh (optional): A decision threshold between 0 and 1 for the speech class, lower values
            result in more frames being classified as speech. (Default: 0.5)
        speech_w_music_thresh (optional): A decision threshold between 0 and 1 for the speech_with_music class.
            Setting this threshold higher will filter out more music which may be desirable for ASR. (Default: 0.5)
        filt (optional): a kernel size for the median filter to apply to the output labels for smoothing. (Default: 1)

    Returns:
        A pd.DataFrame containing speech segment endpoints and metadata.
    '''
    targets = targets.copy()
    # Each output frame spans 0.32 s (hard-coded below). The thresholds are folded
    # into a per-class prior that re-weights the model outputs before the argmax.
    if targets['predicted-targets'].iloc[0].shape[-1] == 4:
        # Four-output model. NOTE(review): the argmax-to-binary mapping below
        # (class 1 -> label 0, anything else -> label 1) implies a specific
        # class ordering that is not visible in this file -- confirm it matches
        # the training label order before touching this code.
        prior = np.array([(1-speech_thresh) * speech_w_music_thresh,
                          speech_thresh * speech_w_music_thresh,
                          (1-speech_thresh) * (1-speech_w_music_thresh),
                          (1-speech_thresh) * speech_w_music_thresh])
        temp = pd.concat([_targets_to_endpoints(medfilt([0 if (j*prior).argmax() == 1 else 1 for j in i], filt), 0.32) \
            for i in targets['predicted-targets']], ignore_index=True)
    elif targets['predicted-targets'].iloc[0].shape[-1] == 2:
        # Two-output model: only the speech threshold is relevant.
        prior = np.array([speech_thresh,
                          1-speech_thresh])
        temp = pd.concat([_targets_to_endpoints(medfilt([0 if (j*prior).argmax() == 0 else 1 for j in i], filt), 0.32) \
            for i in targets['predicted-targets']], ignore_index=True)
    else:
        print(f'ERROR: model provided has {targets["predicted-targets"].iloc[0].shape[-1]} outputs. Model expected to have 2 or 4 outputs.')
        # NOTE(review): `temp` is left undefined on this path, so the code below
        # raises NameError; consider raising ValueError instead of printing.
    # If the features carried per-row segment start times, shift the endpoints so
    # they become absolute within the recording; otherwise use them as-is.
    if 'start' in targets.columns:
        targets['end'] = targets['start'] + temp['end']
        targets['start'] = targets['start'] + temp['start']
    else:
        targets['start'] = temp['start']
        targets['end'] = temp['end']
    targets = targets.drop(['predicted-targets'], axis=1)
    # Explode the per-recording endpoint arrays into one row per segment.
    targets = targets.apply(pd.Series.explode).reset_index(drop=True)
    # Kaldi-style utterance ids: <recording-id>_<start*100>_<end*100>, zero-padded to 7 digits.
    targets['utterance-id'] = targets['recording-id'].astype(str) + '_' + \
        ((targets['start'] * 100).astype(int)).astype(str).str.zfill(7) + '_' + \
        ((targets['end'] * 100).astype(int)).astype(str).str.zfill(7)
    return targets
def predict_targets(model: tf.keras.Model, features: pd.DataFrame) -> pd.DataFrame:
    '''Apply a pretrained model to predict targets from extracted features.

    Args:
        model: A pretrained tf.keras model.
        features: A pd.DataFrame containing features and metadata.

    Returns:
        A pd.DataFrame with the feature column replaced by predicted targets.
    '''
    feature_col = features['normalized-features']
    out = features.drop(['normalized-features'], axis=1)
    print('------------------- Running VAD -------------------')
    out['predicted-targets'] = _predict(model, feature_col)
    return out
def to_data_dir(endpoints: pd.DataFrame, out_dir: str) -> None:
    '''Write discovered speech segments as a Kaldi-style data directory.

    Produces `wav.scp` (recording-id -> wav path) and `segments`
    (utterance-id, recording-id, start, end) inside `out_dir`.

    Args:
        endpoints: A pd.DataFrame containing speech segment endpoints and metadata.
        out_dir: A path to an output directory where data files will be placed.
    '''
    if not os.path.exists(out_dir):
        print(f'Directory {out_dir} does not exist, creating it.')
        os.mkdir(out_dir)
    # wav.scp: one line per unique recording.
    wav_scp = endpoints[['recording-id', 'extended filename']].drop_duplicates()
    wav_scp.to_csv(f'{out_dir}/wav.scp', sep=' ', index=False, header=False)
    # segments: times rounded to millisecond precision.
    times = endpoints[['start', 'end']].astype(float).round(3)
    segments = pd.concat([endpoints[['utterance-id', 'recording-id']], times], axis=1)
    segments.to_csv(f'{out_dir}/segments', sep=' ', index=False, header=False)
def _predict(model: tf.keras.Model, col: pd.Series) -> pd.Series:
    '''Run the model over each feature array in a feature column.

    Each feature array gets singleton batch and channel axes added, is passed
    through the model, and the output is flattened to (frames, classes).

    Args:
        model: A pretrained tf.keras model.
        col: A column of a pd.DataFrame containing features.

    Returns:
        A pd.Series containing the predicted target sequences.
    '''
    outputs = [model.predict(feats[np.newaxis, :, :, :, np.newaxis]) for feats in col]
    return pd.Series([out.reshape(-1, out.shape[-1]) for out in outputs])
def _targets_to_endpoints(targets: np.ndarray, frame_length: float) -> pd.DataFrame:
    '''Auxiliary function used by decode() for converting a target sequence to endpoints.

    Args:
        targets: A binary np.ndarray of speech/nonspeech targets where 1 indicates the presence of speech.
        frame_length: The length of each target in seconds.

    Returns:
        A pd.DataFrame, containing the speech segment start and end boundaries in arrays.
    '''
    starts = []
    ends = []
    # Feed the frame labels through a small finite state transducer (_update_fst)
    # that emits 'start' on 0->1 transitions and 'end' on 1->0 transitions.
    state = 0
    for n, i in enumerate(targets):
        state, emmision = _update_fst(state, i)
        if emmision == 'start':
            starts.append(n)
        elif emmision == 'end':
            ends.append(n)
    # Flush the FST with the terminal symbol (None) so a segment still open at
    # the end of the sequence is closed; the end index is one past the last frame.
    # (The 'start' branch here is unreachable given _update_fst's table; kept for symmetry.)
    state, emmision = _update_fst(state, None)
    if emmision == 'start':
        starts.append(n)
    elif emmision == 'end':
        ends.append(n + 1)
    # Convert frame indices to seconds, rounded to millisecond precision.
    starts = np.around(np.array([i * frame_length for i in starts]), 3)
    ends = np.around(np.array([i * frame_length for i in ends]), 3)
    return pd.DataFrame({'start': [starts],'end': [ends]})
def _update_fst(state: int, transition: int) -> Tuple[int, str]:
'''Auxiliary function used by _targets_to_endpoints() for updating finite state
transducer.
Args:
state: The current state.
transition: The input (the next binary target).
Returns:
A tuple consisting of the new state and the output ('start', 'end' or None,
representing a start, end or no endpoint detections respectively).
'''
if state == 0:
if transition == 0:
state = 1
return state, None
elif transition == 1:
state = 2
return state, 'start'
elif state == 1:
if transition == 0:
return state, None
elif transition == 1:
state = 2
return state, 'start'
elif transition is None:
state = 3
return state, None
elif state == 2:
if transition == 0:
state = 1
return state, 'end'
elif transition == 1:
return state, None
elif transition is None:
state = 3
return state, 'end'
# Handle args when run directly
if __name__ == '__main__':
    parser = argparse.ArgumentParser(prog='run_cnnlstm.py',
                                     description='Run a trained voice activity detector on extracted feature set.')
    parser.add_argument('-s', '--speech_thresh', type=float,
                        help='a decision threshold value between (0,1) for speech vs non-speech, defaults to 0.5')
    parser.add_argument('-m', '--speech_w_music_thresh', type=float,
                        help='a decision threshold value between (0,1) for speech_with_music vs non-speech, defaults to 0.5, \
                        increasing will remove more speech_with_music, useful for downsteam ASR')
    parser.add_argument('-f', '--median_filter_kernel', type=int,
                        help='a kernel size for a median filter to smooth the output labels, defaults to 1 (no smoothing)')
    parser.add_argument('-M', '--model_path', type=str,
                        help='a path to a trained vad model saved as in .h5 format, overrides default pretrained model')
    parser.add_argument('feat_dir', type=str,
                        help='a path to a directory containing a feats.h5 file with extracted features')
    parser.add_argument('out_dir', type=str,
                        help='a path to an output directory where the output segments will be saved')
    args = parser.parse_args()
    # Fall back to the documented defaults when optional flags are omitted.
    if args.speech_thresh is not None:
        speech_thresh = args.speech_thresh
    else:
        speech_thresh = 0.5
    if args.speech_w_music_thresh is not None:
        speech_w_music_thresh = args.speech_w_music_thresh
    else:
        speech_w_music_thresh = 0.5
    if args.median_filter_kernel is not None:
        filt = args.median_filter_kernel
    else:
        filt = 1
    # Load the features produced by the extraction step.
    feats = pd.read_hdf(f'{args.feat_dir}/feats.h5')
    # Use the user-supplied model if given, otherwise the bundled CNN-BiLSTM.
    if args.model_path is not None:
        model = models.load_model(args.model_path)
    else:
        model = models.load_model(f'{os.path.dirname(os.path.realpath(__file__))}/models/cnn_bilstm.h5')
    # Predict frame targets -> decode to endpoints -> write Kaldi-style data dir.
    targets = predict_targets(model, feats)
    endpoints = decode(targets, speech_thresh, speech_w_music_thresh, filt)
    to_data_dir(endpoints, args.out_dir)
| 42.191667 | 140 | 0.640529 |
import argparse
import os
import numpy as np
import pandas as pd
import tensorflow as tf
from typing import Tuple
from tensorflow.keras import models
from voxseg import utils
from scipy.signal import medfilt
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
tf.config.experimental.set_visible_devices(gpus[0], 'GPU')
except RuntimeError as e:
print(e)
session_conf = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=10,inter_op_parallelism_threads=10)
sess = tf.compat.v1.Session(config=session_conf)
def decode(targets: pd.DataFrame, speech_thresh: float = 0.5, speech_w_music_thresh: float = 0.5, filt: int = 1) -> pd.DataFrame:
targets = targets.copy()
if targets['predicted-targets'].iloc[0].shape[-1] == 4:
prior = np.array([(1-speech_thresh) * speech_w_music_thresh,
speech_thresh * speech_w_music_thresh,
(1-speech_thresh) * (1-speech_w_music_thresh),
(1-speech_thresh) * speech_w_music_thresh])
temp = pd.concat([_targets_to_endpoints(medfilt([0 if (j*prior).argmax() == 1 else 1 for j in i], filt), 0.32) \
for i in targets['predicted-targets']], ignore_index=True)
elif targets['predicted-targets'].iloc[0].shape[-1] == 2:
prior = np.array([speech_thresh,
1-speech_thresh])
temp = pd.concat([_targets_to_endpoints(medfilt([0 if (j*prior).argmax() == 0 else 1 for j in i], filt), 0.32) \
for i in targets['predicted-targets']], ignore_index=True)
else:
print(f'ERROR: model provided has {targets["predicted-targets"].iloc[0].shape[-1]} outputs. Model expected to have 2 or 4 outputs.')
if 'start' in targets.columns:
targets['end'] = targets['start'] + temp['end']
targets['start'] = targets['start'] + temp['start']
else:
targets['start'] = temp['start']
targets['end'] = temp['end']
targets = targets.drop(['predicted-targets'], axis=1)
targets = targets.apply(pd.Series.explode).reset_index(drop=True)
targets['utterance-id'] = targets['recording-id'].astype(str) + '_' + \
((targets['start'] * 100).astype(int)).astype(str).str.zfill(7) + '_' + \
((targets['end'] * 100).astype(int)).astype(str).str.zfill(7)
return targets
def predict_targets(model: tf.keras.Model, features: pd.DataFrame) -> pd.DataFrame:
targets = features.drop(['normalized-features'], axis=1)
print('------------------- Running VAD -------------------')
targets['predicted-targets'] = _predict(model, features['normalized-features'])
return targets
def to_data_dir(endpoints: pd.DataFrame, out_dir: str) -> None:
if not os.path.exists(out_dir):
print(f'Directory {out_dir} does not exist, creating it.')
os.mkdir(out_dir)
endpoints[['recording-id', 'extended filename']].drop_duplicates().to_csv(
f'{out_dir}/wav.scp',sep=' ', index=False, header=False)
pd.concat([endpoints[['utterance-id', 'recording-id']], endpoints[['start', 'end']].astype(float).round(3)],
axis=1).to_csv(f'{out_dir}/segments', sep=' ', index=False, header=False)
def _predict(model: tf.keras.Model, col: pd.Series) -> pd.Series:
targets = []
for features in col:
temp = model.predict(features[np.newaxis,:,:,:,np.newaxis])
targets.append(temp.reshape(-1, temp.shape[-1]))
return pd.Series(targets)
def _targets_to_endpoints(targets: np.ndarray, frame_length: float) -> pd.DataFrame:
starts = []
ends = []
state = 0
for n, i in enumerate(targets):
state, emmision = _update_fst(state, i)
if emmision == 'start':
starts.append(n)
elif emmision == 'end':
ends.append(n)
state, emmision = _update_fst(state, None)
if emmision == 'start':
starts.append(n)
elif emmision == 'end':
ends.append(n + 1)
starts = np.around(np.array([i * frame_length for i in starts]), 3)
ends = np.around(np.array([i * frame_length for i in ends]), 3)
return pd.DataFrame({'start': [starts],'end': [ends]})
def _update_fst(state: int, transition: int) -> Tuple[int, str]:
if state == 0:
if transition == 0:
state = 1
return state, None
elif transition == 1:
state = 2
return state, 'start'
elif state == 1:
if transition == 0:
return state, None
elif transition == 1:
state = 2
return state, 'start'
elif transition is None:
state = 3
return state, None
elif state == 2:
if transition == 0:
state = 1
return state, 'end'
elif transition == 1:
return state, None
elif transition is None:
state = 3
return state, 'end'
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='run_cnnlstm.py',
description='Run a trained voice activity detector on extracted feature set.')
parser.add_argument('-s', '--speech_thresh', type=float,
help='a decision threshold value between (0,1) for speech vs non-speech, defaults to 0.5')
parser.add_argument('-m', '--speech_w_music_thresh', type=float,
help='a decision threshold value between (0,1) for speech_with_music vs non-speech, defaults to 0.5, \
increasing will remove more speech_with_music, useful for downsteam ASR')
parser.add_argument('-f', '--median_filter_kernel', type=int,
help='a kernel size for a median filter to smooth the output labels, defaults to 1 (no smoothing)')
parser.add_argument('-M', '--model_path', type=str,
help='a path to a trained vad model saved as in .h5 format, overrides default pretrained model')
parser.add_argument('feat_dir', type=str,
help='a path to a directory containing a feats.h5 file with extracted features')
parser.add_argument('out_dir', type=str,
help='a path to an output directory where the output segments will be saved')
args = parser.parse_args()
if args.speech_thresh is not None:
speech_thresh = args.speech_thresh
else:
speech_thresh = 0.5
if args.speech_w_music_thresh is not None:
speech_w_music_thresh = args.speech_w_music_thresh
else:
speech_w_music_thresh = 0.5
if args.median_filter_kernel is not None:
filt = args.median_filter_kernel
else:
filt = 1
feats = pd.read_hdf(f'{args.feat_dir}/feats.h5')
if args.model_path is not None:
model = models.load_model(args.model_path)
else:
model = models.load_model(f'{os.path.dirname(os.path.realpath(__file__))}/models/cnn_bilstm.h5')
targets = predict_targets(model, feats)
endpoints = decode(targets, speech_thresh, speech_w_music_thresh, filt)
to_data_dir(endpoints, args.out_dir)
| true | true |
f73c8c208a7bba2c7b9281e9efe17e5ea934f641 | 4,427 | py | Python | lib/tinycss/tests/test_page3.py | imtiaz-emu/gcp-flask-test | 096f466242aa14941712ab8ea06ac4fb4eaeb993 | [
"Apache-2.0"
] | null | null | null | lib/tinycss/tests/test_page3.py | imtiaz-emu/gcp-flask-test | 096f466242aa14941712ab8ea06ac4fb4eaeb993 | [
"Apache-2.0"
] | 9 | 2020-02-11T23:31:54.000Z | 2022-03-11T23:15:44.000Z | lib/tinycss/tests/test_page3.py | imtiaz-emu/gcp-flask-test | 096f466242aa14941712ab8ea06ac4fb4eaeb993 | [
"Apache-2.0"
] | null | null | null | # coding: utf8
"""
Tests for the Paged Media 3 parser
----------------------------------
:copyright: (c) 2012 by Simon Sapin.
:license: BSD, see LICENSE for more details.
"""
from __future__ import unicode_literals
import pytest
from tinycss.css21 import CSS21Parser
from tinycss.page3 import CSSPage3Parser
from .test_tokenizer import jsonify
from . import assert_errors
@pytest.mark.parametrize(('css', 'expected_selector',
                          'expected_specificity', 'expected_errors'), [
    # Rows: (css source, (page name, pseudo-class) or None,
    #        (a, b, c) specificity or None, expected parse error fragments)
    ('@page {}', (None, None), (0, 0, 0), []),
    ('@page :first {}', (None, 'first'), (0, 1, 0), []),
    ('@page:left{}', (None, 'left'), (0, 0, 1), []),
    ('@page :right {}', (None, 'right'), (0, 0, 1), []),
    ('@page :blank{}', (None, 'blank'), (0, 1, 0), []),
    ('@page :last {}', None, None, ['invalid @page selector']),
    ('@page : first {}', None, None, ['invalid @page selector']),
    ('@page foo:first {}', ('foo', 'first'), (1, 1, 0), []),
    ('@page bar :left {}', ('bar', 'left'), (1, 0, 1), []),
    (r'@page \26:right {}', ('&', 'right'), (1, 0, 1), []),
    ('@page foo {}', ('foo', None), (1, 0, 0), []),
    (r'@page \26 {}', ('&', None), (1, 0, 0), []),
    ('@page foo fist {}', None, None, ['invalid @page selector']),
    ('@page foo, bar {}', None, None, ['invalid @page selector']),
    ('@page foo&first {}', None, None, ['invalid @page selector']),
])
def test_selectors(css, expected_selector, expected_specificity,
                   expected_errors):
    # Parse and check errors first; on an invalid selector the rule is dropped
    # entirely, so selector/specificity are only checked when a rule survives.
    stylesheet = CSSPage3Parser().parse_stylesheet(css)
    assert_errors(stylesheet.errors, expected_errors)
    if stylesheet.rules:
        assert len(stylesheet.rules) == 1
        rule = stylesheet.rules[0]
        assert rule.at_keyword == '@page'
        selector = rule.selector
        assert rule.specificity == expected_specificity
    else:
        selector = None
    assert selector == expected_selector
@pytest.mark.parametrize(('css', 'expected_declarations',
                          'expected_rules','expected_errors'), [
    # Rows: (css source, @page declarations, margin-box at-rules, expected errors)
    ('@page {}', [], [], []),
    ('@page { foo: 4; bar: z }',
     [('foo', [('INTEGER', 4)]), ('bar', [('IDENT', 'z')])], [], []),
    ('''@page { foo: 4;
        @top-center { content: "Awesome Title" }
        @bottom-left { content: counter(page) }
        bar: z
    }''',
     [('foo', [('INTEGER', 4)]), ('bar', [('IDENT', 'z')])],
     [('@top-center', [('content', [('STRING', 'Awesome Title')])]),
      ('@bottom-left', [('content', [
          ('FUNCTION', 'counter', [('IDENT', 'page')])])])],
     []),
    ('''@page { foo: 4;
        @bottom-top { content: counter(page) }
        bar: z
    }''',
     [('foo', [('INTEGER', 4)]), ('bar', [('IDENT', 'z')])],
     [],
     ['unknown at-rule in @page context: @bottom-top']),
    ('@page{} @top-right{}', [], [], [
        '@top-right rule not allowed in stylesheet']),
    ('@page{ @top-right 4 {} }', [], [], [
        'unexpected INTEGER token in @top-right rule header']),
    # Not much error recovery tests here. This should be covered in test_css21
])
def test_content(css, expected_declarations, expected_rules, expected_errors):
    stylesheet = CSSPage3Parser().parse_stylesheet(css)
    assert_errors(stylesheet.errors, expected_errors)
    # Helper: serialize a rule's declarations to comparable (name, tokens) pairs.
    def declarations(rule):
        return [(decl.name, list(jsonify(decl.value)))
                for decl in rule.declarations]
    assert len(stylesheet.rules) == 1
    rule = stylesheet.rules[0]
    assert rule.at_keyword == '@page'
    assert declarations(rule) == expected_declarations
    # Margin-box rules (e.g. @top-center) are attached to the @page rule.
    rules = [(margin_rule.at_keyword, declarations(margin_rule))
             for margin_rule in rule.at_rules]
    assert rules == expected_rules
def test_in_at_media():
    """@page nested in @media is an error in CSS 2.1 but accepted by Paged Media 3."""
    css = '@media print { @page { size: A4 } }'
    # CSS 2.1: the nested @page rule is reported and dropped.
    result = CSS21Parser().parse_stylesheet(css)
    assert_errors(result.errors, ['@page rule not allowed in @media'])
    media_rule, = result.rules
    assert media_rule.at_keyword == '@media'
    assert media_rule.rules == []
    # Paged Media 3: the nested @page rule is parsed and kept.
    result = CSSPage3Parser().parse_stylesheet(css)
    assert result.errors == []
    media_rule, = result.rules
    page_rule, = media_rule.rules
    assert media_rule.at_keyword == '@media'
    assert page_rule.at_keyword == '@page'
    assert len(page_rule.declarations) == 1
| 36.586777 | 78 | 0.563587 |
from __future__ import unicode_literals
import pytest
from tinycss.css21 import CSS21Parser
from tinycss.page3 import CSSPage3Parser
from .test_tokenizer import jsonify
from . import assert_errors
@pytest.mark.parametrize(('css', 'expected_selector',
'expected_specificity', 'expected_errors'), [
('@page {}', (None, None), (0, 0, 0), []),
('@page :first {}', (None, 'first'), (0, 1, 0), []),
('@page:left{}', (None, 'left'), (0, 0, 1), []),
('@page :right {}', (None, 'right'), (0, 0, 1), []),
('@page :blank{}', (None, 'blank'), (0, 1, 0), []),
('@page :last {}', None, None, ['invalid @page selector']),
('@page : first {}', None, None, ['invalid @page selector']),
('@page foo:first {}', ('foo', 'first'), (1, 1, 0), []),
('@page bar :left {}', ('bar', 'left'), (1, 0, 1), []),
(r'@page \26:right {}', ('&', 'right'), (1, 0, 1), []),
('@page foo {}', ('foo', None), (1, 0, 0), []),
(r'@page \26 {}', ('&', None), (1, 0, 0), []),
('@page foo fist {}', None, None, ['invalid @page selector']),
('@page foo, bar {}', None, None, ['invalid @page selector']),
('@page foo&first {}', None, None, ['invalid @page selector']),
])
def test_selectors(css, expected_selector, expected_specificity,
expected_errors):
stylesheet = CSSPage3Parser().parse_stylesheet(css)
assert_errors(stylesheet.errors, expected_errors)
if stylesheet.rules:
assert len(stylesheet.rules) == 1
rule = stylesheet.rules[0]
assert rule.at_keyword == '@page'
selector = rule.selector
assert rule.specificity == expected_specificity
else:
selector = None
assert selector == expected_selector
@pytest.mark.parametrize(('css', 'expected_declarations',
'expected_rules','expected_errors'), [
('@page {}', [], [], []),
('@page { foo: 4; bar: z }',
[('foo', [('INTEGER', 4)]), ('bar', [('IDENT', 'z')])], [], []),
('''@page { foo: 4;
@top-center { content: "Awesome Title" }
@bottom-left { content: counter(page) }
bar: z
}''',
[('foo', [('INTEGER', 4)]), ('bar', [('IDENT', 'z')])],
[('@top-center', [('content', [('STRING', 'Awesome Title')])]),
('@bottom-left', [('content', [
('FUNCTION', 'counter', [('IDENT', 'page')])])])],
[]),
('''@page { foo: 4;
@bottom-top { content: counter(page) }
bar: z
}''',
[('foo', [('INTEGER', 4)]), ('bar', [('IDENT', 'z')])],
[],
['unknown at-rule in @page context: @bottom-top']),
('@page{} @top-right{}', [], [], [
'@top-right rule not allowed in stylesheet']),
('@page{ @top-right 4 {} }', [], [], [
'unexpected INTEGER token in @top-right rule header']),
])
def test_content(css, expected_declarations, expected_rules, expected_errors):
stylesheet = CSSPage3Parser().parse_stylesheet(css)
assert_errors(stylesheet.errors, expected_errors)
def declarations(rule):
return [(decl.name, list(jsonify(decl.value)))
for decl in rule.declarations]
assert len(stylesheet.rules) == 1
rule = stylesheet.rules[0]
assert rule.at_keyword == '@page'
assert declarations(rule) == expected_declarations
rules = [(margin_rule.at_keyword, declarations(margin_rule))
for margin_rule in rule.at_rules]
assert rules == expected_rules
def test_in_at_media():
css = '@media print { @page { size: A4 } }'
stylesheet = CSS21Parser().parse_stylesheet(css)
assert_errors(stylesheet.errors, ['@page rule not allowed in @media'])
at_media_rule, = stylesheet.rules
assert at_media_rule.at_keyword == '@media'
assert at_media_rule.rules == []
stylesheet = CSSPage3Parser().parse_stylesheet(css)
assert stylesheet.errors == []
at_media_rule, = stylesheet.rules
at_page_rule, = at_media_rule.rules
assert at_media_rule.at_keyword == '@media'
assert at_page_rule.at_keyword == '@page'
assert len(at_page_rule.declarations) == 1
| true | true |
f73c8edfb84fb6d2c35538e7c59e8a0be53982d8 | 492 | py | Python | yas3/aws.py | DavidMChan/yas3 | eafccc47147a3ca03c36dee4e6b40775cb17a8b0 | [
"Apache-2.0"
] | 1 | 2020-11-03T18:50:30.000Z | 2020-11-03T18:50:30.000Z | yas3/aws.py | DavidMChan/yas3 | eafccc47147a3ca03c36dee4e6b40775cb17a8b0 | [
"Apache-2.0"
] | null | null | null | yas3/aws.py | DavidMChan/yas3 | eafccc47147a3ca03c36dee4e6b40775cb17a8b0 | [
"Apache-2.0"
] | null | null | null |
def test_aws_acl(acl_string):
if acl_string not in ('private', 'public-read', 'public-read-write', 'authenticated-read', 'aws-exec-read',
'bucket-owner-read', 'bucket-owner-full-control'):
raise ValueError('ACL string must be one of: ',
('private', 'public-read', 'public-read-write', 'authenticated-read', 'aws-exec-read',
'bucket-owner-read', 'bucket-owner-full-control'))
return True
| 49.2 | 114 | 0.571138 |
def test_aws_acl(acl_string):
if acl_string not in ('private', 'public-read', 'public-read-write', 'authenticated-read', 'aws-exec-read',
'bucket-owner-read', 'bucket-owner-full-control'):
raise ValueError('ACL string must be one of: ',
('private', 'public-read', 'public-read-write', 'authenticated-read', 'aws-exec-read',
'bucket-owner-read', 'bucket-owner-full-control'))
return True
| true | true |
f73c8f05cbcedd56236c6d96bdd45ddf527b7643 | 203 | py | Python | host/serializers.py | FGAUnB-REQ-GM/2021.2-PousadaAnimal | b7371aebccad0da23073de0db642a6ce824f919e | [
"MIT"
] | null | null | null | host/serializers.py | FGAUnB-REQ-GM/2021.2-PousadaAnimal | b7371aebccad0da23073de0db642a6ce824f919e | [
"MIT"
] | 95 | 2022-02-04T19:40:09.000Z | 2022-03-31T20:24:11.000Z | host/serializers.py | FGAUnB-REQ-GM/2021.2-PousadaAnimal | b7371aebccad0da23073de0db642a6ce824f919e | [
"MIT"
] | 4 | 2022-01-26T23:51:48.000Z | 2022-01-27T18:28:16.000Z | from rest_framework import serializers
from .models import Host
class HostSerializer(serializers.ModelSerializer):
class Meta:
model = Host
fields = ['id','species', 'size', 'cost']
| 25.375 | 50 | 0.694581 | from rest_framework import serializers
from .models import Host
class HostSerializer(serializers.ModelSerializer):
class Meta:
model = Host
fields = ['id','species', 'size', 'cost']
| true | true |
f73c8f1b878026de4fc960180245cc9940c99ebe | 1,554 | py | Python | nemo_text_processing/inverse_text_normalization/ru/taggers/electronic.py | lifefeel/NeMo | 85b7d3f0288d42c234e70546179974b6f4d13536 | [
"Apache-2.0"
] | null | null | null | nemo_text_processing/inverse_text_normalization/ru/taggers/electronic.py | lifefeel/NeMo | 85b7d3f0288d42c234e70546179974b6f4d13536 | [
"Apache-2.0"
] | 1 | 2022-03-06T14:09:02.000Z | 2022-03-06T14:09:02.000Z | nemo_text_processing/inverse_text_normalization/ru/taggers/electronic.py | admariner/NeMo | e542d7f9063a40afa4119a3b94de4c2c636a37bb | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
from pynini.lib import pynutil
class ElectronicFst(GraphFst):
"""
Finite state transducer for classifying electronic, e.g.
"эй би собака эн ди точка ру" -> electronic { username: "ab@nd.ru" }
Args:
tn_electronic: Text normalization Electronic graph
deterministic: if True will provide a single transduction option,
for False multiple transduction are generated (used for audio-based normalization)
"""
def __init__(self, tn_electronic, deterministic: bool = True):
super().__init__(name="electronic", kind="classify", deterministic=deterministic)
graph = tn_electronic.final_graph
graph = graph.invert().optimize()
graph = pynutil.insert("username: \"") + graph + pynutil.insert("\"")
graph = self.add_tokens(graph)
self.fst = graph.optimize()
| 39.846154 | 94 | 0.718147 |
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
from pynini.lib import pynutil
class ElectronicFst(GraphFst):
def __init__(self, tn_electronic, deterministic: bool = True):
super().__init__(name="electronic", kind="classify", deterministic=deterministic)
graph = tn_electronic.final_graph
graph = graph.invert().optimize()
graph = pynutil.insert("username: \"") + graph + pynutil.insert("\"")
graph = self.add_tokens(graph)
self.fst = graph.optimize()
| true | true |
f73c90cb1bc3b129ccc520e873087e69e1f796d6 | 1,160 | py | Python | src/api/util/RDP.py | hostbaby/RedisLive | a1b655483c08c80ca1aa91eed87e434d458bcfb7 | [
"MIT"
] | 1,523 | 2015-01-02T07:07:24.000Z | 2022-03-09T09:11:50.000Z | src/api/util/RDP.py | hostbaby/RedisLive | a1b655483c08c80ca1aa91eed87e434d458bcfb7 | [
"MIT"
] | 41 | 2015-01-05T15:43:48.000Z | 2022-02-05T02:17:28.000Z | src/api/util/RDP.py | hostbaby/RedisLive | a1b655483c08c80ca1aa91eed87e434d458bcfb7 | [
"MIT"
] | 348 | 2015-01-05T03:28:10.000Z | 2022-03-09T02:33:56.000Z | """
The Ramer-Douglas-Peucker algorithm roughly ported from the pseudo-code provided
by http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm
"""
from math import sqrt
def distance(a, b):
return sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)
def point_line_distance(point, start, end):
if (start == end):
return distance(point, start)
else:
n = abs(
(end[0] - start[0]) * (start[1] - point[1]) - (start[0] - point[0]) * (end[1] - start[1])
)
d = sqrt(
(end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2
)
return n / d
def rdp(points, epsilon):
"""
Reduces a series of points to a simplified version that loses detail, but
maintains the general shape of the series.
"""
dmax = 0.0
index = 0
for i in range(1, len(points) - 1):
d = point_line_distance(points[i], points[0], points[-1])
if d > dmax:
index = i
dmax = d
if dmax >= epsilon:
results = rdp(points[:index+1], epsilon)[:-1] + rdp(points[index:], epsilon)
else:
results = [points[0], points[-1]]
return results | 29.74359 | 101 | 0.55 |
from math import sqrt
def distance(a, b):
return sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)
def point_line_distance(point, start, end):
if (start == end):
return distance(point, start)
else:
n = abs(
(end[0] - start[0]) * (start[1] - point[1]) - (start[0] - point[0]) * (end[1] - start[1])
)
d = sqrt(
(end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2
)
return n / d
def rdp(points, epsilon):
dmax = 0.0
index = 0
for i in range(1, len(points) - 1):
d = point_line_distance(points[i], points[0], points[-1])
if d > dmax:
index = i
dmax = d
if dmax >= epsilon:
results = rdp(points[:index+1], epsilon)[:-1] + rdp(points[index:], epsilon)
else:
results = [points[0], points[-1]]
return results | true | true |
f73c915c4e464074da4d7508c7c611be4d49dd01 | 16,011 | py | Python | gnomad/variant_qc/evaluation.py | broadinstitute/gnomad_hail | 382fc2c7976d58cc8983cc4c9f0df5d8d5f9fae3 | [
"MIT"
] | 20 | 2017-10-17T00:33:20.000Z | 2020-02-21T20:07:10.000Z | gnomad/variant_qc/evaluation.py | broadinstitute/gnomad_hail | 382fc2c7976d58cc8983cc4c9f0df5d8d5f9fae3 | [
"MIT"
] | 49 | 2017-10-04T04:04:31.000Z | 2020-03-03T16:04:34.000Z | gnomad/variant_qc/evaluation.py | broadinstitute/gnomad_hail | 382fc2c7976d58cc8983cc4c9f0df5d8d5f9fae3 | [
"MIT"
] | 9 | 2017-10-04T13:47:27.000Z | 2019-11-23T02:29:55.000Z | # noqa: D100
import logging
from typing import Dict, Optional
import hail as hl
logging.basicConfig(
format="%(asctime)s (%(name)s %(lineno)s): %(message)s",
datefmt="%m/%d/%Y %I:%M:%S %p",
)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def compute_ranked_bin(
ht: hl.Table,
score_expr: hl.expr.NumericExpression,
bin_expr: Dict[str, hl.expr.BooleanExpression] = {"bin": True},
compute_snv_indel_separately: bool = True,
n_bins: int = 100,
desc: bool = True,
) -> hl.Table:
r"""
Return a table with a bin for each row based on the ranking of `score_expr`.
The bin is computed by dividing the `score_expr` into `n_bins` bins containing approximately equal numbers of elements.
This is done by ranking the rows by `score_expr` (and a random number in cases where multiple variants have the same score)
and then assigning the variant to a bin based on its ranking.
If `compute_snv_indel_separately` is True all items in `bin_expr` will be stratified by snv / indels for the ranking and
bin calculation. Because SNV and indel rows are mutually exclusive, they are re-combined into a single annotation. For
example if we have the following four variants and scores and `n_bins` of 2:
======== ======= ====== ================= =================
Variant Type Score bin - `compute_snv_indel_separately`:
-------- ------- ------ -------------------------------------
\ \ \ False True
======== ======= ====== ================= =================
Var1 SNV 0.1 1 1
Var2 SNV 0.2 1 2
Var3 Indel 0.3 2 1
Var4 Indel 0.4 2 2
======== ======= ====== ================= =================
.. note::
The `bin_expr` defines which data the bin(s) should be computed on. E.g., to get biallelic specific binning
and singleton specific binning, the following could be used:
.. code-block:: python
bin_expr={
'biallelic_bin': ~ht.was_split,
'singleton_bin': ht.singleton
}
:param ht: Input Table
:param score_expr: Expression containing the score
:param bin_expr: Specific row grouping(s) to perform ranking and binning on (see note)
:param compute_snv_indel_separately: Should all `bin_expr` items be stratified by SNVs / indels
:param n_bins: Number of bins to bin the data into
:param desc: Whether to bin the score in descending order
:return: Table with the requested bin annotations
"""
if compute_snv_indel_separately:
# For each bin, add a SNV / indel stratification
bin_expr = {
f"{bin_id}_{snv}": (bin_expr & snv_expr)
for bin_id, bin_expr in bin_expr.items()
for snv, snv_expr in [
("snv", hl.is_snp(ht.alleles[0], ht.alleles[1])),
("indel", ~hl.is_snp(ht.alleles[0], ht.alleles[1])),
]
}
bin_ht = ht.select(
**{f"_filter_{bin_id}": bin_expr for bin_id, bin_expr in bin_expr.items()},
_score=score_expr,
snv=hl.is_snp(ht.alleles[0], ht.alleles[1]),
_rand=hl.rand_unif(0, 1),
)
logger.info(
"Sorting the HT by score_expr followed by a random float between 0 and 1. "
"Then adding a row index per grouping defined by bin_expr..."
)
bin_ht = bin_ht.order_by("_score", "_rand")
bin_ht = bin_ht.annotate(
**{
f"{bin_id}_rank": hl.or_missing(
bin_ht[f"_filter_{bin_id}"],
hl.scan.count_where(bin_ht[f"_filter_{bin_id}"]),
)
for bin_id in bin_expr
}
)
bin_ht = bin_ht.key_by("locus", "alleles")
# Annotate globals with variant counts per group defined by bin_expr. This is used to determine bin assignment
bin_ht = bin_ht.annotate_globals(
bin_group_variant_counts=bin_ht.aggregate(
hl.Struct(
**{
bin_id: hl.agg.filter(
bin_ht[f"_filter_{bin_id}"],
hl.agg.count(),
)
for bin_id in bin_expr
}
)
)
)
logger.info("Binning ranked rows into %d bins...", n_bins)
bin_ht = bin_ht.select(
"snv",
**{
bin_id: hl.int(
hl.floor(
(
n_bins
* (
bin_ht[f"{bin_id}_rank"]
/ hl.float64(bin_ht.bin_group_variant_counts[bin_id])
)
)
+ 1
)
)
for bin_id in bin_expr
},
)
if desc:
bin_ht = bin_ht.annotate(
**{bin_id: n_bins - bin_ht[bin_id] + 1 for bin_id in bin_expr}
)
# Because SNV and indel rows are mutually exclusive, re-combine them into a single bin.
# Update the global bin_group_variant_counts struct to reflect the change in bin names in the table
if compute_snv_indel_separately:
bin_expr_no_snv = {
bin_id.rsplit("_", 1)[0] for bin_id in bin_ht.bin_group_variant_counts
}
bin_ht = bin_ht.annotate_globals(
bin_group_variant_counts=hl.struct(
**{
bin_id: hl.struct(
**{
snv: bin_ht.bin_group_variant_counts[f"{bin_id}_{snv}"]
for snv in ["snv", "indel"]
}
)
for bin_id in bin_expr_no_snv
}
)
)
bin_ht = bin_ht.transmute(
**{
bin_id: hl.if_else(
bin_ht.snv,
bin_ht[f"{bin_id}_snv"],
bin_ht[f"{bin_id}_indel"],
)
for bin_id in bin_expr_no_snv
}
)
return bin_ht
def compute_grouped_binned_ht(
bin_ht: hl.Table,
checkpoint_path: Optional[str] = None,
) -> hl.GroupedTable:
"""
Group a Table that has been annotated with bins (`compute_ranked_bin` or `create_binned_ht`).
The table will be grouped by bin_id (bin, biallelic, etc.), contig, snv, bi_allelic and singleton.
.. note::
If performing an aggregation following this grouping (such as `score_bin_agg`) then the aggregation
function will need to use `ht._parent` to get the origin Table from the GroupedTable for the aggregation
:param bin_ht: Input Table with a `bin_id` annotation
:param checkpoint_path: If provided an intermediate checkpoint table is created with all required annotations before shuffling.
:return: Table grouped by bins(s)
"""
# Explode the rank table by bin_id
bin_ht = bin_ht.annotate(
bin_groups=hl.array(
[
hl.Struct(bin_id=bin_name, bin=bin_ht[bin_name])
for bin_name in bin_ht.bin_group_variant_counts
]
)
)
bin_ht = bin_ht.explode(bin_ht.bin_groups)
bin_ht = bin_ht.transmute(
bin_id=bin_ht.bin_groups.bin_id, bin=bin_ht.bin_groups.bin
)
bin_ht = bin_ht.filter(hl.is_defined(bin_ht.bin))
if checkpoint_path is not None:
bin_ht.checkpoint(checkpoint_path, overwrite=True)
else:
bin_ht = bin_ht.persist()
# Group by bin_id, bin and additional stratification desired and compute QC metrics per bin
return bin_ht.group_by(
bin_id=bin_ht.bin_id,
contig=bin_ht.locus.contig,
snv=hl.is_snp(bin_ht.alleles[0], bin_ht.alleles[1]),
bi_allelic=~bin_ht.was_split,
singleton=bin_ht.singleton,
release_adj=bin_ht.ac > 0,
bin=bin_ht.bin,
)._set_buffer_size(20000)
def compute_binned_truth_sample_concordance(
ht: hl.Table,
binned_score_ht: hl.Table,
n_bins: int = 100,
add_bins: Dict[str, hl.expr.BooleanExpression] = {},
) -> hl.Table:
"""
Determine the concordance (TP, FP, FN) between a truth sample within the callset and the samples truth data grouped by bins computed using `compute_ranked_bin`.
.. note::
The input 'ht` should contain three row fields:
- score: value to use for binning
- GT: a CallExpression containing the genotype of the evaluation data for the sample
- truth_GT: a CallExpression containing the genotype of the truth sample
The input `binned_score_ht` should contain:
- score: value used to bin the full callset
- bin: the full callset bin
'add_bins` can be used to add additional global and truth sample binning to the final binned truth sample
concordance HT. The keys in `add_bins` must be present in `binned_score_ht` and the values in `add_bins`
should be expressions on `ht` that define a subset of variants to bin in the truth sample. An example is if we want
to look at the global and truth sample binning on only bi-allelic variants. `add_bins` could be set to
{'biallelic_bin': ht.biallelic}.
The table is grouped by global/truth sample bin and variant type and contains TP, FP and FN.
:param ht: Input HT
:param binned_score_ht: Table with the bin annotation for each variant
:param n_bins: Number of bins to bin the data into
:param add_bins: Dictionary of additional global bin columns (key) and the expr to use for binning the truth sample (value)
:return: Binned truth sample concordance HT
"""
# Annotate score and global bin
indexed_binned_score_ht = binned_score_ht[ht.key]
ht = ht.annotate(
**{f"global_{bin_id}": indexed_binned_score_ht[bin_id] for bin_id in add_bins},
**{f"_{bin_id}": bin_expr for bin_id, bin_expr in add_bins.items()},
score=indexed_binned_score_ht.score,
global_bin=indexed_binned_score_ht.bin,
)
# Annotate the truth sample bin
bin_ht = compute_ranked_bin(
ht,
score_expr=ht.score,
bin_expr={
"truth_sample_bin": hl.expr.bool(True),
**{f"truth_sample_{bin_id}": ht[f"_{bin_id}"] for bin_id in add_bins},
},
n_bins=n_bins,
)
ht = ht.join(bin_ht, how="left")
bin_list = [
hl.tuple(["global_bin", ht.global_bin]),
hl.tuple(["truth_sample_bin", ht.truth_sample_bin]),
]
bin_list.extend(
[hl.tuple([f"global_{bin_id}", ht[f"global_{bin_id}"]]) for bin_id in add_bins]
)
bin_list.extend(
[
hl.tuple([f"truth_sample_{bin_id}", ht[f"truth_sample_{bin_id}"]])
for bin_id in add_bins
]
)
# Explode the global and truth sample bins
ht = ht.annotate(bin=bin_list)
ht = ht.explode(ht.bin)
ht = ht.annotate(bin_id=ht.bin[0], bin=hl.int(ht.bin[1]))
# Compute TP, FP and FN by bin_id, variant type and bin
return (
ht.group_by("bin_id", "snv", "bin")
.aggregate(
# TP => allele is found in both data sets
tp=hl.agg.count_where(ht.GT.is_non_ref() & ht.truth_GT.is_non_ref()),
# FP => allele is found only in test data set
fp=hl.agg.count_where(
ht.GT.is_non_ref() & hl.or_else(ht.truth_GT.is_hom_ref(), True)
),
# FN => allele is found in truth data only
fn=hl.agg.count_where(
hl.or_else(ht.GT.is_hom_ref(), True) & ht.truth_GT.is_non_ref()
),
min_score=hl.agg.min(ht.score),
max_score=hl.agg.max(ht.score),
n_alleles=hl.agg.count(),
)
.repartition(5)
)
def create_truth_sample_ht(
mt: hl.MatrixTable, truth_mt: hl.MatrixTable, high_confidence_intervals_ht: hl.Table
) -> hl.Table:
"""
Compute a table comparing a truth sample in callset vs the truth.
:param mt: MT of truth sample from callset to be compared to truth
:param truth_mt: MT of truth sample
:param high_confidence_intervals_ht: High confidence interval HT
:return: Table containing both the callset truth sample and the truth data
"""
def split_filter_and_flatten_ht(
truth_mt: hl.MatrixTable, high_confidence_intervals_ht: hl.Table
) -> hl.Table:
"""
Split a truth sample MT, filter it to the given high confidence intervals, and then "flatten" it as a HT by annotating GT in a row field.
:param truth_mt: Truth sample MT
:param high_confidence_intervals_ht: High confidence intervals
:return: Truth sample table with GT as a row annotation
"""
assert truth_mt.count_cols() == 1
if not "was_split" in truth_mt.row:
truth_mt = hl.split_multi_hts(truth_mt)
truth_mt = truth_mt.filter_rows(
hl.is_defined(high_confidence_intervals_ht[truth_mt.locus])
)
rename_entries = {"GT": "_GT"}
if "adj" in truth_mt.entry:
rename_entries.update({"adj": "_adj"})
truth_mt = truth_mt.rename(rename_entries)
return truth_mt.annotate_rows(
**{x: hl.agg.take(truth_mt[f"_{x}"], 1)[0] for x in rename_entries}
).rows()
# Load truth sample MT,
# restrict it to high confidence intervals
# and flatten it to a HT by annotating GT in a row annotation
truth_ht = split_filter_and_flatten_ht(truth_mt, high_confidence_intervals_ht)
truth_ht = truth_ht.rename({f: f"truth_{f}" for f in truth_ht.row_value})
# Similarly load, filter and flatten callset truth sample MT
ht = split_filter_and_flatten_ht(mt, high_confidence_intervals_ht)
# Outer join of truth and callset truth and annotate the score and global bin
ht = truth_ht.join(ht, how="outer")
ht = ht.annotate(snv=hl.is_snp(ht.alleles[0], ht.alleles[1]))
return ht
def add_rank(
ht: hl.Table,
score_expr: hl.expr.NumericExpression,
subrank_expr: Optional[Dict[str, hl.expr.BooleanExpression]] = None,
) -> hl.Table:
"""
Add rank based on the `score_expr`. Rank is added for snvs and indels separately.
If one or more `subrank_expr` are provided, then subrank is added based on all sites for which the boolean expression is true.
In addition, variant counts (snv, indel separately) is added as a global (`rank_variant_counts`).
:param ht: input Hail Table containing variants (with QC annotations) to be ranked
:param score_expr: the Table annotation by which ranking should be scored
:param subrank_expr: Any subranking to be added in the form name_of_subrank: subrank_filtering_expr
:return: Table with rankings added
"""
key = ht.key
if subrank_expr is None:
subrank_expr = {}
temp_expr = {"_score": score_expr}
temp_expr.update({f"_{name}": expr for name, expr in subrank_expr.items()})
rank_ht = ht.select(**temp_expr, is_snv=hl.is_snp(ht.alleles[0], ht.alleles[1]))
rank_ht = rank_ht.key_by("_score").persist()
scan_expr = {
"rank": hl.if_else(
rank_ht.is_snv,
hl.scan.count_where(rank_ht.is_snv),
hl.scan.count_where(~rank_ht.is_snv),
)
}
scan_expr.update(
{
name: hl.or_missing(
rank_ht[f"_{name}"],
hl.if_else(
rank_ht.is_snv,
hl.scan.count_where(rank_ht.is_snv & rank_ht[f"_{name}"]),
hl.scan.count_where(~rank_ht.is_snv & rank_ht[f"_{name}"]),
),
)
for name in subrank_expr
}
)
rank_ht = rank_ht.annotate(**scan_expr)
rank_ht = rank_ht.key_by(*key).persist()
rank_ht = rank_ht.select(*scan_expr.keys())
ht = ht.annotate(**rank_ht[key])
return ht
| 37.496487 | 164 | 0.598838 |
import logging
from typing import Dict, Optional
import hail as hl
logging.basicConfig(
format="%(asctime)s (%(name)s %(lineno)s): %(message)s",
datefmt="%m/%d/%Y %I:%M:%S %p",
)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def compute_ranked_bin(
ht: hl.Table,
score_expr: hl.expr.NumericExpression,
bin_expr: Dict[str, hl.expr.BooleanExpression] = {"bin": True},
compute_snv_indel_separately: bool = True,
n_bins: int = 100,
desc: bool = True,
) -> hl.Table:
if compute_snv_indel_separately:
bin_expr = {
f"{bin_id}_{snv}": (bin_expr & snv_expr)
for bin_id, bin_expr in bin_expr.items()
for snv, snv_expr in [
("snv", hl.is_snp(ht.alleles[0], ht.alleles[1])),
("indel", ~hl.is_snp(ht.alleles[0], ht.alleles[1])),
]
}
bin_ht = ht.select(
**{f"_filter_{bin_id}": bin_expr for bin_id, bin_expr in bin_expr.items()},
_score=score_expr,
snv=hl.is_snp(ht.alleles[0], ht.alleles[1]),
_rand=hl.rand_unif(0, 1),
)
logger.info(
"Sorting the HT by score_expr followed by a random float between 0 and 1. "
"Then adding a row index per grouping defined by bin_expr..."
)
bin_ht = bin_ht.order_by("_score", "_rand")
bin_ht = bin_ht.annotate(
**{
f"{bin_id}_rank": hl.or_missing(
bin_ht[f"_filter_{bin_id}"],
hl.scan.count_where(bin_ht[f"_filter_{bin_id}"]),
)
for bin_id in bin_expr
}
)
bin_ht = bin_ht.key_by("locus", "alleles")
bin_ht = bin_ht.annotate_globals(
bin_group_variant_counts=bin_ht.aggregate(
hl.Struct(
**{
bin_id: hl.agg.filter(
bin_ht[f"_filter_{bin_id}"],
hl.agg.count(),
)
for bin_id in bin_expr
}
)
)
)
logger.info("Binning ranked rows into %d bins...", n_bins)
bin_ht = bin_ht.select(
"snv",
**{
bin_id: hl.int(
hl.floor(
(
n_bins
* (
bin_ht[f"{bin_id}_rank"]
/ hl.float64(bin_ht.bin_group_variant_counts[bin_id])
)
)
+ 1
)
)
for bin_id in bin_expr
},
)
if desc:
bin_ht = bin_ht.annotate(
**{bin_id: n_bins - bin_ht[bin_id] + 1 for bin_id in bin_expr}
)
if compute_snv_indel_separately:
bin_expr_no_snv = {
bin_id.rsplit("_", 1)[0] for bin_id in bin_ht.bin_group_variant_counts
}
bin_ht = bin_ht.annotate_globals(
bin_group_variant_counts=hl.struct(
**{
bin_id: hl.struct(
**{
snv: bin_ht.bin_group_variant_counts[f"{bin_id}_{snv}"]
for snv in ["snv", "indel"]
}
)
for bin_id in bin_expr_no_snv
}
)
)
bin_ht = bin_ht.transmute(
**{
bin_id: hl.if_else(
bin_ht.snv,
bin_ht[f"{bin_id}_snv"],
bin_ht[f"{bin_id}_indel"],
)
for bin_id in bin_expr_no_snv
}
)
return bin_ht
def compute_grouped_binned_ht(
bin_ht: hl.Table,
checkpoint_path: Optional[str] = None,
) -> hl.GroupedTable:
bin_ht = bin_ht.annotate(
bin_groups=hl.array(
[
hl.Struct(bin_id=bin_name, bin=bin_ht[bin_name])
for bin_name in bin_ht.bin_group_variant_counts
]
)
)
bin_ht = bin_ht.explode(bin_ht.bin_groups)
bin_ht = bin_ht.transmute(
bin_id=bin_ht.bin_groups.bin_id, bin=bin_ht.bin_groups.bin
)
bin_ht = bin_ht.filter(hl.is_defined(bin_ht.bin))
if checkpoint_path is not None:
bin_ht.checkpoint(checkpoint_path, overwrite=True)
else:
bin_ht = bin_ht.persist()
return bin_ht.group_by(
bin_id=bin_ht.bin_id,
contig=bin_ht.locus.contig,
snv=hl.is_snp(bin_ht.alleles[0], bin_ht.alleles[1]),
bi_allelic=~bin_ht.was_split,
singleton=bin_ht.singleton,
release_adj=bin_ht.ac > 0,
bin=bin_ht.bin,
)._set_buffer_size(20000)
def compute_binned_truth_sample_concordance(
ht: hl.Table,
binned_score_ht: hl.Table,
n_bins: int = 100,
add_bins: Dict[str, hl.expr.BooleanExpression] = {},
) -> hl.Table:
indexed_binned_score_ht = binned_score_ht[ht.key]
ht = ht.annotate(
**{f"global_{bin_id}": indexed_binned_score_ht[bin_id] for bin_id in add_bins},
**{f"_{bin_id}": bin_expr for bin_id, bin_expr in add_bins.items()},
score=indexed_binned_score_ht.score,
global_bin=indexed_binned_score_ht.bin,
)
bin_ht = compute_ranked_bin(
ht,
score_expr=ht.score,
bin_expr={
"truth_sample_bin": hl.expr.bool(True),
**{f"truth_sample_{bin_id}": ht[f"_{bin_id}"] for bin_id in add_bins},
},
n_bins=n_bins,
)
ht = ht.join(bin_ht, how="left")
bin_list = [
hl.tuple(["global_bin", ht.global_bin]),
hl.tuple(["truth_sample_bin", ht.truth_sample_bin]),
]
bin_list.extend(
[hl.tuple([f"global_{bin_id}", ht[f"global_{bin_id}"]]) for bin_id in add_bins]
)
bin_list.extend(
[
hl.tuple([f"truth_sample_{bin_id}", ht[f"truth_sample_{bin_id}"]])
for bin_id in add_bins
]
)
ht = ht.annotate(bin=bin_list)
ht = ht.explode(ht.bin)
ht = ht.annotate(bin_id=ht.bin[0], bin=hl.int(ht.bin[1]))
return (
ht.group_by("bin_id", "snv", "bin")
.aggregate(
tp=hl.agg.count_where(ht.GT.is_non_ref() & ht.truth_GT.is_non_ref()),
fp=hl.agg.count_where(
ht.GT.is_non_ref() & hl.or_else(ht.truth_GT.is_hom_ref(), True)
),
fn=hl.agg.count_where(
hl.or_else(ht.GT.is_hom_ref(), True) & ht.truth_GT.is_non_ref()
),
min_score=hl.agg.min(ht.score),
max_score=hl.agg.max(ht.score),
n_alleles=hl.agg.count(),
)
.repartition(5)
)
def create_truth_sample_ht(
mt: hl.MatrixTable, truth_mt: hl.MatrixTable, high_confidence_intervals_ht: hl.Table
) -> hl.Table:
def split_filter_and_flatten_ht(
truth_mt: hl.MatrixTable, high_confidence_intervals_ht: hl.Table
) -> hl.Table:
assert truth_mt.count_cols() == 1
if not "was_split" in truth_mt.row:
truth_mt = hl.split_multi_hts(truth_mt)
truth_mt = truth_mt.filter_rows(
hl.is_defined(high_confidence_intervals_ht[truth_mt.locus])
)
rename_entries = {"GT": "_GT"}
if "adj" in truth_mt.entry:
rename_entries.update({"adj": "_adj"})
truth_mt = truth_mt.rename(rename_entries)
return truth_mt.annotate_rows(
**{x: hl.agg.take(truth_mt[f"_{x}"], 1)[0] for x in rename_entries}
).rows()
truth_ht = split_filter_and_flatten_ht(truth_mt, high_confidence_intervals_ht)
truth_ht = truth_ht.rename({f: f"truth_{f}" for f in truth_ht.row_value})
ht = split_filter_and_flatten_ht(mt, high_confidence_intervals_ht)
ht = truth_ht.join(ht, how="outer")
ht = ht.annotate(snv=hl.is_snp(ht.alleles[0], ht.alleles[1]))
return ht
def add_rank(
ht: hl.Table,
score_expr: hl.expr.NumericExpression,
subrank_expr: Optional[Dict[str, hl.expr.BooleanExpression]] = None,
) -> hl.Table:
key = ht.key
if subrank_expr is None:
subrank_expr = {}
temp_expr = {"_score": score_expr}
temp_expr.update({f"_{name}": expr for name, expr in subrank_expr.items()})
rank_ht = ht.select(**temp_expr, is_snv=hl.is_snp(ht.alleles[0], ht.alleles[1]))
rank_ht = rank_ht.key_by("_score").persist()
scan_expr = {
"rank": hl.if_else(
rank_ht.is_snv,
hl.scan.count_where(rank_ht.is_snv),
hl.scan.count_where(~rank_ht.is_snv),
)
}
scan_expr.update(
{
name: hl.or_missing(
rank_ht[f"_{name}"],
hl.if_else(
rank_ht.is_snv,
hl.scan.count_where(rank_ht.is_snv & rank_ht[f"_{name}"]),
hl.scan.count_where(~rank_ht.is_snv & rank_ht[f"_{name}"]),
),
)
for name in subrank_expr
}
)
rank_ht = rank_ht.annotate(**scan_expr)
rank_ht = rank_ht.key_by(*key).persist()
rank_ht = rank_ht.select(*scan_expr.keys())
ht = ht.annotate(**rank_ht[key])
return ht
| true | true |
f73c9285438f8311e12c44865f0ccb3430d58571 | 3,011 | py | Python | AdventOfCode2021/Day18/Day18.py | MattTitmas/AdventOfCode | 36be4f6bf973f77ff93b08dc69c977bb11951f27 | [
"MIT"
] | null | null | null | AdventOfCode2021/Day18/Day18.py | MattTitmas/AdventOfCode | 36be4f6bf973f77ff93b08dc69c977bb11951f27 | [
"MIT"
] | null | null | null | AdventOfCode2021/Day18/Day18.py | MattTitmas/AdventOfCode | 36be4f6bf973f77ff93b08dc69c977bb11951f27 | [
"MIT"
] | null | null | null | from itertools import permutations
def snailfishAdd(num1: list, num2: list) -> list:
result = ["["] + num1 + num2 + ["]"]
changeMade = True
while changeMade:
changeAlreadyMade = False
nestedLevel = 0
for i in range(len(result)):
char = result[i]
if char == "[":
nestedLevel += 1
elif char == "]":
nestedLevel -= 1
if char.isnumeric() and nestedLevel >= 5:
for b in range(i-1, -1, -1):
if result[b].isnumeric():
result[b] = str(int(result[b]) + int(result[i]))
break
for f in range(i+2, len(result)):
if result[f].isnumeric():
result[f] = str(int(result[f]) + int(result[i+1]))
break
for j in range(i+2, i-2, -1):
result.pop(j)
result.insert(i-1, "0")
changeAlreadyMade = True
break
if not changeAlreadyMade:
for i in range(len(result)):
char = result[i]
if char.isnumeric() and int(char) > 9:
val1 = int(char) // 2
val2 = int(char) // 2 + int(char) % 2
result.pop(i)
result.insert(i, "]")
result.insert(i, str(val2))
result.insert(i, str(val1))
result.insert(i, "[")
changeAlreadyMade = True
break
changeMade = False
if changeAlreadyMade:
changeMade = True
return result
def getMag(num : str) -> int:
left, right = "",""
nestedLevel = 0
for i in range(2, len(num)-1, 2):
char = num[i]
if char == "[":
nestedLevel += 1
elif char == "]":
nestedLevel -= 1
if nestedLevel == 0:
left = num[2:i+1]
right = num[i+2:len(num)-2]
break
left = int(left[0]) if len(left) == 1 else getMag(left)
right = int(right[-1]) if len(right) == 1 else getMag(right)
return 3*left + 2*right
def part1():
values = [list(i) for i in open("input.txt").read().split("\n")]
for i in range(len(values)):
values[i] = list(filter(lambda a: a != ',', values[i]))
result = values[0]
for i in range(1, len(values)):
result = snailfishAdd(result, values[i])
return getMag(" ".join(result))
def part2():
values = [list(i) for i in open("input.txt").read().split("\n")]
for i in range(len(values)):
values[i] = list(filter(lambda a: a != ',', values[i]))
maxMag = -float("inf")
count = 0
for i in permutations(values, 2):
count += 1
result = snailfishAdd(i[0], i[1])
maxMag = max(maxMag, getMag(" ".join(result)))
return maxMag
print(f"answer to part1: {part1()}")
print(f"answer to part2: {part2()}") | 33.831461 | 74 | 0.471272 | from itertools import permutations
def snailfishAdd(num1: list, num2: list) -> list:
result = ["["] + num1 + num2 + ["]"]
changeMade = True
while changeMade:
changeAlreadyMade = False
nestedLevel = 0
for i in range(len(result)):
char = result[i]
if char == "[":
nestedLevel += 1
elif char == "]":
nestedLevel -= 1
if char.isnumeric() and nestedLevel >= 5:
for b in range(i-1, -1, -1):
if result[b].isnumeric():
result[b] = str(int(result[b]) + int(result[i]))
break
for f in range(i+2, len(result)):
if result[f].isnumeric():
result[f] = str(int(result[f]) + int(result[i+1]))
break
for j in range(i+2, i-2, -1):
result.pop(j)
result.insert(i-1, "0")
changeAlreadyMade = True
break
if not changeAlreadyMade:
for i in range(len(result)):
char = result[i]
if char.isnumeric() and int(char) > 9:
val1 = int(char) // 2
val2 = int(char) // 2 + int(char) % 2
result.pop(i)
result.insert(i, "]")
result.insert(i, str(val2))
result.insert(i, str(val1))
result.insert(i, "[")
changeAlreadyMade = True
break
changeMade = False
if changeAlreadyMade:
changeMade = True
return result
def getMag(num : str) -> int:
left, right = "",""
nestedLevel = 0
for i in range(2, len(num)-1, 2):
char = num[i]
if char == "[":
nestedLevel += 1
elif char == "]":
nestedLevel -= 1
if nestedLevel == 0:
left = num[2:i+1]
right = num[i+2:len(num)-2]
break
left = int(left[0]) if len(left) == 1 else getMag(left)
right = int(right[-1]) if len(right) == 1 else getMag(right)
return 3*left + 2*right
def part1():
values = [list(i) for i in open("input.txt").read().split("\n")]
for i in range(len(values)):
values[i] = list(filter(lambda a: a != ',', values[i]))
result = values[0]
for i in range(1, len(values)):
result = snailfishAdd(result, values[i])
return getMag(" ".join(result))
def part2():
values = [list(i) for i in open("input.txt").read().split("\n")]
for i in range(len(values)):
values[i] = list(filter(lambda a: a != ',', values[i]))
maxMag = -float("inf")
count = 0
for i in permutations(values, 2):
count += 1
result = snailfishAdd(i[0], i[1])
maxMag = max(maxMag, getMag(" ".join(result)))
return maxMag
print(f"answer to part1: {part1()}")
print(f"answer to part2: {part2()}") | true | true |
f73c92ffba2f59921cdc57284eac4259a30396c2 | 627 | py | Python | tests/irr/test_irt.py | ViviHong200709/EduCDM | ff8a4ec60902b95de47aa8d96c12c9282d6855f2 | [
"Apache-2.0"
] | 36 | 2021-04-28T03:22:03.000Z | 2022-03-30T16:54:44.000Z | tests/irr/test_irt.py | ViviHong200709/EduCDM | ff8a4ec60902b95de47aa8d96c12c9282d6855f2 | [
"Apache-2.0"
] | 21 | 2021-03-18T14:10:11.000Z | 2022-01-29T14:12:45.000Z | tests/irr/test_irt.py | ViviHong200709/EduCDM | ff8a4ec60902b95de47aa8d96c12c9282d6855f2 | [
"Apache-2.0"
] | 36 | 2021-03-17T14:43:18.000Z | 2022-03-29T07:52:26.000Z | # coding: utf-8
# 2021/6/19 @ tongshiwei
from EduCDM.IRR import IRT
def test_irr_irt(train_data, test_data, params, tmp_path):
cdm = IRT(params.user_num, params.item_num, params.knowledge_num)
cdm.train(train_data, test_data=test_data, epoch=2)
filepath = tmp_path / "irr.params"
cdm.save(filepath)
cdm.load(filepath)
def test_irt(zero_train_data, test_data, params, tmp_path):
cdm = IRT(params.user_num, params.item_num, params.knowledge_num, zeta=0)
cdm.train(zero_train_data, test_data=test_data, epoch=2)
filepath = tmp_path / "irr.params"
cdm.save(filepath)
cdm.load(filepath)
| 29.857143 | 77 | 0.727273 |
from EduCDM.IRR import IRT
def test_irr_irt(train_data, test_data, params, tmp_path):
cdm = IRT(params.user_num, params.item_num, params.knowledge_num)
cdm.train(train_data, test_data=test_data, epoch=2)
filepath = tmp_path / "irr.params"
cdm.save(filepath)
cdm.load(filepath)
def test_irt(zero_train_data, test_data, params, tmp_path):
cdm = IRT(params.user_num, params.item_num, params.knowledge_num, zeta=0)
cdm.train(zero_train_data, test_data=test_data, epoch=2)
filepath = tmp_path / "irr.params"
cdm.save(filepath)
cdm.load(filepath)
| true | true |
f73c93e11217afcecc5242ec7518a375c2b41f86 | 5,499 | py | Python | train.py | EverLookNeverSee/BTS_DP_MRI | c4e36c766c3ccf2ddea7c935f81df79ed8b72247 | [
"MIT"
] | 5 | 2021-12-13T19:06:06.000Z | 2022-03-06T08:41:50.000Z | train.py | EverLookNeverSee/BTS_DP_MRI | c4e36c766c3ccf2ddea7c935f81df79ed8b72247 | [
"MIT"
] | null | null | null | train.py | EverLookNeverSee/BTS_DP_MRI | c4e36c766c3ccf2ddea7c935f81df79ed8b72247 | [
"MIT"
] | null | null | null | """
Training 3D U-Net Model
@author: Milad Sadeghi DM - EverLookNeverSee@GitHub
"""
import os
import argparse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from model import build_unet_model
from sklearn.model_selection import KFold
from tensorflow.keras.optimizers import Adam
from image_data_generator import image_generator
from tensorflow.keras.callbacks import TensorBoard, CSVLogger
from segmentation_models_3D.metrics import IOUScore
from segmentation_models_3D.losses import DiceLoss, CategoricalFocalLoss
# Initializing cli argument parser
parser = argparse.ArgumentParser()
# Adding arguments
parser.add_argument("-d", "--dataset", help="Path to .npy files directory")
parser.add_argument("-v", "--verbose", action="store_true", help="Level of verbosity")
parser.add_argument("-l", "--learning_rate", help="Learning rate", type=float, default=0.001)
parser.add_argument("-b", "--batch_size", help="Batch size", type=int, default=2)
parser.add_argument("-e", "--epochs", help="Number of epochs", type=int, default=100)
parser.add_argument("-s", "--save", help="Path to save trained model", default=os.getcwd())
# Parsing the arguments
args = parser.parse_args()
kf = KFold(n_splits=8) # Configuring kfold cross validation
fold_counter = 1 # Initializing fold counter
for train, valid in kf.split(range(34)): # 33 is the number of samples
print(f"Fold Number {fold_counter}")
train_data_generator = image_generator(path=args.dataset, indexes=train, batch_size=2)
valid_data_generator = image_generator(path=args.dataset, indexes=valid, batch_size=2)
# Calculating class weights
columns = ["0", "1", "2"]
df = pd.DataFrame(columns=columns)
mask_list = list()
for index in train:
mask_list.append(f"{args.dataset}/{index}/mask_{index}.npy")
for img in range(len(mask_list)):
tmp_image = np.load(mask_list[img])
tmp_image = np.argmax(tmp_image, axis=3)
val, counts = np.unique(tmp_image, return_counts=True)
zipped = zip(columns, counts)
counts_dict = dict(zipped)
df = df.append(counts_dict, ignore_index=True)
label_0 = df['0'].sum()
label_1 = df['1'].sum()
label_2 = df['2'].sum()
total_labels = label_0 + label_1 + label_2
n_classes = 3
wt0 = round((total_labels / (n_classes * label_0)), 2)
wt1 = round((total_labels / (n_classes * label_1)), 2)
wt2 = round((total_labels / (n_classes * label_2)), 2)
dice_loss = DiceLoss(class_weights=np.array([wt0, wt1, wt2]))
focal_loss = CategoricalFocalLoss()
# Combining loss functions in order to create better total loss function
total_loss = dice_loss + (1 * focal_loss)
# Setting accuracy and IntersectionOverUnion as metrics
metrics = ["accuracy", "TruePositives", "TrueNegatives", "FalsePositives", "FalseNegatives",
"Precision", "Recall", IOUScore(threshold=0.5)]
# Building the model
model = build_unet_model(64, 64, 16, 2, 3)
# Defining callback objects
tensorboard_callback = TensorBoard(log_dir="./tb_logs", histogram_freq=1, write_graph=True,
write_images=True, update_freq="epoch")
# Defining logger callback
logger_callback = CSVLogger("log_file.csv", separator=",", append=True)
# Compiling the model
model.compile(optimizer=Adam(learning_rate=args.learning_rate), loss=total_loss, metrics=metrics)
n_training_samples = len(train)
n_validating_samples = len(valid)
# Setting training process
history = model.fit(
train_data_generator,
steps_per_epoch=n_training_samples//2,
validation_data=valid_data_generator,
validation_steps=n_validating_samples//2,
shuffle=True,
epochs=args.epochs,
verbose=args.verbose,
callbacks=[tensorboard_callback, logger_callback]
)
# Saving the trained model
model.save(filepath=f"{args.save}/BTS_DP_MRI_fold_0{fold_counter}.hdf5", overwrite=True)
if args.verbose:
# Plotting model history
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, "y", label="Training Loss")
plt.plot(epochs, val_loss, "r", label="Validation Loss")
plt.title("Training and validation loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend()
plt.savefig(fname=f"./tv_loss_0{fold_counter}.png", dpi=960)
plt.show()
iou_score = history.history["iou_score"]
val_iou_score = history.history["val_iou_score"]
plt.plot(epochs, iou_score, 'y', label='Training IOU Score')
plt.plot(epochs, val_iou_score, 'r', label='Validation IOU Score')
plt.title('Training and validation IOU Score')
plt.xlabel('Epochs')
plt.ylabel('IOU Score')
plt.legend()
plt.savefig(fname=f"./tv_iou_score_0{fold_counter}.png", dpi=960)
plt.show()
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
plt.plot(epochs, acc, 'y', label='Training accuracy')
plt.plot(epochs, val_acc, 'r', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.savefig(fname=f"./tv_acc_0{fold_counter}.png", dpi=960)
plt.show()
fold_counter += 1
| 39.278571 | 101 | 0.678487 |
import os
import argparse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from model import build_unet_model
from sklearn.model_selection import KFold
from tensorflow.keras.optimizers import Adam
from image_data_generator import image_generator
from tensorflow.keras.callbacks import TensorBoard, CSVLogger
from segmentation_models_3D.metrics import IOUScore
from segmentation_models_3D.losses import DiceLoss, CategoricalFocalLoss
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dataset", help="Path to .npy files directory")
parser.add_argument("-v", "--verbose", action="store_true", help="Level of verbosity")
parser.add_argument("-l", "--learning_rate", help="Learning rate", type=float, default=0.001)
parser.add_argument("-b", "--batch_size", help="Batch size", type=int, default=2)
parser.add_argument("-e", "--epochs", help="Number of epochs", type=int, default=100)
parser.add_argument("-s", "--save", help="Path to save trained model", default=os.getcwd())
args = parser.parse_args()
kf = KFold(n_splits=8)
fold_counter = 1
for train, valid in kf.split(range(34)):
print(f"Fold Number {fold_counter}")
train_data_generator = image_generator(path=args.dataset, indexes=train, batch_size=2)
valid_data_generator = image_generator(path=args.dataset, indexes=valid, batch_size=2)
columns = ["0", "1", "2"]
df = pd.DataFrame(columns=columns)
mask_list = list()
for index in train:
mask_list.append(f"{args.dataset}/{index}/mask_{index}.npy")
for img in range(len(mask_list)):
tmp_image = np.load(mask_list[img])
tmp_image = np.argmax(tmp_image, axis=3)
val, counts = np.unique(tmp_image, return_counts=True)
zipped = zip(columns, counts)
counts_dict = dict(zipped)
df = df.append(counts_dict, ignore_index=True)
label_0 = df['0'].sum()
label_1 = df['1'].sum()
label_2 = df['2'].sum()
total_labels = label_0 + label_1 + label_2
n_classes = 3
wt0 = round((total_labels / (n_classes * label_0)), 2)
wt1 = round((total_labels / (n_classes * label_1)), 2)
wt2 = round((total_labels / (n_classes * label_2)), 2)
dice_loss = DiceLoss(class_weights=np.array([wt0, wt1, wt2]))
focal_loss = CategoricalFocalLoss()
total_loss = dice_loss + (1 * focal_loss)
metrics = ["accuracy", "TruePositives", "TrueNegatives", "FalsePositives", "FalseNegatives",
"Precision", "Recall", IOUScore(threshold=0.5)]
model = build_unet_model(64, 64, 16, 2, 3)
tensorboard_callback = TensorBoard(log_dir="./tb_logs", histogram_freq=1, write_graph=True,
write_images=True, update_freq="epoch")
logger_callback = CSVLogger("log_file.csv", separator=",", append=True)
model.compile(optimizer=Adam(learning_rate=args.learning_rate), loss=total_loss, metrics=metrics)
n_training_samples = len(train)
n_validating_samples = len(valid)
history = model.fit(
train_data_generator,
steps_per_epoch=n_training_samples//2,
validation_data=valid_data_generator,
validation_steps=n_validating_samples//2,
shuffle=True,
epochs=args.epochs,
verbose=args.verbose,
callbacks=[tensorboard_callback, logger_callback]
)
model.save(filepath=f"{args.save}/BTS_DP_MRI_fold_0{fold_counter}.hdf5", overwrite=True)
if args.verbose:
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, "y", label="Training Loss")
plt.plot(epochs, val_loss, "r", label="Validation Loss")
plt.title("Training and validation loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend()
plt.savefig(fname=f"./tv_loss_0{fold_counter}.png", dpi=960)
plt.show()
iou_score = history.history["iou_score"]
val_iou_score = history.history["val_iou_score"]
plt.plot(epochs, iou_score, 'y', label='Training IOU Score')
plt.plot(epochs, val_iou_score, 'r', label='Validation IOU Score')
plt.title('Training and validation IOU Score')
plt.xlabel('Epochs')
plt.ylabel('IOU Score')
plt.legend()
plt.savefig(fname=f"./tv_iou_score_0{fold_counter}.png", dpi=960)
plt.show()
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
plt.plot(epochs, acc, 'y', label='Training accuracy')
plt.plot(epochs, val_acc, 'r', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.savefig(fname=f"./tv_acc_0{fold_counter}.png", dpi=960)
plt.show()
fold_counter += 1
| true | true |
f73c9477e195c147e756da1e3cb4c32040e8235d | 4,152 | py | Python | benchmark/startQiskit_noisy2320.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_noisy2320.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_noisy2320.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=4
# total number=37
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=19
prog.cz(input_qubit[0],input_qubit[3]) # number=20
prog.h(input_qubit[3]) # number=21
prog.h(input_qubit[3]) # number=24
prog.cz(input_qubit[0],input_qubit[3]) # number=25
prog.h(input_qubit[3]) # number=26
prog.cx(input_qubit[0],input_qubit[3]) # number=31
prog.x(input_qubit[3]) # number=32
prog.h(input_qubit[3]) # number=34
prog.cz(input_qubit[0],input_qubit[3]) # number=35
prog.h(input_qubit[3]) # number=36
prog.cx(input_qubit[0],input_qubit[3]) # number=18
prog.cx(input_qubit[0],input_qubit[3]) # number=15
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=12
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.y(input_qubit[1]) # number=29
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[1]) # number=30
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.y(input_qubit[2]) # number=10
prog.y(input_qubit[2]) # number=11
prog.swap(input_qubit[3],input_qubit[0]) # number=22
prog.swap(input_qubit[3],input_qubit[0]) # number=23
prog.swap(input_qubit[1],input_qubit[0]) # number=27
prog.swap(input_qubit[1],input_qubit[0]) # number=28
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = FakeVigo()
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_noisy2320.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 34.6 | 140 | 0.654383 |
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3])
prog.cz(input_qubit[0],input_qubit[3])
prog.h(input_qubit[3])
prog.h(input_qubit[3])
prog.cz(input_qubit[0],input_qubit[3])
prog.h(input_qubit[3])
prog.cx(input_qubit[0],input_qubit[3])
prog.x(input_qubit[3])
prog.h(input_qubit[3])
prog.cz(input_qubit[0],input_qubit[3])
prog.h(input_qubit[3])
prog.cx(input_qubit[0],input_qubit[3])
prog.cx(input_qubit[0],input_qubit[3])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
prog.y(input_qubit[3])
prog.h(input_qubit[0])
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1])
prog.y(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[1])
prog.h(input_qubit[3])
prog.h(input_qubit[0])
prog.y(input_qubit[2])
prog.y(input_qubit[2])
prog.swap(input_qubit[3],input_qubit[0])
prog.swap(input_qubit[3],input_qubit[0])
prog.swap(input_qubit[1],input_qubit[0])
prog.swap(input_qubit[1],input_qubit[0])
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = FakeVigo()
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_noisy2320.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| true | true |
f73c953f2b3ab1aaff4b5fc5d04de7ab15f1fcac | 527 | py | Python | user/migrations/0004_specialization_department.py | namantam1/or-module | d2437b3f0067588975ec9460d881a8885b363383 | [
"MIT"
] | 1 | 2021-09-25T08:20:52.000Z | 2021-09-25T08:20:52.000Z | user/migrations/0004_specialization_department.py | namantam1/or-module | d2437b3f0067588975ec9460d881a8885b363383 | [
"MIT"
] | null | null | null | user/migrations/0004_specialization_department.py | namantam1/or-module | d2437b3f0067588975ec9460d881a8885b363383 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.7 on 2021-09-18 16:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('user', '0003_auto_20210918_2131'),
]
operations = [
migrations.AddField(
model_name='specialization',
name='department',
field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='user.department'),
preserve_default=False,
),
]
| 25.095238 | 115 | 0.639469 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('user', '0003_auto_20210918_2131'),
]
operations = [
migrations.AddField(
model_name='specialization',
name='department',
field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='user.department'),
preserve_default=False,
),
]
| true | true |
f73c98e14dea4ec7d86b3628ab17dd13ac58cd61 | 9,267 | py | Python | animations/utils/gradient.py | TristanCacqueray/demo-render | 4c8403e684165e5e75c046ee023c1f794a6650a8 | [
"Apache-2.0"
] | 9 | 2018-02-19T14:17:12.000Z | 2021-03-27T14:46:28.000Z | animations/utils/gradient.py | TristanCacqueray/demo-render | 4c8403e684165e5e75c046ee023c1f794a6650a8 | [
"Apache-2.0"
] | null | null | null | animations/utils/gradient.py | TristanCacqueray/demo-render | 4c8403e684165e5e75c046ee023c1f794a6650a8 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import colorsys
import io
import math
class Gradient:
multi_gradients = False
def to_array(self, length):
colors_array = []
for idx in range(length):
colors_array.append(str(self.color(idx / length)))
return ",".join(colors_array)
class GimpGradient(Gradient):
""" Read and interpret a Gimp .ggr gradient file.
Code adapted from https://nedbatchelder.com/code/modules/ggr.html
"""
def __init__(self, f=None):
if f:
self.read(f)
class _segment:
pass
def read(self, f):
""" Read a .ggr file from f (either an open file or a file path)."""
if isinstance(f, str):
f = open(f)
if f.readline().strip() != "GIMP Gradient":
raise Exception("Not a GIMP gradient file")
line = f.readline().strip()
if line.startswith("Name: "):
#raise Exception("Not a GIMP gradient file")
self.name = line.split(": ", 1)[1]
line = f.readline().strip()
nsegs = int(line)
self.segs = []
for i in range(nsegs):
line = f.readline().strip()
if not line:
break
seg = self._segment()
(seg.k, seg.m, seg.r,
seg.rl, seg.gl, seg.bl, _,
seg.rr, seg.gr, seg.br, _,
seg.fn, seg.space) = map(float, line.split()[:13])
self.segs.append(seg)
def color(self, x):
""" Get the color for the point x in the range [0..1)."""
# Find the segment.
for seg in self.segs:
if seg.k <= x <= seg.r:
break
else:
# No segment applies! Return black I guess.
return (0, 0, 0)
# Normalize the segment geometry.
mid = (seg.m - seg.k)/(seg.r - seg.k)
pos = (x - seg.k)/(seg.r - seg.k)
# Assume linear (most common, and needed by most others).
if pos <= mid:
f = pos/mid/2
else:
f = (pos - mid)/(1 - mid)/2 + 0.5
# Find the correct interpolation factor.
if seg.fn == 1: # Curved
f = math.pow(pos, math.log(0.5) / math.log(mid))
elif seg.fn == 2: # Sinusoidal
f = (math.sin((-math.pi/2) + math.pi*f) + 1)/2
elif seg.fn == 3: # Spherical increasing
f -= 1
f = math.sqrt(1 - f*f)
elif seg.fn == 4: # Spherical decreasing
f = 1 - math.sqrt(1 - f*f)
# Interpolate the colors
if seg.space == 0:
return (0xff << 24 |
int((seg.rl + (seg.rr-seg.rl) * f) * 0xff) << 16 |
int((seg.gl + (seg.gr-seg.gl) * f) * 0xff) << 8 |
int((seg.bl + (seg.br-seg.bl) * f) * 0xff))
elif seg.space in (1, 2):
hl, sl, vl = colorsys.rgb_to_hsv(seg.rl, seg.gl, seg.bl)
hr, sr, vr = colorsys.rgb_to_hsv(seg.rr, seg.gr, seg.br)
if seg.space == 1 and hr < hl:
hr += 1
elif seg.space == 2 and hr > hl:
hr -= 1
c = colorsys.hsv_to_rgb(
(hl + (hr-hl) * f) % 1.0,
sl + (sr-sl) * f,
vl + (vr-vl) * f
)
return (0xff << 24 |
int(c[0] * 0xff) << 16 |
int((c[1] * 0xff)) << 8 |
int(c[2] * 0xff))
class Ugr(Gradient):
def __init__(self, f, name=None):
self.gradients = {}
if isinstance(f, str):
f = open(f)
gradient = []
last_index = 0
while True:
line = f.readline()
if line == '':
break
if "}" in line:
self.gradients[gradient[0]] = gradient[1]
if "title=" in line:
last_index = 0
gradient = [line.split('"')[1], []]
if name is None:
self.multi_gradients = True
name = gradient[0]
if "color=" in line:
index = int(line.split('index=')[1].split()[0])
c = int(line.split('color=')[1].split()[0])
if last_index != index:
# Inject transition color
lc = gradient[1][-1]
lr, lg, lb = (lc >> 16) & 0xff, (lc >> 8) & 0xff, lc & 0xff
nr, ng, nb = (c >> 16) & 0xff, (c >> 8) & 0xff, c & 0xff
r = 1 + index - last_index
for idx in range(r):
gradient[1].append(
0xff << 24 |
(int(lr + (nr - lr) * idx / r) & 0xff) << 16 |
(int(lg + (ng - lg) * idx / r) & 0xff) << 8 |
(int(lb + (nb - lb) * idx / r) & 0xff)
)
last_index = index
gradient[1].append(0xff << 24 | c)
if name not in self.gradients:
raise RuntimeError("Unknown gradient %s in %s" % (
name, list(self.gradients.keys())))
self.name = name
def color(self, x, name=None):
if name is None:
name = self.name
pos = int(len(self.gradients[name]) * x)
return self.gradients[name][pos]
def get(name):
import os
gname = None
if ":" in name:
name, gname = name.split(':')
local_file = os.path.join(os.path.dirname(__file__), "gradients", name)
if os.path.exists(local_file):
name = local_file
if name in DEFAULT_GRADIENTS:
gradient = GimpGradient(io.StringIO(DEFAULT_GRADIENTS[name]))
else:
if name.endswith(".ggr"):
gradient = GimpGradient(name)
elif name.endswith(".ugr"):
gradient = Ugr(name, gname)
else:
raise RuntimeError("Only GimpGradient/UGR format is supported")
return gradient
def generate_array(name, length):
return get(name).to_array(length)
DEFAULT_GRADIENTS = {
"purples": """GIMP Gradient
Name: Purples
7
0.00000 0.05759 0.09849 0.30303 0.10963 0.27308 1 0.51441 0.27924 0.73484 1 0 0
0.09849 0.17696 0.22871 0.51441 0.27924 0.73484 1 0.60460 0.33150 0.65000 1 0 0
0.22871 0.34724 0.40400 0.60460 0.33150 0.65000 1 0.20050 0.16988 0.39393 1 0 0
0.40400 0.48080 0.54424 0.20050 0.16988 0.39393 1 0.50053 0.32330 0.53000 1 0 0
0.54424 0.62876 0.71328 0.50053 0.32330 0.53000 1 0.60064 0.44574 0.68166 1 0 0
0.71328 0.76649 0.81969 0.60064 0.44574 0.68166 1 0.70075 0.56818 0.83333 1 0 0
0.81969 0.92821 1.00000 0.70075 0.56818 0.83333 1 0.18474 0.14979 0.21969 1 0 0
""",
"sunrise": """GIMP Gradient
Name: Sunrise
6
0.000000 0.101798 0.203595 1.000000 1.000000 1.000000 1.000000 0.948165 0.969697 0.812122 1 0 0
0.203595 0.379143 0.487479 0.948165 0.969697 0.812122 1.000000 1.000000 0.552632 0.270000 1 0 0
0.487479 0.503577 0.529137 1.000000 0.552632 0.270000 1.000000 0.581721 0.096155 0.170043 1 0 0
0.529137 0.545165 0.562604 0.581721 0.096155 0.170043 1.000000 0.287879 0.155229 0.049835 1 0 0
0.562604 0.609349 0.697830 0.287879 0.155229 0.049835 1.000000 0.336000 0.425966 0.800000 1 0 0
0.697830 0.845064 1.000000 0.336000 0.425966 0.800000 1.000000 0.852165 0.985930 1.000000 1 0 0
""",
"incandescent": """GIMP Gradient
Name: Incandescent
4
0.000000 0.459098 0.594324 0.000000 0.000000 0.000000 1.000000 0.729412 0.000000 0.000000 1 0 0
0.594324 0.677796 0.809683 0.729412 0.000000 0.000000 1.000000 1.000000 0.545098 0.196078 1 0 0
0.809683 0.853088 0.899833 1.000000 0.545098 0.196078 1.000000 0.972549 0.937255 0.074510 1 0 0
0.899833 0.948247 1.000000 0.972549 0.937255 0.074510 1.000000 0.976471 0.968627 0.831373 1 0 0
"""
}
if __name__ == '__main__':
import sys
from game import Screen, Window
WINSIZE = (1000, 200)
screen = Screen(WINSIZE)
window = Window(WINSIZE)
screen.add(window)
for name in sys.argv[1:]:
gradient = get(name)
if gradient.multi_gradients:
for gname in gradient.gradients:
print("%s:%s" % (name, gname), end='')
for x in range(WINSIZE[0]):
window.draw_line((x, 0),
(x, WINSIZE[1]),
gradient.color(x / WINSIZE[0], gname))
screen.update()
input()
else:
print(name, end='')
for x in range(WINSIZE[0]):
window.draw_line((x, 0),
(x, WINSIZE[1]),
gradient.color(x / WINSIZE[0]))
screen.update()
input()
| 35.779923 | 95 | 0.524981 |
import colorsys
import io
import math
class Gradient:
multi_gradients = False
def to_array(self, length):
colors_array = []
for idx in range(length):
colors_array.append(str(self.color(idx / length)))
return ",".join(colors_array)
class GimpGradient(Gradient):
def __init__(self, f=None):
if f:
self.read(f)
class _segment:
pass
def read(self, f):
if isinstance(f, str):
f = open(f)
if f.readline().strip() != "GIMP Gradient":
raise Exception("Not a GIMP gradient file")
line = f.readline().strip()
if line.startswith("Name: "):
self.name = line.split(": ", 1)[1]
line = f.readline().strip()
nsegs = int(line)
self.segs = []
for i in range(nsegs):
line = f.readline().strip()
if not line:
break
seg = self._segment()
(seg.k, seg.m, seg.r,
seg.rl, seg.gl, seg.bl, _,
seg.rr, seg.gr, seg.br, _,
seg.fn, seg.space) = map(float, line.split()[:13])
self.segs.append(seg)
def color(self, x):
for seg in self.segs:
if seg.k <= x <= seg.r:
break
else:
return (0, 0, 0)
mid = (seg.m - seg.k)/(seg.r - seg.k)
pos = (x - seg.k)/(seg.r - seg.k)
if pos <= mid:
f = pos/mid/2
else:
f = (pos - mid)/(1 - mid)/2 + 0.5
if seg.fn == 1:
f = math.pow(pos, math.log(0.5) / math.log(mid))
elif seg.fn == 2:
f = (math.sin((-math.pi/2) + math.pi*f) + 1)/2
elif seg.fn == 3:
f -= 1
f = math.sqrt(1 - f*f)
elif seg.fn == 4:
f = 1 - math.sqrt(1 - f*f)
if seg.space == 0:
return (0xff << 24 |
int((seg.rl + (seg.rr-seg.rl) * f) * 0xff) << 16 |
int((seg.gl + (seg.gr-seg.gl) * f) * 0xff) << 8 |
int((seg.bl + (seg.br-seg.bl) * f) * 0xff))
elif seg.space in (1, 2):
hl, sl, vl = colorsys.rgb_to_hsv(seg.rl, seg.gl, seg.bl)
hr, sr, vr = colorsys.rgb_to_hsv(seg.rr, seg.gr, seg.br)
if seg.space == 1 and hr < hl:
hr += 1
elif seg.space == 2 and hr > hl:
hr -= 1
c = colorsys.hsv_to_rgb(
(hl + (hr-hl) * f) % 1.0,
sl + (sr-sl) * f,
vl + (vr-vl) * f
)
return (0xff << 24 |
int(c[0] * 0xff) << 16 |
int((c[1] * 0xff)) << 8 |
int(c[2] * 0xff))
class Ugr(Gradient):
def __init__(self, f, name=None):
self.gradients = {}
if isinstance(f, str):
f = open(f)
gradient = []
last_index = 0
while True:
line = f.readline()
if line == '':
break
if "}" in line:
self.gradients[gradient[0]] = gradient[1]
if "title=" in line:
last_index = 0
gradient = [line.split('"')[1], []]
if name is None:
self.multi_gradients = True
name = gradient[0]
if "color=" in line:
index = int(line.split('index=')[1].split()[0])
c = int(line.split('color=')[1].split()[0])
if last_index != index:
# Inject transition color
lc = gradient[1][-1]
lr, lg, lb = (lc >> 16) & 0xff, (lc >> 8) & 0xff, lc & 0xff
nr, ng, nb = (c >> 16) & 0xff, (c >> 8) & 0xff, c & 0xff
r = 1 + index - last_index
for idx in range(r):
gradient[1].append(
0xff << 24 |
(int(lr + (nr - lr) * idx / r) & 0xff) << 16 |
(int(lg + (ng - lg) * idx / r) & 0xff) << 8 |
(int(lb + (nb - lb) * idx / r) & 0xff)
)
last_index = index
gradient[1].append(0xff << 24 | c)
if name not in self.gradients:
raise RuntimeError("Unknown gradient %s in %s" % (
name, list(self.gradients.keys())))
self.name = name
def color(self, x, name=None):
if name is None:
name = self.name
pos = int(len(self.gradients[name]) * x)
return self.gradients[name][pos]
def get(name):
import os
gname = None
if ":" in name:
name, gname = name.split(':')
local_file = os.path.join(os.path.dirname(__file__), "gradients", name)
if os.path.exists(local_file):
name = local_file
if name in DEFAULT_GRADIENTS:
gradient = GimpGradient(io.StringIO(DEFAULT_GRADIENTS[name]))
else:
if name.endswith(".ggr"):
gradient = GimpGradient(name)
elif name.endswith(".ugr"):
gradient = Ugr(name, gname)
else:
raise RuntimeError("Only GimpGradient/UGR format is supported")
return gradient
def generate_array(name, length):
return get(name).to_array(length)
DEFAULT_GRADIENTS = {
"purples": """GIMP Gradient
Name: Purples
7
0.00000 0.05759 0.09849 0.30303 0.10963 0.27308 1 0.51441 0.27924 0.73484 1 0 0
0.09849 0.17696 0.22871 0.51441 0.27924 0.73484 1 0.60460 0.33150 0.65000 1 0 0
0.22871 0.34724 0.40400 0.60460 0.33150 0.65000 1 0.20050 0.16988 0.39393 1 0 0
0.40400 0.48080 0.54424 0.20050 0.16988 0.39393 1 0.50053 0.32330 0.53000 1 0 0
0.54424 0.62876 0.71328 0.50053 0.32330 0.53000 1 0.60064 0.44574 0.68166 1 0 0
0.71328 0.76649 0.81969 0.60064 0.44574 0.68166 1 0.70075 0.56818 0.83333 1 0 0
0.81969 0.92821 1.00000 0.70075 0.56818 0.83333 1 0.18474 0.14979 0.21969 1 0 0
""",
"sunrise": """GIMP Gradient
Name: Sunrise
6
0.000000 0.101798 0.203595 1.000000 1.000000 1.000000 1.000000 0.948165 0.969697 0.812122 1 0 0
0.203595 0.379143 0.487479 0.948165 0.969697 0.812122 1.000000 1.000000 0.552632 0.270000 1 0 0
0.487479 0.503577 0.529137 1.000000 0.552632 0.270000 1.000000 0.581721 0.096155 0.170043 1 0 0
0.529137 0.545165 0.562604 0.581721 0.096155 0.170043 1.000000 0.287879 0.155229 0.049835 1 0 0
0.562604 0.609349 0.697830 0.287879 0.155229 0.049835 1.000000 0.336000 0.425966 0.800000 1 0 0
0.697830 0.845064 1.000000 0.336000 0.425966 0.800000 1.000000 0.852165 0.985930 1.000000 1 0 0
""",
"incandescent": """GIMP Gradient
Name: Incandescent
4
0.000000 0.459098 0.594324 0.000000 0.000000 0.000000 1.000000 0.729412 0.000000 0.000000 1 0 0
0.594324 0.677796 0.809683 0.729412 0.000000 0.000000 1.000000 1.000000 0.545098 0.196078 1 0 0
0.809683 0.853088 0.899833 1.000000 0.545098 0.196078 1.000000 0.972549 0.937255 0.074510 1 0 0
0.899833 0.948247 1.000000 0.972549 0.937255 0.074510 1.000000 0.976471 0.968627 0.831373 1 0 0
"""
}
if __name__ == '__main__':
import sys
from game import Screen, Window
WINSIZE = (1000, 200)
screen = Screen(WINSIZE)
window = Window(WINSIZE)
screen.add(window)
for name in sys.argv[1:]:
gradient = get(name)
if gradient.multi_gradients:
for gname in gradient.gradients:
print("%s:%s" % (name, gname), end='')
for x in range(WINSIZE[0]):
window.draw_line((x, 0),
(x, WINSIZE[1]),
gradient.color(x / WINSIZE[0], gname))
screen.update()
input()
else:
print(name, end='')
for x in range(WINSIZE[0]):
window.draw_line((x, 0),
(x, WINSIZE[1]),
gradient.color(x / WINSIZE[0]))
screen.update()
input()
| true | true |
f73c99410d86debcfd12b93a95424dc4d122894a | 15,643 | py | Python | sdk/python/pulumi_alicloud/ram/outputs.py | pulumi/pulumi-alicloud | 9c34d84b4588a7c885c6bec1f03b5016e5a41683 | [
"ECL-2.0",
"Apache-2.0"
] | 42 | 2019-03-18T06:34:37.000Z | 2022-03-24T07:08:57.000Z | sdk/python/pulumi_alicloud/ram/outputs.py | pulumi/pulumi-alicloud | 9c34d84b4588a7c885c6bec1f03b5016e5a41683 | [
"ECL-2.0",
"Apache-2.0"
] | 152 | 2019-04-15T21:03:44.000Z | 2022-03-29T18:00:57.000Z | sdk/python/pulumi_alicloud/ram/outputs.py | pulumi/pulumi-alicloud | 9c34d84b4588a7c885c6bec1f03b5016e5a41683 | [
"ECL-2.0",
"Apache-2.0"
] | 3 | 2020-08-26T17:30:07.000Z | 2021-07-05T01:37:45.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'PolicyStatement',
'GetGroupsGroupResult',
'GetPoliciesPolicyResult',
'GetRolesRoleResult',
'GetSamlProvidersProviderResult',
'GetUsersUserResult',
]
@pulumi.output_type
class PolicyStatement(dict):
    """One statement of a RAM policy: an `effect` applied to `actions` over `resources`.
    All three fields are deprecated since provider version 1.49.0 in favor of the
    raw policy `document`.
    """
    def __init__(__self__, *,
                 actions: Sequence[str],
                 effect: str,
                 resources: Sequence[str]):
        """
        :param Sequence[str] actions: (It has been deprecated from version 1.49.0, and use field 'document' to replace.) List of operations for the `resource`. The format of each item in this list is `${service}:${action_name}`, such as `oss:ListBuckets` and `ecs:Describe*`. The `${service}` can be `ecs`, `oss`, `ots` and so on, the `${action_name}` refers to the name of an api interface which related to the `${service}`.
        :param str effect: (It has been deprecated from version 1.49.0, and use field 'document' to replace.) This parameter indicates whether or not the `action` is allowed. Valid values are `Allow` and `Deny`.
        :param Sequence[str] resources: (It has been deprecated from version 1.49.0, and use field 'document' to replace.) List of specific objects which will be authorized. The format of each item in this list is `acs:${service}:${region}:${account_id}:${relative_id}`, such as `acs:ecs:*:*:instance/inst-002` and `acs:oss:*:1234567890000:mybucket`. The `${service}` can be `ecs`, `oss`, `ots` and so on, the `${region}` is the region info which can use `*` replace when it is not supplied, the `${account_id}` refers to someone's Alicloud account id or you can use `*` to replace, the `${relative_id}` is the resource description section which related to the `${service}`.
        """
        pulumi.set(__self__, "actions", actions)
        pulumi.set(__self__, "effect", effect)
        pulumi.set(__self__, "resources", resources)
    @property
    @pulumi.getter
    def actions(self) -> Sequence[str]:
        """
        (It has been deprecated from version 1.49.0, and use field 'document' to replace.) List of operations for the `resource`. The format of each item in this list is `${service}:${action_name}`, such as `oss:ListBuckets` and `ecs:Describe*`. The `${service}` can be `ecs`, `oss`, `ots` and so on, the `${action_name}` refers to the name of an api interface which related to the `${service}`.
        """
        return pulumi.get(self, "actions")
    @property
    @pulumi.getter
    def effect(self) -> str:
        """
        (It has been deprecated from version 1.49.0, and use field 'document' to replace.) This parameter indicates whether or not the `action` is allowed. Valid values are `Allow` and `Deny`.
        """
        return pulumi.get(self, "effect")
    @property
    @pulumi.getter
    def resources(self) -> Sequence[str]:
        """
        (It has been deprecated from version 1.49.0, and use field 'document' to replace.) List of specific objects which will be authorized. The format of each item in this list is `acs:${service}:${region}:${account_id}:${relative_id}`, such as `acs:ecs:*:*:instance/inst-002` and `acs:oss:*:1234567890000:mybucket`. The `${service}` can be `ecs`, `oss`, `ots` and so on, the `${region}` is the region info which can use `*` replace when it is not supplied, the `${account_id}` refers to someone's Alicloud account id or you can use `*` to replace, the `${relative_id}` is the resource description section which related to the `${service}`.
        """
        return pulumi.get(self, "resources")
@pulumi.output_type
class GetGroupsGroupResult(dict):
    """A single RAM group entry (name and comments) returned by the groups lookup."""
    def __init__(__self__, *,
                 comments: str,
                 name: str):
        """
        :param str comments: Comments of the group.
        :param str name: Name of the group.
        """
        pulumi.set(__self__, "comments", comments)
        pulumi.set(__self__, "name", name)
    @property
    @pulumi.getter
    def comments(self) -> str:
        """
        Comments of the group.
        """
        return pulumi.get(self, "comments")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Name of the group.
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class GetPoliciesPolicyResult(dict):
    """A single RAM policy entry returned by the policies lookup: identity,
    document contents, attachment count, and create/update metadata.
    """
    def __init__(__self__, *,
                 attachment_count: int,
                 create_date: str,
                 default_version: str,
                 description: str,
                 document: str,
                 id: str,
                 name: str,
                 policy_document: str,
                 policy_name: str,
                 type: str,
                 update_date: str,
                 user_name: str,
                 version_id: str):
        """
        :param int attachment_count: Attachment count of the policy.
        :param str create_date: Creation date of the policy.
        :param str default_version: Default version of the policy.
        :param str description: Description of the policy.
        :param str document: Policy document of the policy.
        :param str name: Name of the policy.
        :param str policy_document: Policy document of the policy.
        :param str policy_name: Name of the policy.
        :param str type: Filter results by a specific policy type. Valid values are `Custom` and `System`.
        :param str update_date: Update date of the policy.
        :param str user_name: Filter results by a specific user name. Returned policies are attached to the specified user.
        :param str version_id: The ID of default policy.
        """
        pulumi.set(__self__, "attachment_count", attachment_count)
        pulumi.set(__self__, "create_date", create_date)
        pulumi.set(__self__, "default_version", default_version)
        pulumi.set(__self__, "description", description)
        pulumi.set(__self__, "document", document)
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "policy_document", policy_document)
        pulumi.set(__self__, "policy_name", policy_name)
        pulumi.set(__self__, "type", type)
        pulumi.set(__self__, "update_date", update_date)
        pulumi.set(__self__, "user_name", user_name)
        pulumi.set(__self__, "version_id", version_id)
    @property
    @pulumi.getter(name="attachmentCount")
    def attachment_count(self) -> int:
        """
        Attachment count of the policy.
        """
        return pulumi.get(self, "attachment_count")
    @property
    @pulumi.getter(name="createDate")
    def create_date(self) -> str:
        """
        Creation date of the policy.
        """
        return pulumi.get(self, "create_date")
    @property
    @pulumi.getter(name="defaultVersion")
    def default_version(self) -> str:
        """
        Default version of the policy.
        """
        return pulumi.get(self, "default_version")
    @property
    @pulumi.getter
    def description(self) -> str:
        """
        Description of the policy.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter
    def document(self) -> str:
        """
        Policy document of the policy.
        """
        return pulumi.get(self, "document")
    @property
    @pulumi.getter
    def id(self) -> str:
        # NOTE: `id` carries no description in the generated provider schema.
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Name of the policy.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="policyDocument")
    def policy_document(self) -> str:
        """
        Policy document of the policy.
        """
        return pulumi.get(self, "policy_document")
    @property
    @pulumi.getter(name="policyName")
    def policy_name(self) -> str:
        """
        Name of the policy.
        """
        return pulumi.get(self, "policy_name")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Filter results by a specific policy type. Valid values are `Custom` and `System`.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="updateDate")
    def update_date(self) -> str:
        """
        Update date of the policy.
        """
        return pulumi.get(self, "update_date")
    @property
    @pulumi.getter(name="userName")
    def user_name(self) -> str:
        """
        Filter results by a specific user name. Returned policies are attached to the specified user.
        """
        return pulumi.get(self, "user_name")
    @property
    @pulumi.getter(name="versionId")
    def version_id(self) -> str:
        """
        The ID of default policy.
        """
        return pulumi.get(self, "version_id")
@pulumi.output_type
class GetRolesRoleResult(dict):
    """A single RAM role entry returned by the roles lookup: ARN, authorization
    strategy (document), and create/update metadata.
    """
    def __init__(__self__, *,
                 arn: str,
                 assume_role_policy_document: str,
                 create_date: str,
                 description: str,
                 document: str,
                 id: str,
                 name: str,
                 update_date: str):
        """
        :param str arn: Resource descriptor of the role.
        :param str assume_role_policy_document: Authorization strategy of the role. This parameter is deprecated and replaced by `document`.
        :param str create_date: Creation date of the role.
        :param str description: Description of the role.
        :param str document: Authorization strategy of the role.
        :param str id: Id of the role.
        :param str name: Name of the role.
        :param str update_date: Update date of the role.
        """
        pulumi.set(__self__, "arn", arn)
        pulumi.set(__self__, "assume_role_policy_document", assume_role_policy_document)
        pulumi.set(__self__, "create_date", create_date)
        pulumi.set(__self__, "description", description)
        pulumi.set(__self__, "document", document)
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "update_date", update_date)
    @property
    @pulumi.getter
    def arn(self) -> str:
        """
        Resource descriptor of the role.
        """
        return pulumi.get(self, "arn")
    @property
    @pulumi.getter(name="assumeRolePolicyDocument")
    def assume_role_policy_document(self) -> str:
        """
        Authorization strategy of the role. This parameter is deprecated and replaced by `document`.
        """
        return pulumi.get(self, "assume_role_policy_document")
    @property
    @pulumi.getter(name="createDate")
    def create_date(self) -> str:
        """
        Creation date of the role.
        """
        return pulumi.get(self, "create_date")
    @property
    @pulumi.getter
    def description(self) -> str:
        """
        Description of the role.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter
    def document(self) -> str:
        """
        Authorization strategy of the role.
        """
        return pulumi.get(self, "document")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Id of the role.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Name of the role.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="updateDate")
    def update_date(self) -> str:
        """
        Update date of the role.
        """
        return pulumi.get(self, "update_date")
@pulumi.output_type
class GetSamlProvidersProviderResult(dict):
    """A single RAM SAML identity provider entry returned by the SAML providers
    lookup: ARN, description, metadata document, and update time.
    """
    def __init__(__self__, *,
                 arn: str,
                 description: str,
                 encodedsaml_metadata_document: str,
                 id: str,
                 saml_provider_name: str,
                 update_date: str):
        """
        :param str arn: The Alibaba Cloud Resource Name (ARN) of the IdP.
        :param str description: The description of SAML Provider.
        :param str encodedsaml_metadata_document: The encodedsaml metadata document.
        :param str id: The ID of the SAML Provider.
        :param str saml_provider_name: The saml provider name.
        :param str update_date: The update time.
        """
        pulumi.set(__self__, "arn", arn)
        pulumi.set(__self__, "description", description)
        pulumi.set(__self__, "encodedsaml_metadata_document", encodedsaml_metadata_document)
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "saml_provider_name", saml_provider_name)
        pulumi.set(__self__, "update_date", update_date)
    @property
    @pulumi.getter
    def arn(self) -> str:
        """
        The Alibaba Cloud Resource Name (ARN) of the IdP.
        """
        return pulumi.get(self, "arn")
    @property
    @pulumi.getter
    def description(self) -> str:
        """
        The description of SAML Provider.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="encodedsamlMetadataDocument")
    def encodedsaml_metadata_document(self) -> str:
        """
        The encodedsaml metadata document.
        """
        return pulumi.get(self, "encodedsaml_metadata_document")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The ID of the SAML Provider.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="samlProviderName")
    def saml_provider_name(self) -> str:
        """
        The saml provider name.
        """
        return pulumi.get(self, "saml_provider_name")
    @property
    @pulumi.getter(name="updateDate")
    def update_date(self) -> str:
        """
        The update time.
        """
        return pulumi.get(self, "update_date")
@pulumi.output_type
class GetUsersUserResult(dict):
    """A single RAM user entry returned by the users lookup: id, name, and
    creation/last-login dates.
    """
    def __init__(__self__, *,
                 create_date: str,
                 id: str,
                 last_login_date: str,
                 name: str):
        """
        :param str create_date: Creation date of the user.
        :param str id: The original id is user name, but it is user id in 1.37.0+.
        :param str last_login_date: Last login date of the user. Removed from version 1.79.0.
        :param str name: Name of the user.
        """
        pulumi.set(__self__, "create_date", create_date)
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "last_login_date", last_login_date)
        pulumi.set(__self__, "name", name)
    @property
    @pulumi.getter(name="createDate")
    def create_date(self) -> str:
        """
        Creation date of the user.
        """
        return pulumi.get(self, "create_date")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The original id is user name, but it is user id in 1.37.0+.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="lastLoginDate")
    def last_login_date(self) -> str:
        """
        Last login date of the user. Removed from version 1.79.0.
        """
        return pulumi.get(self, "last_login_date")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Name of the user.
        """
        return pulumi.get(self, "name")
| 34.455947 | 674 | 0.601803 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'PolicyStatement',
'GetGroupsGroupResult',
'GetPoliciesPolicyResult',
'GetRolesRoleResult',
'GetSamlProvidersProviderResult',
'GetUsersUserResult',
]
@pulumi.output_type
class PolicyStatement(dict):
def __init__(__self__, *,
actions: Sequence[str],
effect: str,
resources: Sequence[str]):
pulumi.set(__self__, "actions", actions)
pulumi.set(__self__, "effect", effect)
pulumi.set(__self__, "resources", resources)
@property
@pulumi.getter
def actions(self) -> Sequence[str]:
return pulumi.get(self, "actions")
@property
@pulumi.getter
def effect(self) -> str:
return pulumi.get(self, "effect")
@property
@pulumi.getter
def resources(self) -> Sequence[str]:
return pulumi.get(self, "resources")
@pulumi.output_type
class GetGroupsGroupResult(dict):
def __init__(__self__, *,
comments: str,
name: str):
pulumi.set(__self__, "comments", comments)
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def comments(self) -> str:
return pulumi.get(self, "comments")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@pulumi.output_type
class GetPoliciesPolicyResult(dict):
def __init__(__self__, *,
attachment_count: int,
create_date: str,
default_version: str,
description: str,
document: str,
id: str,
name: str,
policy_document: str,
policy_name: str,
type: str,
update_date: str,
user_name: str,
version_id: str):
pulumi.set(__self__, "attachment_count", attachment_count)
pulumi.set(__self__, "create_date", create_date)
pulumi.set(__self__, "default_version", default_version)
pulumi.set(__self__, "description", description)
pulumi.set(__self__, "document", document)
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "policy_document", policy_document)
pulumi.set(__self__, "policy_name", policy_name)
pulumi.set(__self__, "type", type)
pulumi.set(__self__, "update_date", update_date)
pulumi.set(__self__, "user_name", user_name)
pulumi.set(__self__, "version_id", version_id)
@property
@pulumi.getter(name="attachmentCount")
def attachment_count(self) -> int:
return pulumi.get(self, "attachment_count")
@property
@pulumi.getter(name="createDate")
def create_date(self) -> str:
return pulumi.get(self, "create_date")
@property
@pulumi.getter(name="defaultVersion")
def default_version(self) -> str:
return pulumi.get(self, "default_version")
@property
@pulumi.getter
def description(self) -> str:
return pulumi.get(self, "description")
@property
@pulumi.getter
def document(self) -> str:
return pulumi.get(self, "document")
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="policyDocument")
def policy_document(self) -> str:
return pulumi.get(self, "policy_document")
@property
@pulumi.getter(name="policyName")
def policy_name(self) -> str:
return pulumi.get(self, "policy_name")
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
@property
@pulumi.getter(name="updateDate")
def update_date(self) -> str:
return pulumi.get(self, "update_date")
@property
@pulumi.getter(name="userName")
def user_name(self) -> str:
return pulumi.get(self, "user_name")
@property
@pulumi.getter(name="versionId")
def version_id(self) -> str:
return pulumi.get(self, "version_id")
@pulumi.output_type
class GetRolesRoleResult(dict):
def __init__(__self__, *,
arn: str,
assume_role_policy_document: str,
create_date: str,
description: str,
document: str,
id: str,
name: str,
update_date: str):
pulumi.set(__self__, "arn", arn)
pulumi.set(__self__, "assume_role_policy_document", assume_role_policy_document)
pulumi.set(__self__, "create_date", create_date)
pulumi.set(__self__, "description", description)
pulumi.set(__self__, "document", document)
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "update_date", update_date)
@property
@pulumi.getter
def arn(self) -> str:
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="assumeRolePolicyDocument")
def assume_role_policy_document(self) -> str:
return pulumi.get(self, "assume_role_policy_document")
@property
@pulumi.getter(name="createDate")
def create_date(self) -> str:
return pulumi.get(self, "create_date")
@property
@pulumi.getter
def description(self) -> str:
return pulumi.get(self, "description")
@property
@pulumi.getter
def document(self) -> str:
return pulumi.get(self, "document")
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="updateDate")
def update_date(self) -> str:
return pulumi.get(self, "update_date")
@pulumi.output_type
class GetSamlProvidersProviderResult(dict):
def __init__(__self__, *,
arn: str,
description: str,
encodedsaml_metadata_document: str,
id: str,
saml_provider_name: str,
update_date: str):
pulumi.set(__self__, "arn", arn)
pulumi.set(__self__, "description", description)
pulumi.set(__self__, "encodedsaml_metadata_document", encodedsaml_metadata_document)
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "saml_provider_name", saml_provider_name)
pulumi.set(__self__, "update_date", update_date)
@property
@pulumi.getter
def arn(self) -> str:
return pulumi.get(self, "arn")
@property
@pulumi.getter
def description(self) -> str:
return pulumi.get(self, "description")
@property
@pulumi.getter(name="encodedsamlMetadataDocument")
def encodedsaml_metadata_document(self) -> str:
return pulumi.get(self, "encodedsaml_metadata_document")
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter(name="samlProviderName")
def saml_provider_name(self) -> str:
return pulumi.get(self, "saml_provider_name")
@property
@pulumi.getter(name="updateDate")
def update_date(self) -> str:
return pulumi.get(self, "update_date")
@pulumi.output_type
class GetUsersUserResult(dict):
def __init__(__self__, *,
create_date: str,
id: str,
last_login_date: str,
name: str):
pulumi.set(__self__, "create_date", create_date)
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "last_login_date", last_login_date)
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="createDate")
def create_date(self) -> str:
return pulumi.get(self, "create_date")
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter(name="lastLoginDate")
def last_login_date(self) -> str:
return pulumi.get(self, "last_login_date")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
| true | true |
f73c9c126705b5ee90f5d84cab58abc47b59ca61 | 485 | py | Python | src/main/python/combiner.py | boom-roasted/ImageWAO | 944505dab1a7c97b8eae2bf9fb30006d0f471f89 | [
"MIT"
] | 1 | 2020-03-22T01:52:52.000Z | 2020-03-22T01:52:52.000Z | src/main/python/combiner.py | leftaltkey/ImageWAO | 944505dab1a7c97b8eae2bf9fb30006d0f471f89 | [
"MIT"
] | 2 | 2021-06-08T21:12:47.000Z | 2021-06-08T21:30:32.000Z | src/main/python/combiner.py | leftaltkey/ImageWAO | 944505dab1a7c97b8eae2bf9fb30006d0f471f89 | [
"MIT"
] | null | null | null | from fbs_runtime.application_context import cached_property
from imagewao import QImageWAO
class Combiner:
    """Application shell: wires the fbs application context to the main QImageWAO window."""
    def __init__(self, ctx):
        # ctx is the fbs ApplicationContext; it owns the Qt app object and
        # resolves bundled resources such as the stylesheet.
        self.ctx = ctx
    @cached_property
    def window(self):
        """Main application window, created lazily and reused on later access."""
        return QImageWAO()
    def run(self):
        """Apply the bundled stylesheet, show the window, and enter the Qt event loop."""
        qss_path = self.ctx.get_resource("style.qss")
        with open(qss_path) as stream:
            stylesheet = stream.read()
        self.ctx.app.setStyleSheet(stylesheet)
        main_window = self.window
        main_window.resize(1050, 650)
        main_window.show()
        return self.ctx.app.exec_()
| 23.095238 | 59 | 0.639175 | from fbs_runtime.application_context import cached_property
from imagewao import QImageWAO
class Combiner:
    """Application shell: connects the fbs application context to the QImageWAO main window."""
    def __init__(self, ctx):
        # ctx: fbs application context (Qt app instance + bundled-resource lookup).
        self.ctx = ctx
    @cached_property
    def window(self):
        """Lazily-created main window; cached so repeated access reuses one instance."""
        return QImageWAO()
    def run(self):
        """Load the bundled Qt stylesheet, size and show the window, then run the event loop."""
        with open(self.ctx.get_resource("style.qss")) as f:
            sheet = f.read()
        self.ctx.app.setStyleSheet(sheet)
        self.window.resize(1050, 650)
        self.window.show()
        return self.ctx.app.exec_()
| true | true |
f73c9c1f18b63e8453b93ea1488004fa78cc13f1 | 19,780 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/community/general/plugins/modules/web_infrastructure/deploy_helper.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 22 | 2021-07-16T08:11:22.000Z | 2022-03-31T07:15:34.000Z | venv/lib/python3.6/site-packages/ansible_collections/community/general/plugins/modules/web_infrastructure/deploy_helper.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/community/general/plugins/modules/web_infrastructure/deploy_helper.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 39 | 2021-07-05T02:31:42.000Z | 2022-03-31T02:46:03.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Jasper N. Brouwer <jasper@nerdsweide.nl>
# (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: deploy_helper
author: "Ramon de la Fuente (@ramondelafuente)"
short_description: Manages some of the steps common in deploying projects.
description:
- The Deploy Helper manages some of the steps common in deploying software.
It creates a folder structure, manages a symlink for the current release
and cleans up old releases.
- "Running it with the C(state=query) or C(state=present) will return the C(deploy_helper) fact.
C(project_path), whatever you set in the path parameter,
C(current_path), the path to the symlink that points to the active release,
C(releases_path), the path to the folder to keep releases in,
C(shared_path), the path to the folder to keep shared resources in,
C(unfinished_filename), the file to check for to recognize unfinished builds,
C(previous_release), the release the 'current' symlink is pointing to,
C(previous_release_path), the full path to the 'current' symlink target,
C(new_release), either the 'release' parameter or a generated timestamp,
C(new_release_path), the path to the new release folder (not created by the module)."
options:
path:
type: path
required: True
aliases: ['dest']
description:
- the root path of the project. Alias I(dest).
Returned in the C(deploy_helper.project_path) fact.
state:
type: str
description:
- the state of the project.
C(query) will only gather facts,
C(present) will create the project I(root) folder, and in it the I(releases) and I(shared) folders,
C(finalize) will remove the unfinished_filename file, create a symlink to the newly
deployed release and optionally clean old releases,
C(clean) will remove failed & old releases,
C(absent) will remove the project folder (synonymous to the M(ansible.builtin.file) module with C(state=absent))
choices: [ present, finalize, absent, clean, query ]
default: present
release:
type: str
description:
- the release version that is being deployed. Defaults to a timestamp format %Y%m%d%H%M%S (i.e. '20141119223359').
This parameter is optional during C(state=present), but needs to be set explicitly for C(state=finalize).
You can use the generated fact C(release={{ deploy_helper.new_release }}).
releases_path:
type: str
description:
- the name of the folder that will hold the releases. This can be relative to C(path) or absolute.
Returned in the C(deploy_helper.releases_path) fact.
default: releases
shared_path:
type: path
description:
- the name of the folder that will hold the shared resources. This can be relative to C(path) or absolute.
If this is set to an empty string, no shared folder will be created.
Returned in the C(deploy_helper.shared_path) fact.
default: shared
current_path:
type: path
description:
- the name of the symlink that is created when the deploy is finalized. Used in C(finalize) and C(clean).
Returned in the C(deploy_helper.current_path) fact.
default: current
unfinished_filename:
type: str
description:
- the name of the file that indicates a deploy has not finished. All folders in the releases_path that
contain this file will be deleted on C(state=finalize) with clean=True, or C(state=clean). This file is
automatically deleted from the I(new_release_path) during C(state=finalize).
default: DEPLOY_UNFINISHED
clean:
description:
- Whether to run the clean procedure in case of C(state=finalize).
type: bool
default: 'yes'
keep_releases:
type: int
description:
- the number of old releases to keep when cleaning. Used in C(finalize) and C(clean). Any unfinished builds
will be deleted first, so only correct releases will count. The current version will not count.
default: 5
notes:
- Facts are only returned for C(state=query) and C(state=present). If you use both, you should pass any overridden
parameters to both calls, otherwise the second call will overwrite the facts of the first one.
- When using C(state=clean), the releases are ordered by I(creation date). You should be able to switch to a
new naming strategy without problems.
- Because of the default behaviour of generating the I(new_release) fact, this module will not be idempotent
unless you pass your own release name with C(release). Due to the nature of deploying software, this should not
be much of a problem.
extends_documentation_fragment: files
'''
EXAMPLES = '''
# General explanation, starting with an example folder structure for a project:
# root:
# releases:
# - 20140415234508
# - 20140415235146
# - 20140416082818
#
# shared:
# - sessions
# - uploads
#
# current: releases/20140416082818
# The 'releases' folder holds all the available releases. A release is a complete build of the application being
# deployed. This can be a clone of a repository for example, or a sync of a local folder on your filesystem.
# Having timestamped folders is one way of having distinct releases, but you could choose your own strategy like
# git tags or commit hashes.
#
# During a deploy, a new folder should be created in the releases folder and any build steps required should be
# performed. Once the new build is ready, the deploy procedure is 'finalized' by replacing the 'current' symlink
# with a link to this build.
#
# The 'shared' folder holds any resource that is shared between releases. Examples of this are web-server
# session files, or files uploaded by users of your application. It's quite common to have symlinks from a release
# folder pointing to a shared/subfolder, and creating these links would be automated as part of the build steps.
#
# The 'current' symlink points to one of the releases. Probably the latest one, unless a deploy is in progress.
# The web-server's root for the project will go through this symlink, so the 'downtime' when switching to a new
# release is reduced to the time it takes to switch the link.
#
# To distinguish between successful builds and unfinished ones, a file can be placed in the folder of the release
# that is currently in progress. The existence of this file will mark it as unfinished, and allow an automated
# procedure to remove it during cleanup.
# Typical usage
- name: Initialize the deploy root and gather facts
community.general.deploy_helper:
path: /path/to/root
- name: Clone the project to the new release folder
ansible.builtin.git:
    repo: git://foosball.example.org/path/to/repo.git
dest: '{{ deploy_helper.new_release_path }}'
version: v1.1.1
- name: Add an unfinished file, to allow cleanup on successful finalize
ansible.builtin.file:
path: '{{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }}'
state: touch
- name: Perform some build steps, like running your dependency manager for example
composer:
command: install
working_dir: '{{ deploy_helper.new_release_path }}'
- name: Create some folders in the shared folder
ansible.builtin.file:
path: '{{ deploy_helper.shared_path }}/{{ item }}'
state: directory
with_items:
- sessions
- uploads
- name: Add symlinks from the new release to the shared folder
ansible.builtin.file:
path: '{{ deploy_helper.new_release_path }}/{{ item.path }}'
src: '{{ deploy_helper.shared_path }}/{{ item.src }}'
state: link
with_items:
- path: app/sessions
src: sessions
- path: web/uploads
src: uploads
- name: Finalize the deploy, removing the unfinished file and switching the symlink
community.general.deploy_helper:
path: /path/to/root
release: '{{ deploy_helper.new_release }}'
state: finalize
# Retrieving facts before running a deploy
- name: Run 'state=query' to gather facts without changing anything
community.general.deploy_helper:
path: /path/to/root
state: query
# Remember to set the 'release' parameter when you actually call 'state=present' later
- name: Initialize the deploy root
community.general.deploy_helper:
path: /path/to/root
release: '{{ deploy_helper.new_release }}'
state: present
# all paths can be absolute or relative (to the 'path' parameter)
- community.general.deploy_helper:
path: /path/to/root
releases_path: /var/www/project/releases
shared_path: /var/www/shared
current_path: /var/www/active
# Using your own naming strategy for releases (a version tag in this case):
- community.general.deploy_helper:
path: /path/to/root
release: v1.1.1
state: present
- community.general.deploy_helper:
path: /path/to/root
release: '{{ deploy_helper.new_release }}'
state: finalize
# Using a different unfinished_filename:
- community.general.deploy_helper:
path: /path/to/root
unfinished_filename: README.md
release: '{{ deploy_helper.new_release }}'
state: finalize
# Postponing the cleanup of older builds:
- community.general.deploy_helper:
path: /path/to/root
release: '{{ deploy_helper.new_release }}'
state: finalize
clean: False
- community.general.deploy_helper:
path: /path/to/root
state: clean
# Or running the cleanup ahead of the new deploy
- community.general.deploy_helper:
path: /path/to/root
state: clean
- community.general.deploy_helper:
path: /path/to/root
state: present
# Keeping more old releases:
- community.general.deploy_helper:
path: /path/to/root
release: '{{ deploy_helper.new_release }}'
state: finalize
keep_releases: 10
# Or, if you use 'clean=false' on finalize:
- community.general.deploy_helper:
path: /path/to/root
state: clean
keep_releases: 10
# Removing the entire project root folder
- community.general.deploy_helper:
path: /path/to/root
state: absent
# Debugging the facts returned by the module
- community.general.deploy_helper:
path: /path/to/root
- ansible.builtin.debug:
var: deploy_helper
'''
import os
import shutil
import time
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
class DeployHelper(object):
    """Filesystem helper behind the deploy_helper module.

    Holds the (already validated) module parameters and implements the
    primitives that the ``main()`` state machine composes: fact gathering,
    directory creation/removal, switching the 'current' symlink and
    cleaning up old or unfinished releases.  All mutating methods honour
    Ansible's check mode and return a changed indicator.
    """

    def __init__(self, module):
        self.module = module
        # owner/group/mode etc. from the 'files' doc fragment; applied to
        # directories created by create_path().
        self.file_args = module.load_file_common_arguments(module.params)

        self.clean = module.params['clean']
        self.current_path = module.params['current_path']
        self.keep_releases = module.params['keep_releases']
        self.path = module.params['path']
        self.release = module.params['release']
        self.releases_path = module.params['releases_path']
        self.shared_path = module.params['shared_path']
        self.state = module.params['state']
        self.unfinished_filename = module.params['unfinished_filename']

    def gather_facts(self):
        """Build and return the ``deploy_helper`` facts dictionary.

        Generates a timestamp release name for ``state=query``/``present``
        when none was supplied.  Sub-paths are joined onto the project
        root; absolute sub-paths win, per os.path.join semantics.
        """
        current_path = os.path.join(self.path, self.current_path)
        releases_path = os.path.join(self.path, self.releases_path)
        if self.shared_path:
            shared_path = os.path.join(self.path, self.shared_path)
        else:
            # shared_path='' means "do not create a shared folder".
            shared_path = None

        previous_release, previous_release_path = self._get_last_release(current_path)

        if not self.release and (self.state == 'query' or self.state == 'present'):
            self.release = time.strftime("%Y%m%d%H%M%S")

        if self.release:
            new_release_path = os.path.join(releases_path, self.release)
        else:
            new_release_path = None

        return {
            'project_path': self.path,
            'current_path': current_path,
            'releases_path': releases_path,
            'shared_path': shared_path,
            'previous_release': previous_release,
            'previous_release_path': previous_release_path,
            'new_release': self.release,
            'new_release_path': new_release_path,
            'unfinished_filename': self.unfinished_filename
        }

    def delete_path(self, path):
        """Recursively delete *path*; return True if it existed.

        Fails the module when *path* exists but is not a directory, or
        when the removal itself raises.
        """
        if not os.path.lexists(path):
            return False

        if not os.path.isdir(path):
            self.module.fail_json(msg="%s exists but is not a directory" % path)

        if not self.module.check_mode:
            try:
                shutil.rmtree(path, ignore_errors=False)
            except Exception as e:
                self.module.fail_json(msg="rmtree failed: %s" % to_native(e), exception=traceback.format_exc())

        return True

    def create_path(self, path):
        """Create *path* (and parents) if missing; apply file attributes.

        Returns a truthy changed indicator (bool/int) suitable for
        summing by the caller.
        """
        changed = False

        if not os.path.lexists(path):
            changed = True
            if not self.module.check_mode:
                os.makedirs(path)

        elif not os.path.isdir(path):
            self.module.fail_json(msg="%s exists but is not a directory" % path)

        # Also report a change when only owner/group/mode need updating.
        changed += self.module.set_directory_attributes_if_different(self._get_file_args(path), changed)

        return changed

    def check_link(self, path):
        """Fail the module if *path* exists but is not a symlink."""
        if os.path.lexists(path):
            if not os.path.islink(path):
                self.module.fail_json(msg="%s exists but is not a symbolic link" % path)

    def create_link(self, source, link_name):
        """Point *link_name* at *source* atomically; return changed flag.

        The switch is done by creating a temporary link and renaming it
        over the old one, so 'current' never dangles during a deploy.
        """
        if os.path.islink(link_name):
            norm_link = os.path.normpath(os.path.realpath(link_name))
            norm_source = os.path.normpath(os.path.realpath(source))
            if norm_link == norm_source:
                changed = False
            else:
                changed = True
                if not self.module.check_mode:
                    if not os.path.lexists(source):
                        # Bugfix: message typo ("doesn't exists") corrected.
                        self.module.fail_json(msg="the symlink target %s doesn't exist" % source)
                    tmp_link_name = link_name + '.' + self.unfinished_filename
                    if os.path.islink(tmp_link_name):
                        os.unlink(tmp_link_name)
                    os.symlink(source, tmp_link_name)
                    os.rename(tmp_link_name, link_name)
        else:
            changed = True
            if not self.module.check_mode:
                os.symlink(source, link_name)

        return changed

    def remove_unfinished_file(self, new_release_path):
        """Delete the unfinished-marker file inside *new_release_path*."""
        changed = False
        unfinished_file_path = os.path.join(new_release_path, self.unfinished_filename)
        if os.path.lexists(unfinished_file_path):
            changed = True
            if not self.module.check_mode:
                os.remove(unfinished_file_path)

        return changed

    def remove_unfinished_builds(self, releases_path):
        """Delete every release folder that still carries the marker file."""
        changes = 0

        for release in os.listdir(releases_path):
            if os.path.isfile(os.path.join(releases_path, release, self.unfinished_filename)):
                if self.module.check_mode:
                    changes += 1
                else:
                    changes += self.delete_path(os.path.join(releases_path, release))

        return changes

    def remove_unfinished_link(self, path):
        """Remove a stale '<release>.<marker>' temporary link in *path*.

        Returns True when such a link exists.
        """
        changed = False

        if not self.release:
            return changed

        tmp_link_name = os.path.join(path, self.release + '.' + self.unfinished_filename)
        # Bugfix: test for existence unconditionally so check mode reports
        # the pending change; previously the check was skipped entirely
        # whenever check_mode was on, silently claiming 'ok'.
        if os.path.exists(tmp_link_name):
            changed = True
            if not self.module.check_mode:
                os.remove(tmp_link_name)

        return changed

    def cleanup(self, releases_path, reserve_version):
        """Delete old releases beyond keep_releases; return deletion count.

        *reserve_version* (the release currently being deployed) is never
        counted or removed.  Releases are ordered by ctime, newest first.
        """
        changes = 0

        if os.path.lexists(releases_path):
            releases = [f for f in os.listdir(releases_path) if os.path.isdir(os.path.join(releases_path, f))]
            try:
                releases.remove(reserve_version)
            except ValueError:
                pass

            if not self.module.check_mode:
                releases.sort(key=lambda x: os.path.getctime(os.path.join(releases_path, x)), reverse=True)
                for release in releases[self.keep_releases:]:
                    changes += self.delete_path(os.path.join(releases_path, release))
            elif len(releases) > self.keep_releases:
                # Check mode: only estimate how many would be removed.
                changes += (len(releases) - self.keep_releases)

        return changes

    def _get_file_args(self, path):
        """Return a copy of the common file args targeting *path*."""
        file_args = self.file_args.copy()
        file_args['path'] = path
        return file_args

    def _get_last_release(self, current_path):
        """Resolve the 'current' symlink to (release name, full path)."""
        previous_release = None
        previous_release_path = None

        if os.path.lexists(current_path):
            previous_release_path = os.path.realpath(current_path)
            previous_release = os.path.basename(previous_release_path)

        return previous_release, previous_release_path
def main():
    """Module entry point: parse arguments and dispatch on 'state'."""
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(aliases=['dest'], required=True, type='path'),
            release=dict(type='str'),
            releases_path=dict(type='str', default='releases'),
            shared_path=dict(type='path', default='shared'),
            current_path=dict(type='path', default='current'),
            keep_releases=dict(type='int', default=5),
            clean=dict(type='bool', default=True),
            unfinished_filename=dict(type='str', default='DEPLOY_UNFINISHED'),
            state=dict(choices=['present', 'absent', 'clean', 'finalize', 'query'], default='present')
        ),
        required_if=[
            ('state', 'finalize', ['release']),
        ],
        add_file_common_args=True,
        supports_check_mode=True
    )

    helper = DeployHelper(module)
    facts = helper.gather_facts()
    state = helper.state

    result = {'state': state}
    change_count = 0

    if state == 'query':
        # Facts only; nothing on disk is touched.
        result['ansible_facts'] = {'deploy_helper': facts}
    elif state == 'present':
        helper.check_link(facts['current_path'])
        change_count += helper.create_path(facts['project_path'])
        change_count += helper.create_path(facts['releases_path'])
        if helper.shared_path:
            change_count += helper.create_path(facts['shared_path'])
        result['ansible_facts'] = {'deploy_helper': facts}
    elif state == 'finalize':
        if helper.keep_releases <= 0:
            module.fail_json(msg="'keep_releases' should be at least 1")
        change_count += helper.remove_unfinished_file(facts['new_release_path'])
        change_count += helper.create_link(facts['new_release_path'], facts['current_path'])
        if helper.clean:
            change_count += helper.remove_unfinished_link(facts['project_path'])
            change_count += helper.remove_unfinished_builds(facts['releases_path'])
            change_count += helper.cleanup(facts['releases_path'], facts['new_release'])
    elif state == 'clean':
        change_count += helper.remove_unfinished_link(facts['project_path'])
        change_count += helper.remove_unfinished_builds(facts['releases_path'])
        change_count += helper.cleanup(facts['releases_path'], facts['new_release'])
    elif state == 'absent':
        # Facts are meaningless once the project root is gone.
        result['ansible_facts'] = {'deploy_helper': []}
        change_count += helper.delete_path(facts['project_path'])

    result['changed'] = change_count > 0
    module.exit_json(**result)


if __name__ == '__main__':
    main()
| 37.533207 | 120 | 0.673357 |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: deploy_helper
author: "Ramon de la Fuente (@ramondelafuente)"
short_description: Manages some of the steps common in deploying projects.
description:
- The Deploy Helper manages some of the steps common in deploying software.
It creates a folder structure, manages a symlink for the current release
and cleans up old releases.
- "Running it with the C(state=query) or C(state=present) will return the C(deploy_helper) fact.
C(project_path), whatever you set in the path parameter,
C(current_path), the path to the symlink that points to the active release,
C(releases_path), the path to the folder to keep releases in,
C(shared_path), the path to the folder to keep shared resources in,
C(unfinished_filename), the file to check for to recognize unfinished builds,
C(previous_release), the release the 'current' symlink is pointing to,
C(previous_release_path), the full path to the 'current' symlink target,
C(new_release), either the 'release' parameter or a generated timestamp,
C(new_release_path), the path to the new release folder (not created by the module)."
options:
path:
type: path
required: True
aliases: ['dest']
description:
- the root path of the project. Alias I(dest).
Returned in the C(deploy_helper.project_path) fact.
state:
type: str
description:
- the state of the project.
C(query) will only gather facts,
C(present) will create the project I(root) folder, and in it the I(releases) and I(shared) folders,
C(finalize) will remove the unfinished_filename file, create a symlink to the newly
deployed release and optionally clean old releases,
C(clean) will remove failed & old releases,
C(absent) will remove the project folder (synonymous to the M(ansible.builtin.file) module with C(state=absent))
choices: [ present, finalize, absent, clean, query ]
default: present
release:
type: str
description:
- the release version that is being deployed. Defaults to a timestamp format %Y%m%d%H%M%S (i.e. '20141119223359').
This parameter is optional during C(state=present), but needs to be set explicitly for C(state=finalize).
You can use the generated fact C(release={{ deploy_helper.new_release }}).
releases_path:
type: str
description:
- the name of the folder that will hold the releases. This can be relative to C(path) or absolute.
Returned in the C(deploy_helper.releases_path) fact.
default: releases
shared_path:
type: path
description:
- the name of the folder that will hold the shared resources. This can be relative to C(path) or absolute.
If this is set to an empty string, no shared folder will be created.
Returned in the C(deploy_helper.shared_path) fact.
default: shared
current_path:
type: path
description:
- the name of the symlink that is created when the deploy is finalized. Used in C(finalize) and C(clean).
Returned in the C(deploy_helper.current_path) fact.
default: current
unfinished_filename:
type: str
description:
- the name of the file that indicates a deploy has not finished. All folders in the releases_path that
contain this file will be deleted on C(state=finalize) with clean=True, or C(state=clean). This file is
automatically deleted from the I(new_release_path) during C(state=finalize).
default: DEPLOY_UNFINISHED
clean:
description:
- Whether to run the clean procedure in case of C(state=finalize).
type: bool
default: 'yes'
keep_releases:
type: int
description:
- the number of old releases to keep when cleaning. Used in C(finalize) and C(clean). Any unfinished builds
will be deleted first, so only correct releases will count. The current version will not count.
default: 5
notes:
- Facts are only returned for C(state=query) and C(state=present). If you use both, you should pass any overridden
parameters to both calls, otherwise the second call will overwrite the facts of the first one.
- When using C(state=clean), the releases are ordered by I(creation date). You should be able to switch to a
new naming strategy without problems.
- Because of the default behaviour of generating the I(new_release) fact, this module will not be idempotent
unless you pass your own release name with C(release). Due to the nature of deploying software, this should not
be much of a problem.
extends_documentation_fragment: files
'''
EXAMPLES = '''
# General explanation, starting with an example folder structure for a project:
# root:
# releases:
# - 20140415234508
# - 20140415235146
# - 20140416082818
#
# shared:
# - sessions
# - uploads
#
# current: releases/20140416082818
# The 'releases' folder holds all the available releases. A release is a complete build of the application being
# deployed. This can be a clone of a repository for example, or a sync of a local folder on your filesystem.
# Having timestamped folders is one way of having distinct releases, but you could choose your own strategy like
# git tags or commit hashes.
#
# During a deploy, a new folder should be created in the releases folder and any build steps required should be
# performed. Once the new build is ready, the deploy procedure is 'finalized' by replacing the 'current' symlink
# with a link to this build.
#
# The 'shared' folder holds any resource that is shared between releases. Examples of this are web-server
# session files, or files uploaded by users of your application. It's quite common to have symlinks from a release
# folder pointing to a shared/subfolder, and creating these links would be automated as part of the build steps.
#
# The 'current' symlink points to one of the releases. Probably the latest one, unless a deploy is in progress.
# The web-server's root for the project will go through this symlink, so the 'downtime' when switching to a new
# release is reduced to the time it takes to switch the link.
#
# To distinguish between successful builds and unfinished ones, a file can be placed in the folder of the release
# that is currently in progress. The existence of this file will mark it as unfinished, and allow an automated
# procedure to remove it during cleanup.
# Typical usage
- name: Initialize the deploy root and gather facts
community.general.deploy_helper:
path: /path/to/root
- name: Clone the project to the new release folder
ansible.builtin.git:
repo: ansible.builtin.git://foosball.example.org/path/to/repo.git
dest: '{{ deploy_helper.new_release_path }}'
version: v1.1.1
- name: Add an unfinished file, to allow cleanup on successful finalize
ansible.builtin.file:
path: '{{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }}'
state: touch
- name: Perform some build steps, like running your dependency manager for example
composer:
command: install
working_dir: '{{ deploy_helper.new_release_path }}'
- name: Create some folders in the shared folder
ansible.builtin.file:
path: '{{ deploy_helper.shared_path }}/{{ item }}'
state: directory
with_items:
- sessions
- uploads
- name: Add symlinks from the new release to the shared folder
ansible.builtin.file:
path: '{{ deploy_helper.new_release_path }}/{{ item.path }}'
src: '{{ deploy_helper.shared_path }}/{{ item.src }}'
state: link
with_items:
- path: app/sessions
src: sessions
- path: web/uploads
src: uploads
- name: Finalize the deploy, removing the unfinished file and switching the symlink
community.general.deploy_helper:
path: /path/to/root
release: '{{ deploy_helper.new_release }}'
state: finalize
# Retrieving facts before running a deploy
- name: Run 'state=query' to gather facts without changing anything
community.general.deploy_helper:
path: /path/to/root
state: query
# Remember to set the 'release' parameter when you actually call 'state=present' later
- name: Initialize the deploy root
community.general.deploy_helper:
path: /path/to/root
release: '{{ deploy_helper.new_release }}'
state: present
# all paths can be absolute or relative (to the 'path' parameter)
- community.general.deploy_helper:
path: /path/to/root
releases_path: /var/www/project/releases
shared_path: /var/www/shared
current_path: /var/www/active
# Using your own naming strategy for releases (a version tag in this case):
- community.general.deploy_helper:
path: /path/to/root
release: v1.1.1
state: present
- community.general.deploy_helper:
path: /path/to/root
release: '{{ deploy_helper.new_release }}'
state: finalize
# Using a different unfinished_filename:
- community.general.deploy_helper:
path: /path/to/root
unfinished_filename: README.md
release: '{{ deploy_helper.new_release }}'
state: finalize
# Postponing the cleanup of older builds:
- community.general.deploy_helper:
path: /path/to/root
release: '{{ deploy_helper.new_release }}'
state: finalize
clean: False
- community.general.deploy_helper:
path: /path/to/root
state: clean
# Or running the cleanup ahead of the new deploy
- community.general.deploy_helper:
path: /path/to/root
state: clean
- community.general.deploy_helper:
path: /path/to/root
state: present
# Keeping more old releases:
- community.general.deploy_helper:
path: /path/to/root
release: '{{ deploy_helper.new_release }}'
state: finalize
keep_releases: 10
# Or, if you use 'clean=false' on finalize:
- community.general.deploy_helper:
path: /path/to/root
state: clean
keep_releases: 10
# Removing the entire project root folder
- community.general.deploy_helper:
path: /path/to/root
state: absent
# Debugging the facts returned by the module
- community.general.deploy_helper:
path: /path/to/root
- ansible.builtin.debug:
var: deploy_helper
'''
import os
import shutil
import time
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
class DeployHelper(object):
def __init__(self, module):
self.module = module
self.file_args = module.load_file_common_arguments(module.params)
self.clean = module.params['clean']
self.current_path = module.params['current_path']
self.keep_releases = module.params['keep_releases']
self.path = module.params['path']
self.release = module.params['release']
self.releases_path = module.params['releases_path']
self.shared_path = module.params['shared_path']
self.state = module.params['state']
self.unfinished_filename = module.params['unfinished_filename']
def gather_facts(self):
current_path = os.path.join(self.path, self.current_path)
releases_path = os.path.join(self.path, self.releases_path)
if self.shared_path:
shared_path = os.path.join(self.path, self.shared_path)
else:
shared_path = None
previous_release, previous_release_path = self._get_last_release(current_path)
if not self.release and (self.state == 'query' or self.state == 'present'):
self.release = time.strftime("%Y%m%d%H%M%S")
if self.release:
new_release_path = os.path.join(releases_path, self.release)
else:
new_release_path = None
return {
'project_path': self.path,
'current_path': current_path,
'releases_path': releases_path,
'shared_path': shared_path,
'previous_release': previous_release,
'previous_release_path': previous_release_path,
'new_release': self.release,
'new_release_path': new_release_path,
'unfinished_filename': self.unfinished_filename
}
def delete_path(self, path):
if not os.path.lexists(path):
return False
if not os.path.isdir(path):
self.module.fail_json(msg="%s exists but is not a directory" % path)
if not self.module.check_mode:
try:
shutil.rmtree(path, ignore_errors=False)
except Exception as e:
self.module.fail_json(msg="rmtree failed: %s" % to_native(e), exception=traceback.format_exc())
return True
def create_path(self, path):
changed = False
if not os.path.lexists(path):
changed = True
if not self.module.check_mode:
os.makedirs(path)
elif not os.path.isdir(path):
self.module.fail_json(msg="%s exists but is not a directory" % path)
changed += self.module.set_directory_attributes_if_different(self._get_file_args(path), changed)
return changed
def check_link(self, path):
if os.path.lexists(path):
if not os.path.islink(path):
self.module.fail_json(msg="%s exists but is not a symbolic link" % path)
def create_link(self, source, link_name):
if os.path.islink(link_name):
norm_link = os.path.normpath(os.path.realpath(link_name))
norm_source = os.path.normpath(os.path.realpath(source))
if norm_link == norm_source:
changed = False
else:
changed = True
if not self.module.check_mode:
if not os.path.lexists(source):
self.module.fail_json(msg="the symlink target %s doesn't exists" % source)
tmp_link_name = link_name + '.' + self.unfinished_filename
if os.path.islink(tmp_link_name):
os.unlink(tmp_link_name)
os.symlink(source, tmp_link_name)
os.rename(tmp_link_name, link_name)
else:
changed = True
if not self.module.check_mode:
os.symlink(source, link_name)
return changed
def remove_unfinished_file(self, new_release_path):
changed = False
unfinished_file_path = os.path.join(new_release_path, self.unfinished_filename)
if os.path.lexists(unfinished_file_path):
changed = True
if not self.module.check_mode:
os.remove(unfinished_file_path)
return changed
def remove_unfinished_builds(self, releases_path):
changes = 0
for release in os.listdir(releases_path):
if os.path.isfile(os.path.join(releases_path, release, self.unfinished_filename)):
if self.module.check_mode:
changes += 1
else:
changes += self.delete_path(os.path.join(releases_path, release))
return changes
def remove_unfinished_link(self, path):
changed = False
if not self.release:
return changed
tmp_link_name = os.path.join(path, self.release + '.' + self.unfinished_filename)
if not self.module.check_mode and os.path.exists(tmp_link_name):
changed = True
os.remove(tmp_link_name)
return changed
def cleanup(self, releases_path, reserve_version):
changes = 0
if os.path.lexists(releases_path):
releases = [f for f in os.listdir(releases_path) if os.path.isdir(os.path.join(releases_path, f))]
try:
releases.remove(reserve_version)
except ValueError:
pass
if not self.module.check_mode:
releases.sort(key=lambda x: os.path.getctime(os.path.join(releases_path, x)), reverse=True)
for release in releases[self.keep_releases:]:
changes += self.delete_path(os.path.join(releases_path, release))
elif len(releases) > self.keep_releases:
changes += (len(releases) - self.keep_releases)
return changes
def _get_file_args(self, path):
file_args = self.file_args.copy()
file_args['path'] = path
return file_args
def _get_last_release(self, current_path):
previous_release = None
previous_release_path = None
if os.path.lexists(current_path):
previous_release_path = os.path.realpath(current_path)
previous_release = os.path.basename(previous_release_path)
return previous_release, previous_release_path
def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(aliases=['dest'], required=True, type='path'),
release=dict(type='str'),
releases_path=dict(type='str', default='releases'),
shared_path=dict(type='path', default='shared'),
current_path=dict(type='path', default='current'),
keep_releases=dict(type='int', default=5),
clean=dict(type='bool', default=True),
unfinished_filename=dict(type='str', default='DEPLOY_UNFINISHED'),
state=dict(choices=['present', 'absent', 'clean', 'finalize', 'query'], default='present')
),
required_if=[
('state', 'finalize', ['release']),
],
add_file_common_args=True,
supports_check_mode=True
)
deploy_helper = DeployHelper(module)
facts = deploy_helper.gather_facts()
result = {
'state': deploy_helper.state
}
changes = 0
if deploy_helper.state == 'query':
result['ansible_facts'] = {'deploy_helper': facts}
elif deploy_helper.state == 'present':
deploy_helper.check_link(facts['current_path'])
changes += deploy_helper.create_path(facts['project_path'])
changes += deploy_helper.create_path(facts['releases_path'])
if deploy_helper.shared_path:
changes += deploy_helper.create_path(facts['shared_path'])
result['ansible_facts'] = {'deploy_helper': facts}
elif deploy_helper.state == 'finalize':
if deploy_helper.keep_releases <= 0:
module.fail_json(msg="'keep_releases' should be at least 1")
changes += deploy_helper.remove_unfinished_file(facts['new_release_path'])
changes += deploy_helper.create_link(facts['new_release_path'], facts['current_path'])
if deploy_helper.clean:
changes += deploy_helper.remove_unfinished_link(facts['project_path'])
changes += deploy_helper.remove_unfinished_builds(facts['releases_path'])
changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release'])
elif deploy_helper.state == 'clean':
changes += deploy_helper.remove_unfinished_link(facts['project_path'])
changes += deploy_helper.remove_unfinished_builds(facts['releases_path'])
changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release'])
elif deploy_helper.state == 'absent':
# destroy the facts
result['ansible_facts'] = {'deploy_helper': []}
changes += deploy_helper.delete_path(facts['project_path'])
if changes > 0:
result['changed'] = True
else:
result['changed'] = False
module.exit_json(**result)
if __name__ == '__main__':
main()
| true | true |
f73c9c82133d87ced8836742df68c56eec27f350 | 4,853 | py | Python | gameApp/views.py | cs-fullstack-2019-spring/django-mini-project4-thomas-rob | d80caea3f5655bd1be2b0114eb7be2cb4a4c5a53 | [
"Apache-2.0"
] | null | null | null | gameApp/views.py | cs-fullstack-2019-spring/django-mini-project4-thomas-rob | d80caea3f5655bd1be2b0114eb7be2cb4a4c5a53 | [
"Apache-2.0"
] | null | null | null | gameApp/views.py | cs-fullstack-2019-spring/django-mini-project4-thomas-rob | d80caea3f5655bd1be2b0114eb7be2cb4a4c5a53 | [
"Apache-2.0"
] | null | null | null | from django.contrib.auth.decorators import \
login_required # unused for now but for making sure the person on the page is logged in
from django.shortcuts import render, redirect, get_object_or_404 # pulling redirect
from django.http import HttpResponse # used for testing purposes
# Create your views here.
from .forms import GameModel, GameCollectorModel, GameForm, \
GameCollectorForm # called all forms in models in single line
from django.contrib.auth.models import User # Collects the user form from django to add users
def index(request): # for the rendering of the index page
gameList = GameModel.objects.all() # this collects all games made
# @login_required
def index(request): # for the rendering of the index page
if request.user.is_authenticated: # makes sure the user is logged in to authorize this existing
gameCollector = GameCollectorModel.objects.get(userIDkey=request.user) # this gets the game collector
gameList = GameModel.objects.filter(gameMakeIdKey=gameCollector) # this gets the list of games that person did
else:
gameList = '' # sets gamelist to empty so no games will be displayed
context = \
{
'gameList': gameList # this adds completed game list that will later filter out based on logged in user
}
return render(request, 'gameApp/index.html', context) # this renders the page and start at index
def newUser(request): # for adding a new user
userForm = GameCollectorForm(request.POST or None) # collects the form necessary to make a new user
if userForm.is_valid(): # confirms that the parameters are met
if request.POST['password1'] == request.POST['password2']: # adds additional parameter
newUser = User.objects.create_user(request.POST['username'], '',
request.POST['password1']) # saves the user for use later
collector = userForm.save(commit=None) # saves the model
collector.userIDkey = newUser # saves the user created as the foreignkey for the game-collector model
collector.save() # saves the form for editing later
return redirect('index') # returns person to index
context = \
{
'form': userForm # gets the form to add new user and uses an easy to read name
}
return render(request, 'gameApp/newUser.html',
context) # renders the newUser page if they don't have the information entered
def newGame(request): # this will render the newgame page to save a new game
gameForm = GameForm() # this collects the form to use
context = \
{
'form': gameForm # this changed the name to fit the context for easy to input
}
return render(request, 'gameApp/newGame.html', context) # renders the page to make the game
def edit(request, gameID):
item = get_object_or_404(GameModel, pk=gameID) # gets the game model to be edited
editForm = GameForm(request.POST or None,
instance=item) # grabs the form and fills it out with the info from the model
if request.method == 'POST': # makes sure the it has retrieved the information before continuing this route
if editForm.is_valid(): # makes sure that the information entered is valid to requirements
item.save() # saves the new information entered to be used later
return redirect('index') # returns to index because you are done, info will display on screen
context = \
{
'form': editForm # this gets the form to be shown and changed
}
return render(request, 'gameApp/editForm.html',
context) # renders the page for viewing the form information to edit
def delete(request, gameID): # starts down the path to delete the game, this build does not have a conformation page
deleteForm = get_object_or_404(GameModel, pk=gameID) # collects the form to delete
deleteForm.delete() # deletes the form IMMEDIATELY UPON CLICKING THE LINK
return redirect('index') # returns to the index without turning back
def saveNewGame(request): # this will upon submitting a new game save it and redirect to the index
gameCollector = GameCollectorModel.objects.get(userIDkey=request.user) # this gets the game collector
gameForm = GameForm(request.POST) # this will retrieve the
# collects the data inputed by user to be saved
newGame = gameForm.save(commit=None) # saves the data for future use
newGame.gameMakeIdKey = gameCollector # makes the gameCollector or person signed in to use
newGame.save() # I ain't loosing no points for not commenting every line of code, this one saves the game made with the foreignkey linked
return redirect('index') # goes back to the index page
| 55.781609 | 141 | 0.696064 | from django.contrib.auth.decorators import \
login_required
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse
from .forms import GameModel, GameCollectorModel, GameForm, \
GameCollectorForm
from django.contrib.auth.models import User
def index(request):
gameList = GameModel.objects.all()
def index(request):
if request.user.is_authenticated:
gameCollector = GameCollectorModel.objects.get(userIDkey=request.user)
gameList = GameModel.objects.filter(gameMakeIdKey=gameCollector)
else:
gameList = ''
context = \
{
'gameList': gameList
}
return render(request, 'gameApp/index.html', context)
def newUser(request):
userForm = GameCollectorForm(request.POST or None)
if userForm.is_valid():
if request.POST['password1'] == request.POST['password2']:
newUser = User.objects.create_user(request.POST['username'], '',
request.POST['password1'])
collector = userForm.save(commit=None)
collector.userIDkey = newUser
collector.save()
return redirect('index')
context = \
{
'form': userForm
}
return render(request, 'gameApp/newUser.html',
context)
def newGame(request): # this will render the newgame page to save a new game
gameForm = GameForm() # this collects the form to use
context = \
{
'form': gameForm # this changed the name to fit the context for easy to input
}
return render(request, 'gameApp/newGame.html', context) # renders the page to make the game
def edit(request, gameID):
item = get_object_or_404(GameModel, pk=gameID) # gets the game model to be edited
editForm = GameForm(request.POST or None,
instance=item) # grabs the form and fills it out with the info from the model
if request.method == 'POST': # makes sure the it has retrieved the information before continuing this route
if editForm.is_valid(): # makes sure that the information entered is valid to requirements
item.save() # saves the new information entered to be used later
return redirect('index') # returns to index because you are done, info will display on screen
context = \
{
'form': editForm # this gets the form to be shown and changed
}
return render(request, 'gameApp/editForm.html',
context) # renders the page for viewing the form information to edit
def delete(request, gameID): # starts down the path to delete the game, this build does not have a conformation page
deleteForm = get_object_or_404(GameModel, pk=gameID) # collects the form to delete
deleteForm.delete() # deletes the form IMMEDIATELY UPON CLICKING THE LINK
return redirect('index') # returns to the index without turning back
def saveNewGame(request): # this will upon submitting a new game save it and redirect to the index
gameCollector = GameCollectorModel.objects.get(userIDkey=request.user) # this gets the game collector
gameForm = GameForm(request.POST) # this will retrieve the
# collects the data inputed by user to be saved
newGame = gameForm.save(commit=None) # saves the data for future use
newGame.gameMakeIdKey = gameCollector # makes the gameCollector or person signed in to use
newGame.save() # I ain't loosing no points for not commenting every line of code, this one saves the game made with the foreignkey linked
return redirect('index')
| true | true |
f73c9ccedcf7eefff848fca13db014327c33c491 | 4,935 | py | Python | azext_iot/tests/test_iot_utility_unit.py | digimaun/azure-iot-cli-extension | 3970b0441c770ccf206f7c8e4494a30992361a9f | [
"MIT"
] | null | null | null | azext_iot/tests/test_iot_utility_unit.py | digimaun/azure-iot-cli-extension | 3970b0441c770ccf206f7c8e4494a30992361a9f | [
"MIT"
] | 1 | 2020-07-13T19:57:48.000Z | 2020-07-13T19:57:48.000Z | azext_iot/tests/test_iot_utility_unit.py | digimaun/azure-iot-cli-extension | 3970b0441c770ccf206f7c8e4494a30992361a9f | [
"MIT"
] | null | null | null | import pytest
from knack.util import CLIError
from azext_iot.common.utility import validate_min_python_version
from azext_iot.common.deps import ensure_uamqp
from azext_iot._validators import mode2_iot_login_handler
from azext_iot.constants import EVENT_LIB
class TestMinPython():
@pytest.mark.parametrize("pymajor, pyminor", [
(3, 6),
(3, 4),
(2, 7)
])
def test_min_python(self, mocker, pymajor, pyminor):
version_mock = mocker.patch('azext_iot.common.utility.sys.version_info')
version_mock.major = pymajor
version_mock.minor = pyminor
assert validate_min_python_version(2, 7)
@pytest.mark.parametrize("pymajor, pyminor, exception", [
(3, 6, SystemExit),
(3, 4, SystemExit),
(2, 7, SystemExit)
])
def test_min_python_error(self, mocker, pymajor, pyminor, exception):
version_mock = mocker.patch('azext_iot.common.utility.sys.version_info')
version_mock.major = 2
version_mock.minor = 6
with pytest.raises(exception):
validate_min_python_version(pymajor, pyminor)
class TestMode2Handler():
@pytest.mark.parametrize("hub_name, dps_name, login", [
('myhub', '[]', None),
('[]', 'mydps', None),
(None, None, 'mylogin'),
('myhub', '[]', 'mylogin'),
('[]', 'mydps', 'mylogin'),
('[]', '[]', '[]'),
('myhub', '[]', '[]'),
('[]', 'mydps', '[]'),
])
def test_mode2_login(self, mocker, hub_name, dps_name, login):
mock_cmd = mocker.MagicMock(name='mock cmd')
mock_cmd.name = 'iot '
mock_ns = mocker.MagicMock(name='mock ns')
if login != '[]':
mock_ns.login = login
if hub_name != '[]':
mock_ns.hub_name = hub_name
if dps_name != '[]':
mock_ns.dps_name = dps_name
mode2_iot_login_handler(mock_cmd, mock_ns)
@pytest.mark.parametrize("hub_name, dps_name, login", [
(None, None, None)
])
def test_mode2_login_error(self, mocker, hub_name, dps_name, login):
mock_cmd = mocker.MagicMock(name='mock cmd')
mock_cmd.name = 'iot '
mock_ns = mocker.MagicMock(name='mock ns')
if login != '[]':
mock_ns.login = login
if hub_name != '[]':
mock_ns.hub_name = hub_name
if dps_name != '[]':
mock_ns.dps_name = dps_name
with pytest.raises(CLIError):
mode2_iot_login_handler(mock_cmd, mock_ns)
class TestEnsureUamqp():
@pytest.fixture()
def uamqp_scenario(self, mocker):
get_uamqp = mocker.patch('azext_iot.common.deps.get_uamqp_ext_version')
update_uamqp = mocker.patch('azext_iot.common.deps.update_uamqp_ext_version')
installer = mocker.patch('azext_iot.common.deps.install')
installer.return_value = True
get_uamqp.return_value = EVENT_LIB[1]
test_import = mocker.patch('azext_iot.common.deps.test_import')
test_import.return_value = True
m_exit = mocker.patch('azext_iot.common.deps.sys.exit')
return {'get_uamqp': get_uamqp, 'update_uamqp': update_uamqp,
'installer': installer, 'test_import': test_import, 'exit': m_exit}
@pytest.mark.parametrize("case, extra_input, external_input", [
('importerror', None, 'y'),
('importerror', None, 'n'),
('importerror', 'yes;', None),
('compatibility', None, 'y'),
('compatibility', None, 'n'),
('compatibility', 'yes;', None),
('repair', 'repair;', 'y'),
('repair', 'repair;yes;', None),
('repair', 'repair;', 'n')
])
def test_ensure_uamqp_version(self, mocker, uamqp_scenario,
case, extra_input, external_input):
if case == 'importerror':
uamqp_scenario['test_import'].return_value = False
elif case == 'compatibility':
uamqp_scenario['get_uamqp'].return_value = '0.0.0'
from functools import partial
kwargs = {}
user_cancelled = True
if extra_input and 'yes;' in extra_input:
kwargs['yes'] = True
user_cancelled = False
if extra_input and 'repair;' in extra_input:
kwargs['repair'] = True
if external_input:
mocked_input = mocker.patch('azext_iot.common.deps.input')
mocked_input.return_value = external_input
if external_input.lower() == 'y':
user_cancelled = False
method = partial(ensure_uamqp, mocker.MagicMock(), **kwargs)
method()
if user_cancelled:
assert uamqp_scenario['exit'].call_args
else:
install_args = uamqp_scenario['installer'].call_args
assert install_args[0][0] == EVENT_LIB[0]
assert install_args[1]['custom_version'] == '>={},<{}'.format(EVENT_LIB[1], EVENT_LIB[2])
| 36.828358 | 101 | 0.599189 | import pytest
from knack.util import CLIError
from azext_iot.common.utility import validate_min_python_version
from azext_iot.common.deps import ensure_uamqp
from azext_iot._validators import mode2_iot_login_handler
from azext_iot.constants import EVENT_LIB
class TestMinPython():
@pytest.mark.parametrize("pymajor, pyminor", [
(3, 6),
(3, 4),
(2, 7)
])
def test_min_python(self, mocker, pymajor, pyminor):
version_mock = mocker.patch('azext_iot.common.utility.sys.version_info')
version_mock.major = pymajor
version_mock.minor = pyminor
assert validate_min_python_version(2, 7)
@pytest.mark.parametrize("pymajor, pyminor, exception", [
(3, 6, SystemExit),
(3, 4, SystemExit),
(2, 7, SystemExit)
])
def test_min_python_error(self, mocker, pymajor, pyminor, exception):
version_mock = mocker.patch('azext_iot.common.utility.sys.version_info')
version_mock.major = 2
version_mock.minor = 6
with pytest.raises(exception):
validate_min_python_version(pymajor, pyminor)
class TestMode2Handler():
@pytest.mark.parametrize("hub_name, dps_name, login", [
('myhub', '[]', None),
('[]', 'mydps', None),
(None, None, 'mylogin'),
('myhub', '[]', 'mylogin'),
('[]', 'mydps', 'mylogin'),
('[]', '[]', '[]'),
('myhub', '[]', '[]'),
('[]', 'mydps', '[]'),
])
def test_mode2_login(self, mocker, hub_name, dps_name, login):
mock_cmd = mocker.MagicMock(name='mock cmd')
mock_cmd.name = 'iot '
mock_ns = mocker.MagicMock(name='mock ns')
if login != '[]':
mock_ns.login = login
if hub_name != '[]':
mock_ns.hub_name = hub_name
if dps_name != '[]':
mock_ns.dps_name = dps_name
mode2_iot_login_handler(mock_cmd, mock_ns)
@pytest.mark.parametrize("hub_name, dps_name, login", [
(None, None, None)
])
def test_mode2_login_error(self, mocker, hub_name, dps_name, login):
mock_cmd = mocker.MagicMock(name='mock cmd')
mock_cmd.name = 'iot '
mock_ns = mocker.MagicMock(name='mock ns')
if login != '[]':
mock_ns.login = login
if hub_name != '[]':
mock_ns.hub_name = hub_name
if dps_name != '[]':
mock_ns.dps_name = dps_name
with pytest.raises(CLIError):
mode2_iot_login_handler(mock_cmd, mock_ns)
class TestEnsureUamqp():
@pytest.fixture()
def uamqp_scenario(self, mocker):
get_uamqp = mocker.patch('azext_iot.common.deps.get_uamqp_ext_version')
update_uamqp = mocker.patch('azext_iot.common.deps.update_uamqp_ext_version')
installer = mocker.patch('azext_iot.common.deps.install')
installer.return_value = True
get_uamqp.return_value = EVENT_LIB[1]
test_import = mocker.patch('azext_iot.common.deps.test_import')
test_import.return_value = True
m_exit = mocker.patch('azext_iot.common.deps.sys.exit')
return {'get_uamqp': get_uamqp, 'update_uamqp': update_uamqp,
'installer': installer, 'test_import': test_import, 'exit': m_exit}
@pytest.mark.parametrize("case, extra_input, external_input", [
('importerror', None, 'y'),
('importerror', None, 'n'),
('importerror', 'yes;', None),
('compatibility', None, 'y'),
('compatibility', None, 'n'),
('compatibility', 'yes;', None),
('repair', 'repair;', 'y'),
('repair', 'repair;yes;', None),
('repair', 'repair;', 'n')
])
def test_ensure_uamqp_version(self, mocker, uamqp_scenario,
case, extra_input, external_input):
if case == 'importerror':
uamqp_scenario['test_import'].return_value = False
elif case == 'compatibility':
uamqp_scenario['get_uamqp'].return_value = '0.0.0'
from functools import partial
kwargs = {}
user_cancelled = True
if extra_input and 'yes;' in extra_input:
kwargs['yes'] = True
user_cancelled = False
if extra_input and 'repair;' in extra_input:
kwargs['repair'] = True
if external_input:
mocked_input = mocker.patch('azext_iot.common.deps.input')
mocked_input.return_value = external_input
if external_input.lower() == 'y':
user_cancelled = False
method = partial(ensure_uamqp, mocker.MagicMock(), **kwargs)
method()
if user_cancelled:
assert uamqp_scenario['exit'].call_args
else:
install_args = uamqp_scenario['installer'].call_args
assert install_args[0][0] == EVENT_LIB[0]
assert install_args[1]['custom_version'] == '>={},<{}'.format(EVENT_LIB[1], EVENT_LIB[2])
| true | true |
f73c9cd2d12bbf09d2a5a7e3374e24a023d4e895 | 295 | py | Python | ML/Data_Aug.py | PrathmeshBele/Autumn-of-Automation | c33a3725826807f3f9c9427a73922379b07f58b9 | [
"MIT"
] | null | null | null | ML/Data_Aug.py | PrathmeshBele/Autumn-of-Automation | c33a3725826807f3f9c9427a73922379b07f58b9 | [
"MIT"
] | null | null | null | ML/Data_Aug.py | PrathmeshBele/Autumn-of-Automation | c33a3725826807f3f9c9427a73922379b07f58b9 | [
"MIT"
] | null | null | null | import cv2 as cv
import numpy as np
import os
import cv2
pth = '/home/prathmesh/Desktop/UMIC/Autumn-of-Automation/ML/UMIC/Yes'
obj = os.listdir(pth)
for j in obj:
img = cv2.imread(pth + '/' + j)
img_flip_lr = cv2.flip(img, 1)
cv2.imwrite('A{}.jpg'.format(j.replace('.jpg','')), img_flip_lr)
| 26.818182 | 69 | 0.688136 | import cv2 as cv
import numpy as np
import os
import cv2
pth = '/home/prathmesh/Desktop/UMIC/Autumn-of-Automation/ML/UMIC/Yes'
obj = os.listdir(pth)
for j in obj:
img = cv2.imread(pth + '/' + j)
img_flip_lr = cv2.flip(img, 1)
cv2.imwrite('A{}.jpg'.format(j.replace('.jpg','')), img_flip_lr)
| true | true |
f73c9e65fda97de61814f393f9ef74fd59c57a16 | 5,621 | py | Python | CalibMuon/DTCalibration/python/dtT0WireCalibration_cfg.py | samarendran23/cmssw | 849dd9897db9b894ca83e1b630a3c1eecafd6097 | [
"Apache-2.0"
] | 6 | 2017-09-08T14:12:56.000Z | 2022-03-09T23:57:01.000Z | CalibMuon/DTCalibration/python/dtT0WireCalibration_cfg.py | samarendran23/cmssw | 849dd9897db9b894ca83e1b630a3c1eecafd6097 | [
"Apache-2.0"
] | 545 | 2017-09-19T17:10:19.000Z | 2022-03-07T16:55:27.000Z | CalibMuon/DTCalibration/python/dtT0WireCalibration_cfg.py | p2l1pfp/cmssw | 9bda22bf33ecf18dd19a3af2b3a8cbdb1de556a9 | [
"Apache-2.0"
] | 14 | 2017-10-04T09:47:21.000Z | 2019-10-23T18:04:45.000Z | import FWCore.ParameterSet.Config as cms
process = cms.Process("PROD")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.debugModules = cms.untracked.vstring('*')
process.MessageLogger.destinations = cms.untracked.vstring('cerr')
process.MessageLogger.categories.append('resolution')
process.MessageLogger.cerr = cms.untracked.PSet(
FwkReport = cms.untracked.PSet(
limit = cms.untracked.int32(100),
reportEvery = cms.untracked.int32(1000)
),
threshold = cms.untracked.string('DEBUG'),
noLineBreaks = cms.untracked.bool(False),
DEBUG = cms.untracked.PSet(limit = cms.untracked.int32(0)),
INFO = cms.untracked.PSet(limit = cms.untracked.int32(0)),
resolution = cms.untracked.PSet(limit = cms.untracked.int32(-1))
)
process.load("Configuration.StandardSequences.GeometryDB_cff")
process.load("Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff")
process.GlobalTag.globaltag = ""
process.load("CondCore.CondDB.CondDB_cfi")
process.load("DQMServices.Core.DQM_cfg")
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'/store/data/Commissioning2018/MiniDaq/RAW/v1/000/312/774/00000/CCADE144-9431-E811-9641-FA163E220C5C.root'
# '/store/data/Run2012C/MiniDaq/RAW/v1/000/203/540/AA9053D9-F306-E211-80A4-001D09F248F8.root',
#'/store/data/Run2012C/MiniDaq/RAW/v1/000/199/204/148CF2AC-CAD0-E111-A056-001D09F291D2.root',
### '/store/data/Run2012C/MiniDaq/RAW/v1/000/200/132/D0E088D3-D7DC-E111-92D2-00237DDC5C24.root',
#'/store/data/Run2012C/MiniDaq/RAW/v1/000/198/510/0C50021A-F4C8-E111-B861-001D09F2512C.root',
#'/store/data/Run2012C/MiniDaq/RAW/v1/000/199/206/EC54DD2C-D3D0-E111-9019-5404A63886CE.root',
#'/store/data/Run2012C/MiniDaq/RAW/v1/000/200/131/E024E67D-D6DC-E111-A404-0025901D6288.root',
#'/store/data/Run2012C/MiniDaq/RAW/v1/000/201/074/92295641-C3E7-E111-899B-0025901D629C.root',
#'/store/data/Run2012C/MiniDaq/RAW/v1/000/200/133/ACB373F1-D9DC-E111-B891-003048F024FE.root',
#'/store/data/Run2012C/MiniDaq/RAW/v1/000/203/276/2CF5C87C-E303-E211-A314-001D09F28F25.root',
#'/store/data/Run2012C/MiniDaq/RAW/v1/000/203/535/1EEFEF95-F506-E211-A872-001D09F2906A.root',
#'/store/data/Run2012C/MiniDaq/RAW/v1/000/200/665/1C75364F-0EE3-E111-8021-BCAEC5329705.root',
#'/store/data/Run2012C/MiniDaq/RAW/v1/000/200/716/8A0AC842-A3E3-E111-A669-001D09F291D7.root'
)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )
# process.load("CalibMuon.DTCalibration.dt_offlineAnalysis_common_cff")
import EventFilter.DTRawToDigi.dturosunpacker_cfi
process.dtunpacker = EventFilter.DTRawToDigi.dturosunpacker_cfi.dturosunpacker.clone()
process.PoolDBOutputService = cms.Service("PoolDBOutputService",
process.CondDB,
timetype = cms.untracked.string('runnumber'),
toPut = cms.VPSet(cms.PSet(
record = cms.string('DTT0Rcd'),
tag = cms.string('t0')
))
)
process.PoolDBOutputService.connect = cms.string('sqlite_file:t0.db')
process.eventInfoProvider = cms.EDFilter("EventCoordinatesSource",
eventInfoFolder = cms.untracked.string('EventInfo/')
)
# test pulse monitoring
process.load("DQM.DTMonitorModule.dtDigiTask_TP_cfi")
process.load("DQM.DTMonitorClient.dtOccupancyTest_TP_cfi")
process.dtTPmonitor.readDB = False
process.dtTPmonitor.defaultTtrig = 600
process.dtTPmonitor.defaultTmax = 100
process.dtTPmonitor.inTimeHitsLowerBound = 0
process.dtTPmonitor.inTimeHitsUpperBound = 0
#file = open("tpDead.txt")
wiresToDebug = cms.untracked.vstring()
#for line in file:
# corrWire = line.split()[:6]
# #switch station/sector
# corrWire[1:3] = corrWire[2:0:-1]
# wire = ' '.join(corrWire)
# #print wire
# wiresToDebug.append(wire)
#file.close()
process.dtT0WireCalibration = cms.EDAnalyzer("DTT0Calibration",
correctByChamberMean = cms.bool(False),
# Cells for which you want the histos (default = None)
cellsWithHisto = wiresToDebug,
# Label to retrieve DT digis from the event
digiLabel = cms.untracked.string('dtunpacker'),
calibSector = cms.untracked.string('All'),
# Chose the wheel, sector (default = All)
calibWheel = cms.untracked.string('All'),
# Number of events to be used for the t0 per layer histos
eventsForWireT0 = cms.uint32(25000), #25000
# Name of the ROOT file which will contain the test pulse times per layer
rootFileName = cms.untracked.string('DTTestPulses.root'),
debug = cms.untracked.bool(False),
rejectDigiFromPeak = cms.uint32(50),
# Acceptance for TP peak width
tpPeakWidth = cms.double(15.0),
# Number of events to be used for the t0 per layer histos
eventsForLayerT0 = cms.uint32(5000), #5000
timeBoxWidth = cms.uint32(300),
tpPeakWidthPerLayer = cms.double(2.0)
)
process.output = cms.OutputModule("PoolOutputModule",
outputCommands = cms.untracked.vstring('drop *',
'keep *_MEtoEDMConverter_*_*'),
fileName = cms.untracked.string('DQM.root')
)
process.load("DQMServices.Components.MEtoEDMConverter_cff")
#process.DQM.collectorHost = ''
process.p = cms.Path(process.dtunpacker*
process.dtTPmonitor+process.dtTPmonitorTest+
process.dtT0WireCalibration+
process.MEtoEDMConverter)
process.outpath = cms.EndPath(process.output)
| 43.238462 | 114 | 0.72923 | import FWCore.ParameterSet.Config as cms
process = cms.Process("PROD")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.debugModules = cms.untracked.vstring('*')
process.MessageLogger.destinations = cms.untracked.vstring('cerr')
process.MessageLogger.categories.append('resolution')
process.MessageLogger.cerr = cms.untracked.PSet(
FwkReport = cms.untracked.PSet(
limit = cms.untracked.int32(100),
reportEvery = cms.untracked.int32(1000)
),
threshold = cms.untracked.string('DEBUG'),
noLineBreaks = cms.untracked.bool(False),
DEBUG = cms.untracked.PSet(limit = cms.untracked.int32(0)),
INFO = cms.untracked.PSet(limit = cms.untracked.int32(0)),
resolution = cms.untracked.PSet(limit = cms.untracked.int32(-1))
)
process.load("Configuration.StandardSequences.GeometryDB_cff")
process.load("Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff")
process.GlobalTag.globaltag = ""
process.load("CondCore.CondDB.CondDB_cfi")
process.load("DQMServices.Core.DQM_cfg")
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'/store/data/Commissioning2018/MiniDaq/RAW/v1/000/312/774/00000/CCADE144-9431-E811-9641-FA163E220C5C.root'
Set( wantSummary = cms.untracked.bool(True) )
import EventFilter.DTRawToDigi.dturosunpacker_cfi
process.dtunpacker = EventFilter.DTRawToDigi.dturosunpacker_cfi.dturosunpacker.clone()
process.PoolDBOutputService = cms.Service("PoolDBOutputService",
process.CondDB,
timetype = cms.untracked.string('runnumber'),
toPut = cms.VPSet(cms.PSet(
record = cms.string('DTT0Rcd'),
tag = cms.string('t0')
))
)
process.PoolDBOutputService.connect = cms.string('sqlite_file:t0.db')
process.eventInfoProvider = cms.EDFilter("EventCoordinatesSource",
eventInfoFolder = cms.untracked.string('EventInfo/')
)
process.load("DQM.DTMonitorModule.dtDigiTask_TP_cfi")
process.load("DQM.DTMonitorClient.dtOccupancyTest_TP_cfi")
process.dtTPmonitor.readDB = False
process.dtTPmonitor.defaultTtrig = 600
process.dtTPmonitor.defaultTmax = 100
process.dtTPmonitor.inTimeHitsLowerBound = 0
process.dtTPmonitor.inTimeHitsUpperBound = 0
wiresToDebug = cms.untracked.vstring()
n = cms.EDAnalyzer("DTT0Calibration",
correctByChamberMean = cms.bool(False),
cellsWithHisto = wiresToDebug,
digiLabel = cms.untracked.string('dtunpacker'),
calibSector = cms.untracked.string('All'),
calibWheel = cms.untracked.string('All'),
eventsForWireT0 = cms.uint32(25000),
rootFileName = cms.untracked.string('DTTestPulses.root'),
debug = cms.untracked.bool(False),
rejectDigiFromPeak = cms.uint32(50),
tpPeakWidth = cms.double(15.0),
eventsForLayerT0 = cms.uint32(5000),
timeBoxWidth = cms.uint32(300),
tpPeakWidthPerLayer = cms.double(2.0)
)
process.output = cms.OutputModule("PoolOutputModule",
outputCommands = cms.untracked.vstring('drop *',
'keep *_MEtoEDMConverter_*_*'),
fileName = cms.untracked.string('DQM.root')
)
process.load("DQMServices.Components.MEtoEDMConverter_cff")
process.p = cms.Path(process.dtunpacker*
process.dtTPmonitor+process.dtTPmonitorTest+
process.dtT0WireCalibration+
process.MEtoEDMConverter)
process.outpath = cms.EndPath(process.output)
| true | true |
f73c9e8b4cd746bac87d04c011dcab2a7ab2fad1 | 808 | py | Python | sutler/installers/node.py | joeladam518/sutler | c306568bd2520c49a2bdae238281074b915ba0cc | [
"0BSD"
] | null | null | null | sutler/installers/node.py | joeladam518/sutler | c306568bd2520c49a2bdae238281074b915ba0cc | [
"0BSD"
] | null | null | null | sutler/installers/node.py | joeladam518/sutler | c306568bd2520c49a2bdae238281074b915ba0cc | [
"0BSD"
] | null | null | null | import os
from .installer import Installer
class NodeInstaller(Installer):
versions = ('14', '15', '16', '17')
__source_file_path = '/etc/apt/sources.list.d/nodesource.list'
def install(self, version: str) -> None:
if version not in self.versions:
self.ctx.fail('Invalid node version')
os.chdir(self.app.user.home)
self.app.os.exec(f"curl -sL \"https://deb.nodesource.com/setup_{version}.x\" | sudo -E bash -")
self.app.os.install('nodejs')
def uninstall(self) -> None:
os.chdir(self.app.user.home)
self.app.os.uninstall('nodejs')
# TODO: Do I have to remove the apt gpg key?
if os.path.exists(self.__source_file_path):
self.app.os.rm(self.__source_file_path, root=True)
self.app.os.update()
| 35.130435 | 103 | 0.62995 | import os
from .installer import Installer
class NodeInstaller(Installer):
versions = ('14', '15', '16', '17')
__source_file_path = '/etc/apt/sources.list.d/nodesource.list'
def install(self, version: str) -> None:
if version not in self.versions:
self.ctx.fail('Invalid node version')
os.chdir(self.app.user.home)
self.app.os.exec(f"curl -sL \"https://deb.nodesource.com/setup_{version}.x\" | sudo -E bash -")
self.app.os.install('nodejs')
def uninstall(self) -> None:
os.chdir(self.app.user.home)
self.app.os.uninstall('nodejs')
if os.path.exists(self.__source_file_path):
self.app.os.rm(self.__source_file_path, root=True)
self.app.os.update()
| true | true |
f73ca0f7853c8ff85f4b9e63d83ce7463ecdd88f | 2,515 | py | Python | vocab_program/helpers.py | datwheat/hsCode | 880425c86f043c72ee3eb5bd4d8ca715f61b773d | [
"MIT"
] | 1 | 2017-10-04T17:55:02.000Z | 2017-10-04T17:55:02.000Z | vocab_program/helpers.py | datwheat/hsCode | 880425c86f043c72ee3eb5bd4d8ca715f61b773d | [
"MIT"
] | null | null | null | vocab_program/helpers.py | datwheat/hsCode | 880425c86f043c72ee3eb5bd4d8ca715f61b773d | [
"MIT"
] | null | null | null | import os
import json
import sys
import random
import time
filename = "db.json"
def init():
"""Initialize Database"""
if os.path.isfile(filename) != True:
db = open(filename, 'w').close()
else:
db = open(filename, 'r')
try:
content = json.load(db)
except ValueError:
content = []
return content
def callback(func):
time.sleep(0.2)
res = input('\nWould you like to try again? [y]es or [n]o\n\t=>').lower()
if res == 'n' or res == 'no':
return
elif res == 'y' or res == 'yes':
func()
else:
callback(func)
def add():
"""Appends words to dictionary."""
content = init()
key = input('\nWhat is your term?\n\t=>')
definition = input('\nWhat is your definition?\n\t=>')
content.append({'key':key, 'definition': definition})
db = open(filename, 'w')
json.dump(content, db)
db.close()
callback(add)
def view():
"""View your dictionary"""
content = init()
index = 1
if len(content) == 0:
print('\nYou have no words. Add more.')
for i in content:
key = i['key']
definition = i['definition']
print('{}. {}: {}\n'.format(index, key, definition))
index += 1
callback(view)
def delete():
"""Remove a word from your dictionary."""
content = init()
newContent = []
print('\n\tWord Choices (Case Sensitive):')
for word in content:
print('\n\t{}'.format(word['key']))
wordToDel = input('\n\tWhat word do you want to delete?\n\t\t=>')
validList = []
vCount = 0
for word in content:
validList.append(word['key'])
for key in validList:
print('\n\tScanning..')
time.sleep(0.1)
vCount += 1
if key != wordToDel and vCount < len(validList):
continue
elif key == wordToDel:
print('\n\tFound. Deleting..')
time.sleep(1)
print('\n\t\tSuccess!')
break
else:
print("\nInvalid Input.")
delete()
for i in content:
if i['key'] != wordToDel:
newContent.append(i)
db = open(filename, 'w')
json.dump(newContent, db)
db.close()
callback(delete)
def destroy():
"""Wipes dictionary clear of anything"""
open(filename, 'w').close()
def menu():
"""Menu Function"""
def run(func):
"""Calls functions"""
func()
menu()
print('\n\t\tYour Personal dictionary!')
choice = input("\nDo you want to 'view', 'add', 'delete', or 'destroy'? Enter 'x' to exit.\n\t=>").lower()
if choice == 'view':
run(view)
elif choice == 'add':
run(add)
elif choice == 'delete':
run(delete)
elif choice == 'destroy':
run(destroy)
elif choice == 'x':
raise SystemExit
else:
print('\n\tInvalid input.')
menu() | 18.093525 | 107 | 0.619085 | import os
import json
import sys
import random
import time
filename = "db.json"
def init():
if os.path.isfile(filename) != True:
db = open(filename, 'w').close()
else:
db = open(filename, 'r')
try:
content = json.load(db)
except ValueError:
content = []
return content
def callback(func):
time.sleep(0.2)
res = input('\nWould you like to try again? [y]es or [n]o\n\t=>').lower()
if res == 'n' or res == 'no':
return
elif res == 'y' or res == 'yes':
func()
else:
callback(func)
def add():
content = init()
key = input('\nWhat is your term?\n\t=>')
definition = input('\nWhat is your definition?\n\t=>')
content.append({'key':key, 'definition': definition})
db = open(filename, 'w')
json.dump(content, db)
db.close()
callback(add)
def view():
content = init()
index = 1
if len(content) == 0:
print('\nYou have no words. Add more.')
for i in content:
key = i['key']
definition = i['definition']
print('{}. {}: {}\n'.format(index, key, definition))
index += 1
callback(view)
def delete():
content = init()
newContent = []
print('\n\tWord Choices (Case Sensitive):')
for word in content:
print('\n\t{}'.format(word['key']))
wordToDel = input('\n\tWhat word do you want to delete?\n\t\t=>')
validList = []
vCount = 0
for word in content:
validList.append(word['key'])
for key in validList:
print('\n\tScanning..')
time.sleep(0.1)
vCount += 1
if key != wordToDel and vCount < len(validList):
continue
elif key == wordToDel:
print('\n\tFound. Deleting..')
time.sleep(1)
print('\n\t\tSuccess!')
break
else:
print("\nInvalid Input.")
delete()
for i in content:
if i['key'] != wordToDel:
newContent.append(i)
db = open(filename, 'w')
json.dump(newContent, db)
db.close()
callback(delete)
def destroy():
open(filename, 'w').close()
def menu():
def run(func):
func()
menu()
print('\n\t\tYour Personal dictionary!')
choice = input("\nDo you want to 'view', 'add', 'delete', or 'destroy'? Enter 'x' to exit.\n\t=>").lower()
if choice == 'view':
run(view)
elif choice == 'add':
run(add)
elif choice == 'delete':
run(delete)
elif choice == 'destroy':
run(destroy)
elif choice == 'x':
raise SystemExit
else:
print('\n\tInvalid input.')
menu() | true | true |
f73ca226ad51dec7ce013725765072d606f59e6c | 295 | py | Python | server/domain/auth/exceptions.py | multi-coop/catalogage-donnees | 1d70401ff6c7b01ec051460a253cb105adf65911 | [
"MIT"
] | null | null | null | server/domain/auth/exceptions.py | multi-coop/catalogage-donnees | 1d70401ff6c7b01ec051460a253cb105adf65911 | [
"MIT"
] | 14 | 2022-01-25T17:56:52.000Z | 2022-01-28T17:47:59.000Z | server/domain/auth/exceptions.py | multi-coop/catalogage-donnees | 1d70401ff6c7b01ec051460a253cb105adf65911 | [
"MIT"
] | null | null | null | from ..common.exceptions import DoesNotExist
class UserDoesNotExist(DoesNotExist):
entity_name = "User"
class EmailAlreadyExists(Exception):
def __init__(self, email: str) -> None:
super().__init__(f"Email already exists: {email!r}")
class LoginFailed(Exception):
pass
| 19.666667 | 60 | 0.718644 | from ..common.exceptions import DoesNotExist
class UserDoesNotExist(DoesNotExist):
entity_name = "User"
class EmailAlreadyExists(Exception):
def __init__(self, email: str) -> None:
super().__init__(f"Email already exists: {email!r}")
class LoginFailed(Exception):
pass
| true | true |
f73ca32e4ae0c0f2000b94378926e073c76dc89a | 5,028 | py | Python | lexicon/providers/digitalocean.py | felixonmars/lexicon | 1ea6d5bc6cdfbb6a2299411dbfa7d2c3830344c3 | [
"MIT"
] | 1 | 2018-07-24T01:19:19.000Z | 2018-07-24T01:19:19.000Z | lexicon/providers/digitalocean.py | felixonmars/lexicon | 1ea6d5bc6cdfbb6a2299411dbfa7d2c3830344c3 | [
"MIT"
] | 1 | 2018-07-26T15:19:39.000Z | 2018-07-26T15:19:39.000Z | lexicon/providers/digitalocean.py | gbdlin/lexicon | 3e43404d35c9eb104966972e59937cf88797bc4c | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import print_function
import json
import logging
import requests
from .base import Provider as BaseProvider
logger = logging.getLogger(__name__)
def ProviderParser(subparser):
subparser.add_argument("--auth-token", help="specify token used authenticate to DNS provider")
class Provider(BaseProvider):
def __init__(self, options, engine_overrides=None):
super(Provider, self).__init__(options, engine_overrides)
self.domain_id = None
self.api_endpoint = self.engine_overrides.get('api_endpoint', 'https://api.digitalocean.com/v2')
def authenticate(self):
payload = self._get('/domains/{0}'.format(self.options['domain']))
self.domain_id = self.options['domain']
def create_record(self, type, name, content):
# check if record already exists
if len(self.list_records(type, name, content)) == 0:
record = {
'type': type,
'name': self._relative_name(name),
'data': content,
}
if type == 'CNAME':
record['data'] = record['data'].rstrip('.') + '.' # make sure a the data is always a FQDN for CNAMe.
payload = self._post('/domains/{0}/records'.format(self.domain_id), record)
logger.debug('create_record: %s', True)
return True
# List all records. Return an empty list if no records found
# type, name and content are used to filter records.
# If possible filter during the query, otherwise filter after response is received.
def list_records(self, type=None, name=None, content=None):
url = '/domains/{0}/records'.format(self.domain_id)
records = []
payload = {}
next = url
while next is not None:
payload = self._get(next)
if 'links' in payload \
and 'pages' in payload['links'] \
and 'next' in payload['links']['pages']:
next = payload['links']['pages']['next']
else:
next = None
for record in payload['domain_records']:
processed_record = {
'type': record['type'],
'name': "{0}.{1}".format(record['name'], self.domain_id),
'ttl': '',
'content': record['data'],
'id': record['id']
}
records.append(processed_record)
if type:
records = [record for record in records if record['type'] == type]
if name:
records = [record for record in records if record['name'] == self._full_name(name)]
if content:
records = [record for record in records if record['content'].lower() == content.lower()]
logger.debug('list_records: %s', records)
return records
# Create or update a record.
def update_record(self, identifier, type=None, name=None, content=None):
data = {}
if type:
data['type'] = type
if name:
data['name'] = self._relative_name(name)
if content:
data['data'] = content
payload = self._put('/domains/{0}/records/{1}'.format(self.domain_id, identifier), data)
logger.debug('update_record: %s', True)
return True
# Delete an existing record.
# If record does not exist, do nothing.
def delete_record(self, identifier=None, type=None, name=None, content=None):
delete_record_id = []
if not identifier:
records = self.list_records(type, name, content)
delete_record_id = [record['id'] for record in records]
else:
delete_record_id.append(identifier)
logger.debug('delete_records: %s', delete_record_id)
for record_id in delete_record_id:
payload = self._delete('/domains/{0}/records/{1}'.format(self.domain_id, record_id))
# is always True at this point, if a non 200 response is returned an error is raised.
logger.debug('delete_record: %s', True)
return True
# Helpers
def _request(self, action='GET', url='/', data=None, query_params=None):
if data is None:
data = {}
if query_params is None:
query_params = {}
default_headers = {
'Accept': 'application/json',
'Content-Type': 'application/json',
'Authorization': 'Bearer {0}'.format(self.options.get('auth_token'))
}
if not url.startswith(self.api_endpoint):
url = self.api_endpoint + url
r = requests.request(action, url, params=query_params,
data=json.dumps(data),
headers=default_headers)
r.raise_for_status() # if the request fails for any reason, throw an error.
if action == 'DELETE':
return ''
else:
return r.json()
| 35.659574 | 116 | 0.575179 | from __future__ import absolute_import
from __future__ import print_function
import json
import logging
import requests
from .base import Provider as BaseProvider
logger = logging.getLogger(__name__)
def ProviderParser(subparser):
subparser.add_argument("--auth-token", help="specify token used authenticate to DNS provider")
class Provider(BaseProvider):
    """Lexicon DNS provider backed by the DigitalOcean v2 API."""
    def __init__(self, options, engine_overrides=None):
        super(Provider, self).__init__(options, engine_overrides)
        # Set by authenticate(); DigitalOcean identifies domains by name.
        self.domain_id = None
        self.api_endpoint = self.engine_overrides.get('api_endpoint', 'https://api.digitalocean.com/v2')
    def authenticate(self):
        """Verify the domain exists (raises via _request on failure) and store its id."""
        payload = self._get('/domains/{0}'.format(self.options['domain']))
        self.domain_id = self.options['domain']
    def create_record(self, type, name, content):
        """Create a record unless an identical one already exists. Always returns True."""
        if len(self.list_records(type, name, content)) == 0:
            record = {
                'type': type,
                'name': self._relative_name(name),
                'data': content,
            }
            if type == 'CNAME':
                # CNAME targets must be fully qualified (trailing dot).
                record['data'] = record['data'].rstrip('.') + '.'
            payload = self._post('/domains/{0}/records'.format(self.domain_id), record)
        logger.debug('create_record: %s', True)
        return True
    def list_records(self, type=None, name=None, content=None):
        """Return all records for the domain, following API pagination.

        Optional type/name/content filters are applied client-side after
        the full record list is fetched.
        """
        url = '/domains/{0}/records'.format(self.domain_id)
        records = []
        payload = {}
        # NOTE: `next` shadows the builtin; it holds the next page URL or None.
        next = url
        while next is not None:
            payload = self._get(next)
            if 'links' in payload \
               and 'pages' in payload['links'] \
               and 'next' in payload['links']['pages']:
                next = payload['links']['pages']['next']
            else:
                next = None
            for record in payload['domain_records']:
                processed_record = {
                    'type': record['type'],
                    'name': "{0}.{1}".format(record['name'], self.domain_id),
                    'ttl': '',
                    'content': record['data'],
                    'id': record['id']
                }
                records.append(processed_record)
        if type:
            records = [record for record in records if record['type'] == type]
        if name:
            records = [record for record in records if record['name'] == self._full_name(name)]
        if content:
            records = [record for record in records if record['content'].lower() == content.lower()]
        logger.debug('list_records: %s', records)
        return records
    def update_record(self, identifier, type=None, name=None, content=None):
        """Update the record with the given identifier; only truthy fields are sent."""
        data = {}
        if type:
            data['type'] = type
        if name:
            data['name'] = self._relative_name(name)
        if content:
            data['data'] = content
        payload = self._put('/domains/{0}/records/{1}'.format(self.domain_id, identifier), data)
        logger.debug('update_record: %s', True)
        return True
    def delete_record(self, identifier=None, type=None, name=None, content=None):
        """Delete by identifier, or all records matching type/name/content.

        Always returns True; a failed API call raises from _request instead.
        """
        delete_record_id = []
        if not identifier:
            records = self.list_records(type, name, content)
            delete_record_id = [record['id'] for record in records]
        else:
            delete_record_id.append(identifier)
        logger.debug('delete_records: %s', delete_record_id)
        for record_id in delete_record_id:
            payload = self._delete('/domains/{0}/records/{1}'.format(self.domain_id, record_id))
        logger.debug('delete_record: %s', True)
        return True
    def _request(self, action='GET', url='/', data=None, query_params=None):
        """Send an authenticated JSON request; DELETE returns '' (no body)."""
        if data is None:
            data = {}
        if query_params is None:
            query_params = {}
        default_headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
            'Authorization': 'Bearer {0}'.format(self.options.get('auth_token'))
        }
        # Accept either a bare path or a fully-qualified URL.
        if not url.startswith(self.api_endpoint):
            url = self.api_endpoint + url
        r = requests.request(action, url, params=query_params,
                             data=json.dumps(data),
                             headers=default_headers)
        # Raise for any non-2xx response.
        r.raise_for_status()
        if action == 'DELETE':
            return ''
        else:
            return r.json()
| true | true |
f73ca4d27cea6b93175f0c45c03f59df6011ba6f | 534 | py | Python | backend/sql.py | digitaltembo/stylobate | c22dbbb671612b2c95f84b7ee95dcb40f1fb6baa | [
"MIT"
] | 4 | 2020-07-29T02:01:41.000Z | 2022-02-19T13:11:30.000Z | backend/sql.py | digitaltembo/stylobate | c22dbbb671612b2c95f84b7ee95dcb40f1fb6baa | [
"MIT"
] | 4 | 2021-03-11T02:00:08.000Z | 2022-02-19T05:07:33.000Z | backend/sql.py | digitaltembo/stylobate | c22dbbb671612b2c95f84b7ee95dcb40f1fb6baa | [
"MIT"
] | null | null | null | from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from utils.config import DB_URL
# SQLALCHEMY_DATABASE_URL = "postgresql://user:password@postgresserver/db"
engine = create_engine(
DB_URL, connect_args={"check_same_thread": False}
)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
# Dependency
def get_db():
db = SessionLocal()
try:
yield db
finally:
db.close() | 25.428571 | 75 | 0.758427 | from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from utils.config import DB_URL
engine = create_engine(
DB_URL, connect_args={"check_same_thread": False}
)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
def get_db():
db = SessionLocal()
try:
yield db
finally:
db.close() | true | true |
f73ca519ae721a4299f7081ea11fc969ddeb393b | 1,337 | py | Python | app/core/tests/test_admin.py | danoscarmike/recipe-app-api | 530461aa48a94b7b189721de03b3f5f18aef09fe | [
"MIT"
] | null | null | null | app/core/tests/test_admin.py | danoscarmike/recipe-app-api | 530461aa48a94b7b189721de03b3f5f18aef09fe | [
"MIT"
] | null | null | null | app/core/tests/test_admin.py | danoscarmike/recipe-app-api | 530461aa48a94b7b189721de03b3f5f18aef09fe | [
"MIT"
] | null | null | null | from django.test import Client, TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
    """Smoke tests for the custom user model's Django admin pages."""

    def setUp(self):
        """Create an admin user (logged in via the test client) and a plain user."""
        self.client = Client()
        user_model = get_user_model()
        self.admin_user = user_model.objects.create_superuser(
            email='admin@danoscarmike.com',
            password='SillyPassword123'
        )
        self.user = user_model.objects.create_user(
            email='test@danoscarmike.com',
            password='ReallySillyPassword123',
            name='Dummy user'
        )
        self.client.force_login(self.admin_user)

    def test_users_listed(self):
        """Test that users are listed on user page"""
        listing_url = reverse('admin:core_user_changelist')
        response = self.client.get(listing_url)
        self.assertContains(response, self.user.name)
        self.assertContains(response, self.user.email)

    def test_user_change_page(self):
        """Test that the user edit page works"""
        edit_url = reverse('admin:core_user_change', args=[self.user.id])
        self.assertEqual(self.client.get(edit_url).status_code, 200)

    def test_create_user_page(self):
        """Test that the create user page works"""
        add_url = reverse('admin:core_user_add')
        self.assertEqual(self.client.get(add_url).status_code, 200)
| 32.609756 | 68 | 0.646971 | from django.test import Client, TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
    """Smoke tests for the custom user model's Django admin pages."""
    def setUp(self):
        """Create an admin user (logged in via the test client) and a plain user."""
        self.client = Client()
        self.admin_user = get_user_model().objects.create_superuser(
            email='admin@danoscarmike.com',
            password='SillyPassword123'
        )
        self.client.force_login(self.admin_user)
        self.user = get_user_model().objects.create_user(
            email='test@danoscarmike.com',
            password='ReallySillyPassword123',
            name='Dummy user'
        )
    def test_users_listed(self):
        """Test that users are listed on the admin user changelist page."""
        url = reverse('admin:core_user_changelist')
        res = self.client.get(url)
        self.assertContains(res, self.user.name)
        self.assertContains(res, self.user.email)
    def test_user_change_page(self):
        """Test that the user edit page renders successfully."""
        url = reverse('admin:core_user_change', args=[self.user.id])
        res = self.client.get(url)
        self.assertEqual(res.status_code, 200)
    def test_create_user_page(self):
        """Test that the create-user page renders successfully."""
        url = reverse('admin:core_user_add')
        res = self.client.get(url)
        self.assertEqual(res.status_code, 200)
| true | true |
f73caa96f70355b346f59787c08625aa604d2c31 | 9,857 | py | Python | obztak/seeing.py | kadrlica/obztak | d24a8fe0659f2602f5d87cdfd99ebcf5224e418c | [
"MIT"
] | 4 | 2019-01-18T20:34:29.000Z | 2022-01-26T14:35:31.000Z | obztak/seeing.py | kadrlica/obztak | d24a8fe0659f2602f5d87cdfd99ebcf5224e418c | [
"MIT"
] | 8 | 2017-06-22T19:12:31.000Z | 2020-01-27T03:52:59.000Z | obztak/seeing.py | kadrlica/obztak | d24a8fe0659f2602f5d87cdfd99ebcf5224e418c | [
"MIT"
] | 3 | 2017-12-15T21:51:23.000Z | 2019-08-10T05:07:53.000Z | #!/usr/bin/env python
"""
Generic python script.
"""
__author__ = "Alex Drlica-Wagner"
from collections import OrderedDict as odict
import logging
import copy
import numpy as np
import pandas as pd
import dateutil.parser
import ephem
from obztak.utils import fileio
from obztak.utils.date import datestring
from obztak.utils.database import Database
# These are nominal transformation values from Eric Neilsen
# WAVE[x] = (lambda[x]/lambda[i])**0.2
WAVE = odict([
( 'u' , 0.86603 ), # u (380nm) -> i (780nm)
( 'g' , 0.9067 ), # g (480nm) -> i (780nm)
( 'r' , 0.9609 ), # r (640nm) -> i (780nm)
( 'i' , 1.0 ), # i (780nm) -> i (780nm)
( 'z' , 1.036 ), # z (920nm) -> i (780nm)
( 'Y' , 1.0523 ), # Y (990nm) -> i (780nm)
('dimm', 1/1.0916 ), # dimm (500 nm)->i (780nm)
('VR' , 0.9551 ), # VR (620 nm)->i (780nm)
])
WAVE_DF = pd.DataFrame({'filter':WAVE.keys(),'trans':WAVE.values()})
DECAMINST = 0.5 # DECam instrumental contribution to the PSF [arcsec]
DIMMINST = 0.0 # DIMM instrumental contribution to the PSF [arcsec]
def convert(fwhm_1,
            band_1='dimm', airmass_1=1.0, inst_1=DIMMINST,
            band_2='i', airmass_2=1.0, inst_2=DECAMINST):
    """
    Convert observed seeing value to another band and airmass.

    Parameters:
    -----------
    fwhm_1 : input fwhm [arcsec]
    band_1 : input band ['u','g','r','i','z','Y','VR','dimm'] (scalar or array)
    airmass_1: input airmass
    inst_1 : instrumental contribution to the observed psf [arcsec]
    band_2 : output band ['u','g','r','i','z','Y','VR','dimm'] (scalar or array)
    airmass_2 : output airmass
    inst_2 : instrumental contribution to the output psf [arcsec]

    Returns:
    --------
    fwhm_2 : output fwhm [arcsec]
    """
    # Remove the input instrumental contribution in quadrature.
    fwhm = np.sqrt(fwhm_1**2 - inst_1**2)

    # BUGFIX: the previous implementation looked up array-valued bands with
    # WAVE_DF.merge(...); an inner merge orders its rows by the left frame
    # (WAVE_DF), not by the input band array, so the transmission factors
    # could be misaligned with the entries of fwhm_1. An explicit per-element
    # lookup preserves input order and duplicates. Bytes keys (from 'S4'
    # recarray columns on Python 3) are decoded before the lookup.
    def _trans(band):
        if np.isscalar(band):
            return WAVE[band]
        return np.array([WAVE[b.decode() if isinstance(b, bytes) else b]
                         for b in band])

    wave_1 = _trans(band_1)
    wave_2 = _trans(band_2)

    # Wavelength scaling and X**0.6 airmass dependence, then re-add the
    # output instrumental contribution in quadrature.
    fwhm_2 = fwhm * (wave_1/wave_2) * (airmass_2/airmass_1)**(0.6)
    return np.hypot(fwhm_2, inst_2)
class Seeing():
    """Class to manage seeing data. Seeing data is stored in two member variables:

    self.raw : the raw data before transformation
    self.data: seeing data transformed atmospheric i-band zenith

    The two values differ in that self.raw can have any source and
    includes the instrumental contribution. In contrast, self.data is
    the "atmospheric" i-band FWHM (arcsec). To get a prediction of the
    observed PSF, use `get_fwhm`.
    """
    # Record layout shared by self.raw and self.data.
    DTYPE = [('date','<M8[ns]'),('fwhm',float),('airmass',float),('filter','S4')]
    def __init__(self, date=None, db='fnal', filename=None):
        """
        Parameters:
        -----------
        date     : reference date string (None = leave unset; 'now' = current time)
        db       : database profile suffix (stored as 'db-<db>')
        filename : optional CSV file to read instead of querying the database
        """
        self.set_date(date)
        self.df = self.read_file(filename)
        self.db = 'db-'+db
    def set_date(self, date):
        """Parse and store the reference date ('now' uses the ephem clock)."""
        if date is None:
            #NOOP (consistent with Tactician)
            return
        elif date == 'now':
            self.date = dateutil.parser.parse(datestring(ephem.now()))
        else:
            self.date = dateutil.parser.parse(date)
    def get_fwhm(self, timedelta='15m', band='i', airmass=1.0, inst=DECAMINST):
        """Calculate the predicted PSF FWHM (arcsec).

        Parameters:
        -----------
        timedelta : time range to use to estimate the psf
        band : output band
        airmass : output airmass
        inst : output instrument contribution

        Returns:
        --------
        fwhm : predicted fwhm (arcsec)
        """
        timedelta = pd.Timedelta(timedelta)
        self.load_data(timedelta=max(3*timedelta,pd.Timedelta('1h')))
        dt = pd.DatetimeIndex(self.data['date'])
        previous = slice(-1,None) # most recent exposure
        recent = (dt < self.date) & (dt > (self.date - timedelta))
        ancient = (dt < (self.date - timedelta)) & (dt > (self.date - 2*timedelta))
        # Nominal atmospheric psf i-band zenith fwhm = 0.9"
        xmu = np.log10(0.74833) # sqrt(0.9**2 - 0.5**2)
        if not len(self.data):
            # No data, use the mean and print a warning
            logging.warn("No fwhm data available; using DECam median")
            xpred = xmu
        elif np.any(recent) and np.any(ancient):
            # Weighted median of recent and ancient exposures
            logging.debug("Seeing from recent and ancient exposures")
            # Log of the observed atmospheric psf i-band zenith
            x = np.log10([np.median(self.data[recent]['fwhm']),
                          np.median(self.data[ancient]['fwhm'])])
            # Predicted log of the atmospheric psf
            # NB: These constants were derived for timedelta=5min
            # they may not hold for arbitrary time windows.
            xpred = xmu + 0.8 * (x[0] - xmu) + 0.14 * (x[1] - xmu)
        elif np.any(recent):
            # Median of the log of the observed atmospheric psf i-band zenith
            logging.debug("Seeing from recent exposures")
            xpred = np.log10(np.median(self.data[recent]['fwhm']))
        else:
            # Log of the i-band zenith fwhm from the previous exposure
            logging.debug("Seeing from previous exposure")
            xpred = np.log10(np.median(self.data[previous]['fwhm']))
        # Transform the i-band zenith estimate to the requested band/airmass
        # and re-add the instrumental contribution.
        fwhm_pred = convert(10**xpred,
                            band_1='i' , airmass_1=1.0 , inst_1=0.0,
                            band_2=band, airmass_2=airmass, inst_2=inst)
        #import pdb; pdb.set_trace()
        return fwhm_pred
class DimmSeeing(Seeing):
    """Estimate seeing from the DIMM."""
    @classmethod
    def read_file(cls, filename):
        """Read DIMM data from a CSV with columns (date, fwhm); None if no file."""
        if filename is None: return None
        df = pd.read_csv(filename,names=['date','fwhm'],
                         parse_dates=['date'],index_col=['date'])
        return df
    def get_data(self, date=None, timedelta='30m'):
        """Fetch raw DIMM records between date-timedelta and date (file or DB)."""
        self.set_date(date)
        tmax = self.date
        tmin = self.date - pd.Timedelta(timedelta)
        if self.df is None:
            # Don't want to create the DB each time?
            db = Database(self.db)
            db.connect()
            query ="""
            select date, dimm2see as fwhm from exposure
            where date > '%s' and date < '%s'
            and dimm2see is not NULL
            """%(tmin, tmax)
            logging.debug(query)
            raw = db.query2rec(query)
        else:
            sel = (self.df.index > tmin) & (self.df.index < tmax)
            raw = self.df[sel].to_records()
        return raw
    def load_data(self, date=None, timedelta='30m'):
        """Populate self.raw (observed DIMM values) and self.data (i-band zenith)."""
        raw = self.get_data(date, timedelta)
        # Save the raw dimm values
        self.raw = np.recarray(len(raw),dtype=self.DTYPE)
        self.raw['date'] = raw['date']
        self.raw['fwhm'] = raw['fwhm']
        self.raw['airmass'] = 1.0
        self.raw['filter'] = 'dimm'
        # Convert to i-band zenith
        self.data = copy.deepcopy(self.raw)
        self.data['filter'] = 'i'
        self.data['airmass'] = 1.0
        kwargs = dict(band_1='dimm', inst_1=DIMMINST, airmass_1=self.raw['airmass'])
        kwargs.update(band_2='i', inst_2=0.0 , airmass_2=self.data['airmass'])
        self.data['fwhm'] = convert(self.raw['fwhm'],**kwargs)
        return self.data
class QcSeeing(Seeing):
    """Estimate seeing from the DECam QC values."""
    @classmethod
    def read_file(cls, filename):
        """Read QC data from CSV with (date, fwhm, airmass, filter); None if no file."""
        if filename is None: return None
        df = pd.read_csv(filename,names=['date','fwhm','airmass','filter'],
                         parse_dates=['date'],index_col=['date'])
        return df
    def get_data(self, date=None, timedelta='30m'):
        """Fetch raw QC records between date-timedelta and date (file or DB).

        On a database failure an empty recarray is returned (best-effort).
        """
        self.set_date(date)
        tmax = self.date
        tmin = self.date - pd.Timedelta(timedelta)
        if self.df is None:
            # Don't want to create the DB each time?
            try:
                db = Database()
                db.connect()
                query ="""
                select date, qc_fwhm as fwhm, airmass, filter from exposure
                where date > '%s' and date < '%s'
                --and filter != 'VR' and qc_fwhm is not NULL
                and qc_fwhm is not NULL and qc_fwhm > 0
                """%(tmin, tmax)
                logging.debug(query)
                raw = db.query2rec(query)
            except Exception as e:
                logging.warn("Couldn't connect to database:\n%s"%str(e))
                dtype=[('date', '<M8[ns]'), ('fwhm', '<f8'),
                       ('airmass', '<f8'), ('filter', 'S4')]
                raw = np.recarray(0,dtype=dtype)
        else:
            sel = (self.df.index > tmin) & (self.df.index < tmax)
            raw = self.df[sel].to_records()
        return raw
    def load_data(self, date=None, timedelta='30m'):
        """Populate self.raw (observed QC values) and self.data (i-band zenith)."""
        raw = self.get_data(date,timedelta)
        # Save the raw qc values
        self.raw = np.recarray(len(raw),dtype=self.DTYPE)
        self.raw['date'] = raw['date']
        self.raw['fwhm'] = raw['fwhm']
        self.raw['airmass'] = raw['airmass']
        self.raw['filter'] = raw['filter']
        # Convert to i-band zenith
        self.data = copy.deepcopy(self.raw)
        self.data['filter'] = 'i'
        self.data['airmass'] = 1.0
        kwargs = dict(band_1=self.raw['filter'], inst_1=DECAMINST, airmass_1=self.raw['airmass'])
        kwargs.update(band_2='i', inst_2=0.0 , airmass_2=self.data['airmass'])
        self.data['fwhm'] = convert(self.raw['fwhm'],**kwargs)
        return self.data
# Command-line entry point: currently only argparse scaffolding (no options).
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description=__doc__)
    args = parser.parse_args()
| 36.106227 | 98 | 0.567718 |
__author__ = "Alex Drlica-Wagner"
from collections import OrderedDict as odict
import logging
import copy
import numpy as np
import pandas as pd
import dateutil.parser
import ephem
from obztak.utils import fileio
from obztak.utils.date import datestring
from obztak.utils.database import Database
WAVE = odict([
( 'u' , 0.86603 ),
( 'g' , 0.9067 ),
( 'r' , 0.9609 ),
( 'i' , 1.0 ),
( 'z' , 1.036 ),
( 'Y' , 1.0523 ),
('dimm', 1/1.0916 ),
('VR' , 0.9551 ),
])
WAVE_DF = pd.DataFrame({'filter':WAVE.keys(),'trans':WAVE.values()})
DECAMINST = 0.5
DIMMINST = 0.0
def convert(fwhm_1,
            band_1='dimm', airmass_1=1.0, inst_1=DIMMINST,
            band_2='i', airmass_2=1.0, inst_2=DECAMINST):
    """Convert an observed seeing FWHM (arcsec) to another band and airmass.

    fwhm_1/band_1/airmass_1/inst_1 describe the input measurement;
    band_2/airmass_2/inst_2 the desired output. Bands may be scalars or
    arrays aligned with fwhm_1.
    """
    # Remove the input instrumental contribution in quadrature.
    fwhm = np.sqrt(fwhm_1**2 - inst_1**2)

    # BUGFIX: array-valued bands were previously looked up via
    # WAVE_DF.merge(...), whose inner join orders rows by the left frame
    # (WAVE_DF) rather than by the input band array, misaligning the
    # transmission factors with fwhm_1. Use an order-preserving per-element
    # lookup instead (decoding bytes keys from 'S4' recarray columns).
    def _trans(band):
        if np.isscalar(band):
            return WAVE[band]
        return np.array([WAVE[b.decode() if isinstance(b, bytes) else b]
                         for b in band])

    wave_1 = _trans(band_1)
    wave_2 = _trans(band_2)

    # Wavelength scaling and X**0.6 airmass dependence; re-add the output
    # instrumental contribution in quadrature.
    fwhm_2 = fwhm * (wave_1/wave_2) * (airmass_2/airmass_1)**(0.6)
    return np.hypot(fwhm_2, inst_2)
class Seeing():
    """Manage seeing data: self.raw holds observed values, self.data the
    same records transformed to atmospheric i-band zenith FWHM (arcsec).
    Use `get_fwhm` for a PSF prediction."""
    # Record layout shared by self.raw and self.data.
    DTYPE = [('date','<M8[ns]'),('fwhm',float),('airmass',float),('filter','S4')]
    def __init__(self, date=None, db='fnal', filename=None):
        """date: reference date (None/'now'/parseable string); db: profile
        suffix (stored as 'db-<db>'); filename: optional CSV instead of DB."""
        self.set_date(date)
        self.df = self.read_file(filename)
        self.db = 'db-'+db
    def set_date(self, date):
        """Parse and store the reference date (None is a no-op; 'now' uses ephem)."""
        if date is None:
            return
        elif date == 'now':
            self.date = dateutil.parser.parse(datestring(ephem.now()))
        else:
            self.date = dateutil.parser.parse(date)
    def get_fwhm(self, timedelta='15m', band='i', airmass=1.0, inst=DECAMINST):
        """Predict the PSF FWHM (arcsec) for the given band/airmass/instrument."""
        timedelta = pd.Timedelta(timedelta)
        self.load_data(timedelta=max(3*timedelta,pd.Timedelta('1h')))
        dt = pd.DatetimeIndex(self.data['date'])
        previous = slice(-1,None)  # most recent exposure
        recent = (dt < self.date) & (dt > (self.date - timedelta))
        ancient = (dt < (self.date - timedelta)) & (dt > (self.date - 2*timedelta))
        # Nominal atmospheric psf i-band zenith fwhm = 0.9"
        xmu = np.log10(0.74833) # sqrt(0.9**2 - 0.5**2)
        if not len(self.data):
            # No data, use the mean and print a warning
            logging.warn("No fwhm data available; using DECam median")
            xpred = xmu
        elif np.any(recent) and np.any(ancient):
            # Weighted median of recent and ancient exposures
            logging.debug("Seeing from recent and ancient exposures")
            # Log of the observed atmospheric psf i-band zenith
            x = np.log10([np.median(self.data[recent]['fwhm']),
                          np.median(self.data[ancient]['fwhm'])])
            # Predicted log of the atmospheric psf
            # NB: These constants were derived for timedelta=5min
            # they may not hold for arbitrary time windows.
            xpred = xmu + 0.8 * (x[0] - xmu) + 0.14 * (x[1] - xmu)
        elif np.any(recent):
            # Median of the log of the observed atmospheric psf i-band zenith
            logging.debug("Seeing from recent exposures")
            xpred = np.log10(np.median(self.data[recent]['fwhm']))
        else:
            # Log of the i-band zenith fwhm from the previous exposure
            logging.debug("Seeing from previous exposure")
            xpred = np.log10(np.median(self.data[previous]['fwhm']))
        # Transform to the requested band/airmass and re-add the
        # instrumental contribution.
        fwhm_pred = convert(10**xpred,
                            band_1='i' , airmass_1=1.0 , inst_1=0.0,
                            band_2=band, airmass_2=airmass, inst_2=inst)
        #import pdb; pdb.set_trace()
        return fwhm_pred
class DimmSeeing(Seeing):
    """Estimate seeing from the DIMM."""
    @classmethod
    def read_file(cls, filename):
        """Read DIMM data from a CSV with columns (date, fwhm); None if no file."""
        if filename is None: return None
        df = pd.read_csv(filename,names=['date','fwhm'],
                         parse_dates=['date'],index_col=['date'])
        return df
    def get_data(self, date=None, timedelta='30m'):
        """Fetch raw DIMM records between date-timedelta and date (file or DB)."""
        self.set_date(date)
        tmax = self.date
        tmin = self.date - pd.Timedelta(timedelta)
        if self.df is None:
            # Don't want to create the DB each time?
            db = Database(self.db)
            db.connect()
            query ="""
            select date, dimm2see as fwhm from exposure
            where date > '%s' and date < '%s'
            and dimm2see is not NULL
            """%(tmin, tmax)
            logging.debug(query)
            raw = db.query2rec(query)
        else:
            sel = (self.df.index > tmin) & (self.df.index < tmax)
            raw = self.df[sel].to_records()
        return raw
    def load_data(self, date=None, timedelta='30m'):
        """Populate self.raw (observed DIMM values) and self.data (i-band zenith)."""
        raw = self.get_data(date, timedelta)
        # Save the raw dimm values
        self.raw = np.recarray(len(raw),dtype=self.DTYPE)
        self.raw['date'] = raw['date']
        self.raw['fwhm'] = raw['fwhm']
        self.raw['airmass'] = 1.0
        self.raw['filter'] = 'dimm'
        # Convert to i-band zenith
        self.data = copy.deepcopy(self.raw)
        self.data['filter'] = 'i'
        self.data['airmass'] = 1.0
        kwargs = dict(band_1='dimm', inst_1=DIMMINST, airmass_1=self.raw['airmass'])
        kwargs.update(band_2='i', inst_2=0.0 , airmass_2=self.data['airmass'])
        self.data['fwhm'] = convert(self.raw['fwhm'],**kwargs)
        return self.data
class QcSeeing(Seeing):
    """Estimate seeing from the DECam QC values."""
    @classmethod
    def read_file(cls, filename):
        """Read QC data from CSV with (date, fwhm, airmass, filter); None if no file."""
        if filename is None: return None
        df = pd.read_csv(filename,names=['date','fwhm','airmass','filter'],
                         parse_dates=['date'],index_col=['date'])
        return df
    def get_data(self, date=None, timedelta='30m'):
        """Fetch raw QC records between date-timedelta and date (file or DB).

        On a database failure an empty recarray is returned (best-effort).
        """
        self.set_date(date)
        tmax = self.date
        tmin = self.date - pd.Timedelta(timedelta)
        if self.df is None:
            # Don't want to create the DB each time?
            try:
                db = Database()
                db.connect()
                query ="""
                select date, qc_fwhm as fwhm, airmass, filter from exposure
                where date > '%s' and date < '%s'
                --and filter != 'VR' and qc_fwhm is not NULL
                and qc_fwhm is not NULL and qc_fwhm > 0
                """%(tmin, tmax)
                logging.debug(query)
                raw = db.query2rec(query)
            except Exception as e:
                logging.warn("Couldn't connect to database:\n%s"%str(e))
                dtype=[('date', '<M8[ns]'), ('fwhm', '<f8'),
                       ('airmass', '<f8'), ('filter', 'S4')]
                raw = np.recarray(0,dtype=dtype)
        else:
            sel = (self.df.index > tmin) & (self.df.index < tmax)
            raw = self.df[sel].to_records()
        return raw
    def load_data(self, date=None, timedelta='30m'):
        """Populate self.raw (observed QC values) and self.data (i-band zenith)."""
        raw = self.get_data(date,timedelta)
        # Save the raw qc values
        self.raw = np.recarray(len(raw),dtype=self.DTYPE)
        self.raw['date'] = raw['date']
        self.raw['fwhm'] = raw['fwhm']
        self.raw['airmass'] = raw['airmass']
        self.raw['filter'] = raw['filter']
        # Convert to i-band zenith
        self.data = copy.deepcopy(self.raw)
        self.data['filter'] = 'i'
        self.data['airmass'] = 1.0
        kwargs = dict(band_1=self.raw['filter'], inst_1=DECAMINST, airmass_1=self.raw['airmass'])
        kwargs.update(band_2='i', inst_2=0.0 , airmass_2=self.data['airmass'])
        self.data['fwhm'] = convert(self.raw['fwhm'],**kwargs)
        return self.data
# Command-line entry point: currently only argparse scaffolding (no options).
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description=__doc__)
    args = parser.parse_args()
| true | true |
f73caa9a9825042422a26fca312f9f5231731f8a | 3,489 | py | Python | fixture/orm.py | anastas11a/python_training | 1daceddb193d92542f7f7313026a7e67af4d89bb | [
"Apache-2.0"
] | null | null | null | fixture/orm.py | anastas11a/python_training | 1daceddb193d92542f7f7313026a7e67af4d89bb | [
"Apache-2.0"
] | null | null | null | fixture/orm.py | anastas11a/python_training | 1daceddb193d92542f7f7313026a7e67af4d89bb | [
"Apache-2.0"
] | null | null | null | from pony.orm import *
from model.group import Group
from model.contact import Contact
class ORMFixture:
db = Database()
class ORMGroup(db.Entity):
_table_ = 'group_list'
id = PrimaryKey(int, column='group_id')
name = Optional(str, column='group_name')
header = Optional(str, column='group_header')
footer = Optional(str, column='group_footer')
contacts = Set(lambda: ORMFixture.ORMContact, table="address_in_groups", column = "id", reverse="groups", lazy=True)
class ORMContact(db.Entity):
_table_ = 'addressbook'
id = PrimaryKey(int, column='id')
firstname = Optional(str, column='firstname')
lastname = Optional(str, column='lastname')
deprecated = Optional(str, column='deprecated')
middlename = Optional(str, column="middlename")
nickname = Optional(str, column="nickname")
company = Optional(str, column="company")
title = Optional(str, column="title")
address = Optional(str, column="address")
home = Optional(str, column="home")
mobile = Optional(str, column="mobile")
work = Optional(str, column="work")
fax = Optional(str, column="fax")
email1 = Optional(str, column="email")
email2 = Optional(str, column="email2")
email3 = Optional(str, column="email3")
homepage = Optional(str, column="homepage")
address2 = Optional(str, column="address2")
phone2 = Optional(str, column="phone2")
notes = Optional(str, column="notes")
groups = Set(lambda: ORMFixture.ORMGroup, table="address_in_groups", column="group_id", reverse="contacts", lazy=True)
def __init__(self, host, name, user, password):
self.db.bind('mysql', host=host, database=name, user=user, password=password)
self.db.generate_mapping()
sql_debug(True)
def convert_groups_to_model(self, groups):
def convert(group):
return(Group(id=str(group.id), name=group.name, header=group.header, footer=group.footer))
return list(map(convert, groups))
@db_session
def get_group_list(self):
return self.convert_groups_to_model(select(g for g in ORMFixture.ORMGroup))
def convert_contacts_to_model(self, contacts):
def convert(contact):
return(Contact(id=str(contact.id), firstname=contact.firstname, lastname=contact.lastname, middlename=contact.middlename,
nickname=contact.nickname, company=contact.company,
title=contact.title, address=contact.address, homephone=contact.home,
mobilephone=contact.mobile, address2=contact.address2, phone2=contact.phone2, notes=contact.notes))
return list(map(convert, contacts))
@db_session
def get_contact_list(self):
return self.convert_contacts_to_model(select(c for c in ORMFixture.ORMContact if c.deprecated is None))
@db_session
def get_contacts_in_group(self, group):
orm_group = list(select(g for g in ORMFixture.ORMGroup if g.id == group.id))[0]
return self.convert_contacts_to_model(orm_group.contacts)
@db_session
def get_contacts_not_in_group(self, group):
orm_group = list(select(g for g in ORMFixture.ORMGroup if g.id == group.id))[0]
return self.convert_contacts_to_model(
select(c for c in ORMFixture.ORMContact if c.deprecated is None and orm_group not in c.groups)) | 44.730769 | 133 | 0.662654 | from pony.orm import *
from model.group import Group
from model.contact import Contact
class ORMFixture:
db = Database()
class ORMGroup(db.Entity):
_table_ = 'group_list'
id = PrimaryKey(int, column='group_id')
name = Optional(str, column='group_name')
header = Optional(str, column='group_header')
footer = Optional(str, column='group_footer')
contacts = Set(lambda: ORMFixture.ORMContact, table="address_in_groups", column = "id", reverse="groups", lazy=True)
class ORMContact(db.Entity):
_table_ = 'addressbook'
id = PrimaryKey(int, column='id')
firstname = Optional(str, column='firstname')
lastname = Optional(str, column='lastname')
deprecated = Optional(str, column='deprecated')
middlename = Optional(str, column="middlename")
nickname = Optional(str, column="nickname")
company = Optional(str, column="company")
title = Optional(str, column="title")
address = Optional(str, column="address")
home = Optional(str, column="home")
mobile = Optional(str, column="mobile")
work = Optional(str, column="work")
fax = Optional(str, column="fax")
email1 = Optional(str, column="email")
email2 = Optional(str, column="email2")
email3 = Optional(str, column="email3")
homepage = Optional(str, column="homepage")
address2 = Optional(str, column="address2")
phone2 = Optional(str, column="phone2")
notes = Optional(str, column="notes")
groups = Set(lambda: ORMFixture.ORMGroup, table="address_in_groups", column="group_id", reverse="contacts", lazy=True)
def __init__(self, host, name, user, password):
self.db.bind('mysql', host=host, database=name, user=user, password=password)
self.db.generate_mapping()
sql_debug(True)
def convert_groups_to_model(self, groups):
def convert(group):
return(Group(id=str(group.id), name=group.name, header=group.header, footer=group.footer))
return list(map(convert, groups))
@db_session
def get_group_list(self):
return self.convert_groups_to_model(select(g for g in ORMFixture.ORMGroup))
def convert_contacts_to_model(self, contacts):
def convert(contact):
return(Contact(id=str(contact.id), firstname=contact.firstname, lastname=contact.lastname, middlename=contact.middlename,
nickname=contact.nickname, company=contact.company,
title=contact.title, address=contact.address, homephone=contact.home,
mobilephone=contact.mobile, address2=contact.address2, phone2=contact.phone2, notes=contact.notes))
return list(map(convert, contacts))
@db_session
def get_contact_list(self):
return self.convert_contacts_to_model(select(c for c in ORMFixture.ORMContact if c.deprecated is None))
@db_session
def get_contacts_in_group(self, group):
orm_group = list(select(g for g in ORMFixture.ORMGroup if g.id == group.id))[0]
return self.convert_contacts_to_model(orm_group.contacts)
@db_session
def get_contacts_not_in_group(self, group):
orm_group = list(select(g for g in ORMFixture.ORMGroup if g.id == group.id))[0]
return self.convert_contacts_to_model(
select(c for c in ORMFixture.ORMContact if c.deprecated is None and orm_group not in c.groups)) | true | true |
f73caad320ebd11ff60af29d1a8efec454ebb12e | 194 | py | Python | task_5.py | user275301/ProjectEuler_tasks | e5507975b089cd901bba5c539d2a4f3306ddab02 | [
"MIT"
] | null | null | null | task_5.py | user275301/ProjectEuler_tasks | e5507975b089cd901bba5c539d2a4f3306ddab02 | [
"MIT"
] | null | null | null | task_5.py | user275301/ProjectEuler_tasks | e5507975b089cd901bba5c539d2a4f3306ddab02 | [
"MIT"
] | null | null | null | maxdivider = 20
def task5(maxdivider):
    """Return the smallest positive integer evenly divisible by every
    integer from 1 to maxdivider inclusive (Project Euler problem 5).

    Replaces the original brute-force trial loop, which (a) used
    range(1, maxdivider) and so never tested divisibility by maxdivider
    itself, (b) started the search at 11 and therefore could not return
    answers below 11, and (c) was far too slow for large inputs.
    The answer is exactly lcm(1, ..., maxdivider), computed as a fold.
    """
    from math import gcd  # local import: module has no import section

    result = 1
    for div in range(2, maxdivider + 1):
        # lcm(a, b) = a * b // gcd(a, b)
        result = result * div // gcd(result, div)
    return result
print(task5(maxdivider)) | 14.923077 | 33 | 0.634021 | maxdivider = 20
def task5(maxdivider):
    """Return the smallest positive integer evenly divisible by every
    integer from 1 to maxdivider inclusive (Project Euler problem 5).

    Replaces the original brute-force trial loop, which (a) used
    range(1, maxdivider) and so never tested divisibility by maxdivider
    itself, (b) started the search at 11 and therefore could not return
    answers below 11, and (c) was far too slow for large inputs.
    The answer is exactly lcm(1, ..., maxdivider), computed as a fold.
    """
    from math import gcd  # local import: module has no import section

    result = 1
    for div in range(2, maxdivider + 1):
        # lcm(a, b) = a * b // gcd(a, b)
        result = result * div // gcd(result, div)
    return result
print(task5(maxdivider)) | true | true |
f73caaed0848c16bacf6f77fdc692296c0be1e57 | 827 | py | Python | nesara/urls.py | Annonymus-Coder/Tours-And-Travels-project | 81fce5c24599895b6526eeb28bd4582b5d55948b | [
"MIT"
] | null | null | null | nesara/urls.py | Annonymus-Coder/Tours-And-Travels-project | 81fce5c24599895b6526eeb28bd4582b5d55948b | [
"MIT"
] | null | null | null | nesara/urls.py | Annonymus-Coder/Tours-And-Travels-project | 81fce5c24599895b6526eeb28bd4582b5d55948b | [
"MIT"
] | null | null | null | """nesara URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('NesaraTours.urls')),
]
| 34.458333 | 78 | 0.680774 | from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('NesaraTours.urls')),
]
| true | true |
f73caaed636a339de3fe0a88cd8760c19f1791c9 | 7,801 | py | Python | src/config/bot_config.py | nostalgebraist/nostalgebraist-autoresponder | 622349c4cad2a7aec1017837416c58a678151aae | [
"MIT"
] | 39 | 2020-06-19T05:38:11.000Z | 2022-03-28T04:35:31.000Z | src/config/bot_config.py | nostalgebraist/nostalgebraist-autoresponder | 622349c4cad2a7aec1017837416c58a678151aae | [
"MIT"
] | null | null | null | src/config/bot_config.py | nostalgebraist/nostalgebraist-autoresponder | 622349c4cad2a7aec1017837416c58a678151aae | [
"MIT"
] | 2 | 2021-04-13T18:12:03.000Z | 2021-12-16T23:20:12.000Z | from typing import List, Set, Dict
import json
import pytumblr
from api_tumblr.pytumblr_wrapper import RateLimitClient
API_KEYS_TYPE = List[str]
class BotSpecificConstants:
"""Values specific to my development environment and/or the social context of my bot, e.g. specific posts IDs where I need apply some override, or specific users I need to treat specially, etc"""
def __init__(
self,
blogName: str,
dash_blogName: str,
REBLOG_START_TS: int,
DASH_START_TS: int,
private_clients_api_keys: List[API_KEYS_TYPE],
dashboard_clients_api_keys: List[API_KEYS_TYPE],
bridge_service_host: str,
bridge_service_port: int,
BRIDGE_SERVICE_REMOTE_HOST: str,
BUCKET_NAME: str,
ask_min_words: int,
NO_REBLOG_IDS: Set[int] = set(),
DEF_REBLOG_IDS: Set[int] = set(),
FORCE_TRAIL_HACK_IDS: Set[int] = set(),
USER_AVOID_LIST: Set[str] = set(),
TAG_AVOID_LIST: Set[str] = set(),
DASH_TAG_AVOID_LIST: Set[str] = set(),
REPLY_USER_AUTO_ACCEPT_LIST: Set[str] = set(),
bad_strings: Set[str] = set(),
bad_strings_shortwords: Set[str] = set(),
okay_superstrings: Set[str] = set(),
likely_obscured_strings: Set[str] = set(),
profane_strings: Set[str] = set(),
hardstop_strings_review: Set[str] = set(),
hardstop_strings_reject: Set[str] = set(),
LIMITED_USERS: Dict[str, float] = dict(),
LIMITED_SUBSTRINGS: Dict[str, float] = dict(),
SCREENED_USERS: Set[str] = set(),
NO_SCRAPE_USERS: Set[str] = set(),
):
# TODO: standardize case in names
self.blogName = blogName
self.dash_blogName = dash_blogName
# when reblog feature started
self.REBLOG_START_TS = REBLOG_START_TS
# when reblog-from-dash feature started
self.DASH_START_TS = DASH_START_TS
# don't reblog these post IDs -- generally used when I want to write about the bot and then reblog to the bot
# i don't want a separate bot reblog "responding" to me
self.NO_REBLOG_IDS = NO_REBLOG_IDS
self.DEF_REBLOG_IDS = DEF_REBLOG_IDS
# overrides for tumblr blockquote weirdness
self.FORCE_TRAIL_HACK_IDS = FORCE_TRAIL_HACK_IDS
# tumblr api keys (4 strings per key)
self.private_clients_api_keys = private_clients_api_keys
self.dashboard_clients_api_keys = dashboard_clients_api_keys
# host name of the bridge service used in clients we expect to be running on the same machine
# (i.e. should be localhost under normal circumstances)
self.bridge_service_host = bridge_service_host
# port of the bridge service
self.bridge_service_port = bridge_service_port
# name of Google Cloud Storage bucket used to store models and data
self.BUCKET_NAME = BUCKET_NAME
# host name of the bridge service used in ML code
# if the ML code is running remotely, this will differ from `bridge_service_host`
self.BRIDGE_SERVICE_REMOTE_HOST = BRIDGE_SERVICE_REMOTE_HOST
# don't interact or mention these users
self.USER_AVOID_LIST = USER_AVOID_LIST
# bot-written post tags are removed if they contain any of these (substring matches, case-insensitive)
self.TAG_AVOID_LIST = TAG_AVOID_LIST
# don't reblog from dash if tags contain these (substring matches)
self.DASH_TAG_AVOID_LIST = DASH_TAG_AVOID_LIST
# for frequent repliers who don't otherwise trigger "OK to respond to this reply" logic
self.REPLY_USER_AUTO_ACCEPT_LIST = REPLY_USER_AUTO_ACCEPT_LIST
# write draft instead of auto-publish when post/tags contain these substrings
self.bad_strings = bad_strings
# form elements of bad_strings from these surrounded by various whitespace/punctuation
self.bad_strings_shortwords = bad_strings_shortwords
# ignore items from `bad_strings` when they appear inside of these longer strings
# e.g. if we wanted to filter "sex" without filtering "anne sexton"
self.okay_superstrings = okay_superstrings
# like bad_strings, but we attempt to detect these even if the user is trying to obscure them
# with e.g. zero-width unicode or l33tsp34k
self.likely_obscured_strings = likely_obscured_strings
# like bad_strings, but only used in contexts where we're trying to keep the language rated PG
self.profane_strings = profane_strings
# force write draft instead of auto-publish on these strings, even if ML model accepts post
self.hardstop_strings_review = hardstop_strings_review
self.hardstop_strings_review.update(USER_AVOID_LIST)
self.hardstop_strings_review.update(likely_obscured_strings)
# force ignore post on these strings, even if ML model accepts post
self.hardstop_strings_reject = hardstop_strings_reject
# `LIMITED_USERS` allows limiting the rate at which we interact with certain users, e.g. bots who post extremely often or people who send huge numbers of asks
#
# `LIMITED_USERS` should be a dict with usernames as keys. the values are floats. a value of X means approximately "respond to this user at most once per X hours."
self.LIMITED_USERS = LIMITED_USERS
# like `LIMITED_USERS`, but triggers the limiting on the presence of a substring in the input, rather than the name of the user
self.LIMITED_SUBSTRINGS = LIMITED_SUBSTRINGS
# write draft instead of auto-publish when responding to these users
self.SCREENED_USERS = SCREENED_USERS
self.NO_SCRAPE_USERS = NO_SCRAPE_USERS
self.ask_min_words = ask_min_words
@staticmethod
def load(path: str = "config.json") -> "BotSpecificConstants":
with open(path, "r", encoding="utf-8") as f:
constants = json.load(f)
list_to_set_keys = {
"NO_REBLOG_IDS",
"FORCE_TRAIL_HACK_IDS",
"USER_AVOID_LIST",
"TAG_AVOID_LIST",
"DASH_TAG_AVOID_LIST",
"REPLY_USER_AUTO_ACCEPT_LIST",
"bad_strings",
"bad_strings_shortwords",
"okay_superstrings",
"likely_obscured_strings",
"profane_strings",
"hardstop_strings_review",
"hardstop_strings_reject",
"SCREENED_USERS",
"NO_SCRAPE_USERS",
}
for list_to_set_key in list_to_set_keys:
constants[list_to_set_key] = set(constants[list_to_set_key])
return BotSpecificConstants(**constants)
@property
def private_clients(self) -> List[RateLimitClient]:
return [
RateLimitClient.from_tumblr_rest_client(
pytumblr.TumblrRestClient(*keys), self.blogName
)
for keys in self.private_clients_api_keys
]
@property
def dashboard_clients(self) -> List[RateLimitClient]:
return [
RateLimitClient.from_tumblr_rest_client(
pytumblr.TumblrRestClient(*keys), self.dash_blogName
)
for keys in self.dashboard_clients_api_keys
]
@property
def bridge_service_url(self):
return self.bridge_service_host + ":" + str(self.bridge_service_port)
def LIMITED_USERS_PROBS(self, EFFECTIVE_SLEEP_TIME) -> dict:
LIMITED_USERS_MINUTES_LOWER_BOUNDS = {
name: hours * 60 for name, hours in self.LIMITED_USERS.items()
}
LIMITED_USERS_PROBS = {
name: EFFECTIVE_SLEEP_TIME / (60 * lb)
for name, lb in LIMITED_USERS_MINUTES_LOWER_BOUNDS.items()
}
return LIMITED_USERS_PROBS
| 40.630208 | 199 | 0.672991 | from typing import List, Set, Dict
import json
import pytumblr
from api_tumblr.pytumblr_wrapper import RateLimitClient
API_KEYS_TYPE = List[str]
class BotSpecificConstants:
def __init__(
self,
blogName: str,
dash_blogName: str,
REBLOG_START_TS: int,
DASH_START_TS: int,
private_clients_api_keys: List[API_KEYS_TYPE],
dashboard_clients_api_keys: List[API_KEYS_TYPE],
bridge_service_host: str,
bridge_service_port: int,
BRIDGE_SERVICE_REMOTE_HOST: str,
BUCKET_NAME: str,
ask_min_words: int,
NO_REBLOG_IDS: Set[int] = set(),
DEF_REBLOG_IDS: Set[int] = set(),
FORCE_TRAIL_HACK_IDS: Set[int] = set(),
USER_AVOID_LIST: Set[str] = set(),
TAG_AVOID_LIST: Set[str] = set(),
DASH_TAG_AVOID_LIST: Set[str] = set(),
REPLY_USER_AUTO_ACCEPT_LIST: Set[str] = set(),
bad_strings: Set[str] = set(),
bad_strings_shortwords: Set[str] = set(),
okay_superstrings: Set[str] = set(),
likely_obscured_strings: Set[str] = set(),
profane_strings: Set[str] = set(),
hardstop_strings_review: Set[str] = set(),
hardstop_strings_reject: Set[str] = set(),
LIMITED_USERS: Dict[str, float] = dict(),
LIMITED_SUBSTRINGS: Dict[str, float] = dict(),
SCREENED_USERS: Set[str] = set(),
NO_SCRAPE_USERS: Set[str] = set(),
):
self.blogName = blogName
self.dash_blogName = dash_blogName
self.REBLOG_START_TS = REBLOG_START_TS
self.DASH_START_TS = DASH_START_TS
# i don't want a separate bot reblog "responding" to me
self.NO_REBLOG_IDS = NO_REBLOG_IDS
self.DEF_REBLOG_IDS = DEF_REBLOG_IDS
self.FORCE_TRAIL_HACK_IDS = FORCE_TRAIL_HACK_IDS
self.private_clients_api_keys = private_clients_api_keys
self.dashboard_clients_api_keys = dashboard_clients_api_keys
self.bridge_service_host = bridge_service_host
self.bridge_service_port = bridge_service_port
self.BUCKET_NAME = BUCKET_NAME
self.BRIDGE_SERVICE_REMOTE_HOST = BRIDGE_SERVICE_REMOTE_HOST
self.USER_AVOID_LIST = USER_AVOID_LIST
# bot-written post tags are removed if they contain any of these (substring matches, case-insensitive)
self.TAG_AVOID_LIST = TAG_AVOID_LIST
# don't reblog from dash if tags contain these (substring matches)
self.DASH_TAG_AVOID_LIST = DASH_TAG_AVOID_LIST
self.REPLY_USER_AUTO_ACCEPT_LIST = REPLY_USER_AUTO_ACCEPT_LIST
# write draft instead of auto-publish when post/tags contain these substrings
self.bad_strings = bad_strings
# form elements of bad_strings from these surrounded by various whitespace/punctuation
self.bad_strings_shortwords = bad_strings_shortwords
# ignore items from `bad_strings` when they appear inside of these longer strings
# e.g. if we wanted to filter "sex" without filtering "anne sexton"
self.okay_superstrings = okay_superstrings
# like bad_strings, but we attempt to detect these even if the user is trying to obscure them
# with e.g. zero-width unicode or l33tsp34k
self.likely_obscured_strings = likely_obscured_strings
# like bad_strings, but only used in contexts where we're trying to keep the language rated PG
self.profane_strings = profane_strings
self.hardstop_strings_review = hardstop_strings_review
self.hardstop_strings_review.update(USER_AVOID_LIST)
self.hardstop_strings_review.update(likely_obscured_strings)
self.hardstop_strings_reject = hardstop_strings_reject
self.LIMITED_USERS = LIMITED_USERS
self.LIMITED_SUBSTRINGS = LIMITED_SUBSTRINGS
self.SCREENED_USERS = SCREENED_USERS
self.NO_SCRAPE_USERS = NO_SCRAPE_USERS
self.ask_min_words = ask_min_words
@staticmethod
def load(path: str = "config.json") -> "BotSpecificConstants":
with open(path, "r", encoding="utf-8") as f:
constants = json.load(f)
list_to_set_keys = {
"NO_REBLOG_IDS",
"FORCE_TRAIL_HACK_IDS",
"USER_AVOID_LIST",
"TAG_AVOID_LIST",
"DASH_TAG_AVOID_LIST",
"REPLY_USER_AUTO_ACCEPT_LIST",
"bad_strings",
"bad_strings_shortwords",
"okay_superstrings",
"likely_obscured_strings",
"profane_strings",
"hardstop_strings_review",
"hardstop_strings_reject",
"SCREENED_USERS",
"NO_SCRAPE_USERS",
}
for list_to_set_key in list_to_set_keys:
constants[list_to_set_key] = set(constants[list_to_set_key])
return BotSpecificConstants(**constants)
@property
def private_clients(self) -> List[RateLimitClient]:
return [
RateLimitClient.from_tumblr_rest_client(
pytumblr.TumblrRestClient(*keys), self.blogName
)
for keys in self.private_clients_api_keys
]
@property
def dashboard_clients(self) -> List[RateLimitClient]:
return [
RateLimitClient.from_tumblr_rest_client(
pytumblr.TumblrRestClient(*keys), self.dash_blogName
)
for keys in self.dashboard_clients_api_keys
]
@property
def bridge_service_url(self):
return self.bridge_service_host + ":" + str(self.bridge_service_port)
def LIMITED_USERS_PROBS(self, EFFECTIVE_SLEEP_TIME) -> dict:
LIMITED_USERS_MINUTES_LOWER_BOUNDS = {
name: hours * 60 for name, hours in self.LIMITED_USERS.items()
}
LIMITED_USERS_PROBS = {
name: EFFECTIVE_SLEEP_TIME / (60 * lb)
for name, lb in LIMITED_USERS_MINUTES_LOWER_BOUNDS.items()
}
return LIMITED_USERS_PROBS
| true | true |
f73cabbee568613375f471d809d94db0936580d5 | 3,015 | py | Python | app/kivymd/backgroundcolorbehavior.py | blurmcclure18/ExpenseTracker | b4c65095298fbb551fc3fc7ecffd5d5029401e87 | [
"MIT"
] | 16 | 2019-10-19T11:34:35.000Z | 2021-12-16T07:10:25.000Z | app/kivymd/backgroundcolorbehavior.py | blurmcclure18/ExpenseTracker | b4c65095298fbb551fc3fc7ecffd5d5029401e87 | [
"MIT"
] | 1 | 2021-09-27T04:37:28.000Z | 2021-09-27T04:37:28.000Z | app/kivymd/backgroundcolorbehavior.py | blurmcclure18/ExpenseTracker | b4c65095298fbb551fc3fc7ecffd5d5029401e87 | [
"MIT"
] | 11 | 2019-10-21T00:01:55.000Z | 2022-01-17T06:02:23.000Z | """
Background Color Behavior
=========================
Copyright (c) 2015 Andrés Rodríguez and KivyMD contributors -
KivyMD library up to version 0.1.2
Copyright (c) 2019 Ivanov Yuri and KivyMD contributors -
KivyMD library version 0.1.3 and higher
For suggestions and questions:
<kivydevelopment@gmail.com>
This file is distributed under the terms of the same license,
as the Kivy framework.
"""
from kivy.lang import Builder
from kivy.properties import BoundedNumericProperty, ReferenceListProperty
from kivy.properties import OptionProperty, ListProperty
from kivy.uix.widget import Widget
from kivy.utils import get_color_from_hex
from kivymd.color_definitions import palette, hue, text_colors
Builder.load_string(
"""
<BackgroundColorBehavior>
canvas:
Color:
rgba: self.md_bg_color
Rectangle:
size: self.size
pos: self.pos
"""
)
class BackgroundColorBehavior(Widget):
r = BoundedNumericProperty(1.0, min=0.0, max=1.0)
g = BoundedNumericProperty(1.0, min=0.0, max=1.0)
b = BoundedNumericProperty(1.0, min=0.0, max=1.0)
a = BoundedNumericProperty(0.0, min=0.0, max=1.0)
md_bg_color = ReferenceListProperty(r, g, b, a)
class SpecificBackgroundColorBehavior(BackgroundColorBehavior):
background_palette = OptionProperty(
"Primary", options=["Primary", "Accent", *palette]
)
background_hue = OptionProperty("500", options=hue)
specific_text_color = ListProperty([0, 0, 0, 0.87])
specific_secondary_text_color = ListProperty([0, 0, 0, 0.87])
def _update_specific_text_color(self, instance, value):
if hasattr(self, "theme_cls"):
palette = {
"Primary": self.theme_cls.primary_palette,
"Accent": self.theme_cls.accent_palette,
}.get(self.background_palette, self.background_palette)
else:
palette = {"Primary": "Blue", "Accent": "Amber"}.get(
self.background_palette, self.background_palette
)
color = get_color_from_hex(text_colors[palette][self.background_hue])
secondary_color = color[:]
# Check for black text (need to adjust opacity)
if (color[0] + color[1] + color[2]) == 0:
color[3] = 0.87
secondary_color[3] = 0.54
else:
secondary_color[3] = 0.7
self.specific_text_color = color
self.specific_secondary_text_color = secondary_color
def __init__(self, **kwargs):
super().__init__(**kwargs)
if hasattr(self, "theme_cls"):
self.theme_cls.bind(primary_palette=self._update_specific_text_color)
self.theme_cls.bind(accent_palette=self._update_specific_text_color)
self.theme_cls.bind(theme_style=self._update_specific_text_color)
self.bind(background_hue=self._update_specific_text_color)
self.bind(background_palette=self._update_specific_text_color)
self._update_specific_text_color(None, None)
| 35.470588 | 81 | 0.678607 |
from kivy.lang import Builder
from kivy.properties import BoundedNumericProperty, ReferenceListProperty
from kivy.properties import OptionProperty, ListProperty
from kivy.uix.widget import Widget
from kivy.utils import get_color_from_hex
from kivymd.color_definitions import palette, hue, text_colors
Builder.load_string(
"""
<BackgroundColorBehavior>
canvas:
Color:
rgba: self.md_bg_color
Rectangle:
size: self.size
pos: self.pos
"""
)
class BackgroundColorBehavior(Widget):
r = BoundedNumericProperty(1.0, min=0.0, max=1.0)
g = BoundedNumericProperty(1.0, min=0.0, max=1.0)
b = BoundedNumericProperty(1.0, min=0.0, max=1.0)
a = BoundedNumericProperty(0.0, min=0.0, max=1.0)
md_bg_color = ReferenceListProperty(r, g, b, a)
class SpecificBackgroundColorBehavior(BackgroundColorBehavior):
background_palette = OptionProperty(
"Primary", options=["Primary", "Accent", *palette]
)
background_hue = OptionProperty("500", options=hue)
specific_text_color = ListProperty([0, 0, 0, 0.87])
specific_secondary_text_color = ListProperty([0, 0, 0, 0.87])
def _update_specific_text_color(self, instance, value):
if hasattr(self, "theme_cls"):
palette = {
"Primary": self.theme_cls.primary_palette,
"Accent": self.theme_cls.accent_palette,
}.get(self.background_palette, self.background_palette)
else:
palette = {"Primary": "Blue", "Accent": "Amber"}.get(
self.background_palette, self.background_palette
)
color = get_color_from_hex(text_colors[palette][self.background_hue])
secondary_color = color[:]
if (color[0] + color[1] + color[2]) == 0:
color[3] = 0.87
secondary_color[3] = 0.54
else:
secondary_color[3] = 0.7
self.specific_text_color = color
self.specific_secondary_text_color = secondary_color
def __init__(self, **kwargs):
super().__init__(**kwargs)
if hasattr(self, "theme_cls"):
self.theme_cls.bind(primary_palette=self._update_specific_text_color)
self.theme_cls.bind(accent_palette=self._update_specific_text_color)
self.theme_cls.bind(theme_style=self._update_specific_text_color)
self.bind(background_hue=self._update_specific_text_color)
self.bind(background_palette=self._update_specific_text_color)
self._update_specific_text_color(None, None)
| true | true |
f73cace2d882d031da2e578e09924d6544493556 | 6,764 | py | Python | src/tests.py | ThomasRanvier/map_maker | e36ddcc7d5959957d83fae778d8ef715c79712e7 | [
"MIT"
] | null | null | null | src/tests.py | ThomasRanvier/map_maker | e36ddcc7d5959957d83fae778d8ef715c79712e7 | [
"MIT"
] | null | null | null | src/tests.py | ThomasRanvier/map_maker | e36ddcc7d5959957d83fae778d8ef715c79712e7 | [
"MIT"
] | null | null | null | from mapping.map import Map
from utils.position import Position
from utils.utils import bresenham_line, filled_midpoint_circle
import matplotlib.pyplot as plt
def map_to_grid_pos():
print('Test: map_to_grid_pos')
lower_left_pos = Position(-5.0, -5.0)
upper_right_pos = Position(5.0, 5.0)
test_map = Map(lower_left_pos, upper_right_pos, 2.0)
assert(test_map.grid.shape == (20, 20))
grid_pos = test_map.to_grid_pos(Position(-5, -5))
assert(grid_pos.x == 0 and grid_pos.y == 0)
grid_pos = test_map.to_grid_pos(Position(-4.5, -5))
assert(grid_pos.x == 1 and grid_pos.y == 0)
grid_pos = test_map.to_grid_pos(Position(-4.501, -5))
assert(grid_pos.x == 0 and grid_pos.y == 0)
grid_pos = test_map.to_grid_pos(Position(5, 5))
assert(grid_pos.x == 20 and grid_pos.y == 20)
grid_pos = test_map.to_grid_pos(Position(4.99, 4.99))
assert(grid_pos.x == 19 and grid_pos.y == 19)
print('OK')
def map_to_real_pos():
print('Test: map_to_real_pos')
lower_left_pos = Position(-5.0, -5.0)
upper_right_pos = Position(5.0, 5.0)
test_map = Map(lower_left_pos, upper_right_pos, 2.0)
assert(test_map.grid.shape == (20, 20))
real_pos = test_map.to_real_pos(Position(0, 0))
assert(real_pos.x == -5 and real_pos.y == -5)
real_pos = test_map.to_real_pos(Position(1, 0))
assert(real_pos.x == -4.5 and real_pos.y == -5)
real_pos = test_map.to_real_pos(Position(2, 0))
assert(real_pos.x == -4 and real_pos.y == -5)
real_pos = test_map.to_real_pos(Position(20, 20))
assert(real_pos.x == 5 and real_pos.y == 5)
real_pos = test_map.to_real_pos(Position(19, 19))
assert(real_pos.x == 4.5 and real_pos.y == 4.5)
print('OK')
def utils_bresenham_line():
print('Test: utils_bresenham_line')
line = bresenham_line(0, 0, 5, 5)
assert(line[0].x == 0 and line[0].y == 0)
assert(line[1].x == 1 and line[1].y == 1)
assert(line[2].x == 2 and line[2].y == 2)
assert(line[3].x == 3 and line[3].y == 3)
assert(line[4].x == 4 and line[4].y == 4)
assert(line[5].x == 5 and line[5].y == 5)
line = bresenham_line(5, 5, 0, 0)
assert(line[0].x == 5 and line[0].y == 5)
assert(line[1].x == 4 and line[1].y == 4)
assert(line[2].x == 3 and line[2].y == 3)
assert(line[3].x == 2 and line[3].y == 2)
assert(line[4].x == 1 and line[4].y == 1)
assert(line[5].x == 0 and line[5].y == 0)
line = bresenham_line(2, 5, 8, 9)
assert(line[0].x == 2 and line[0].y == 5)
assert(line[1].x == 3 and line[1].y == 6)
assert(line[2].x == 4 and line[2].y == 6)
assert(line[3].x == 5 and line[3].y == 7)
assert(line[4].x == 6 and line[4].y == 8)
assert(line[5].x == 7 and line[5].y == 8)
assert(line[6].x == 8 and line[6].y == 9)
print('OK')
def utils_filled_midpoint_circle():
print('Test: utils_filled_midpoint_circle')
circle = filled_midpoint_circle(5, 5, 5)
result = [' x: 0 y: 5',
' x: 1 y: 5',
' x: 2 y: 5',
' x: 3 y: 5',
' x: 4 y: 5',
' x: 5 y: 5',
' x: 6 y: 5',
' x: 7 y: 5',
' x: 8 y: 5',
' x: 9 y: 5',
' x: 10 y: 5',
' x: 0 y: 6',
' x: 1 y: 6',
' x: 2 y: 6',
' x: 3 y: 6',
' x: 4 y: 6',
' x: 5 y: 6',
' x: 6 y: 6',
' x: 7 y: 6',
' x: 8 y: 6',
' x: 9 y: 6',
' x: 10 y: 6',
' x: 0 y: 4',
' x: 1 y: 4',
' x: 2 y: 4',
' x: 3 y: 4',
' x: 4 y: 4',
' x: 5 y: 4',
' x: 6 y: 4',
' x: 7 y: 4',
' x: 8 y: 4',
' x: 9 y: 4',
' x: 10 y: 4',
' x: 0 y: 7',
' x: 1 y: 7',
' x: 2 y: 7',
' x: 3 y: 7',
' x: 4 y: 7',
' x: 5 y: 7',
' x: 6 y: 7',
' x: 7 y: 7',
' x: 8 y: 7',
' x: 9 y: 7',
' x: 10 y: 7',
' x: 0 y: 3',
' x: 1 y: 3',
' x: 2 y: 3',
' x: 3 y: 3',
' x: 4 y: 3',
' x: 5 y: 3',
' x: 6 y: 3',
' x: 7 y: 3',
' x: 8 y: 3',
' x: 9 y: 3',
' x: 10 y: 3',
' x: 3 y: 10',
' x: 3 y: 0',
' x: 4 y: 10',
' x: 4 y: 0',
' x: 5 y: 10',
' x: 5 y: 0',
' x: 6 y: 10',
' x: 6 y: 0',
' x: 7 y: 10',
' x: 7 y: 0',
' x: 1 y: 8',
' x: 2 y: 8',
' x: 3 y: 8',
' x: 4 y: 8',
' x: 5 y: 8',
' x: 6 y: 8',
' x: 7 y: 8',
' x: 8 y: 8',
' x: 9 y: 8',
' x: 1 y: 2',
' x: 2 y: 2',
' x: 3 y: 2',
' x: 4 y: 2',
' x: 5 y: 2',
' x: 6 y: 2',
' x: 7 y: 2',
' x: 8 y: 2',
' x: 9 y: 2',
' x: 2 y: 9',
' x: 2 y: 1',
' x: 3 y: 9',
' x: 3 y: 1',
' x: 4 y: 9',
' x: 4 y: 1',
' x: 5 y: 9',
' x: 5 y: 1',
' x: 6 y: 9',
' x: 6 y: 1',
' x: 7 y: 9',
' x: 7 y: 1',
' x: 8 y: 9',
' x: 8 y: 1']
for i in range(len(circle)):
assert(str(circle[i] == result[i]))
print('OK')
def position_properties():
print('Test: position_properties')
pos_1 = Position(x=1, y=2, angle=4)
pos_2 = Position(x=1, y=2, angle=4)
pos_3 = Position(x=1, angle=4, z=5, y=2)
assert(str(pos_1) == ' x: 1 y: 2 angle: 4')
assert(str(pos_2) == ' x: 1 y: 2 angle: 4')
assert(str(pos_3) == ' x: 1 y: 2 z: 5 angle: 4')
assert(pos_1 == pos_2)
assert(pos_1 != pos_3)
assert(pos_2 != pos_3)
poses = set([])
assert(len(poses) == 0)
poses.add(pos_1)
assert(len(poses) == 1)
poses.add(pos_3)
assert(len(poses) == 2)
poses.add(pos_2)
assert(len(poses) == 2)
print('OK')
if __name__ == '__main__':
map_to_grid_pos()
map_to_real_pos()
utils_bresenham_line()
utils_filled_midpoint_circle()
position_properties()
print('End of tests')
print('OK')
| 33.651741 | 62 | 0.409077 | from mapping.map import Map
from utils.position import Position
from utils.utils import bresenham_line, filled_midpoint_circle
import matplotlib.pyplot as plt
def map_to_grid_pos():
print('Test: map_to_grid_pos')
lower_left_pos = Position(-5.0, -5.0)
upper_right_pos = Position(5.0, 5.0)
test_map = Map(lower_left_pos, upper_right_pos, 2.0)
assert(test_map.grid.shape == (20, 20))
grid_pos = test_map.to_grid_pos(Position(-5, -5))
assert(grid_pos.x == 0 and grid_pos.y == 0)
grid_pos = test_map.to_grid_pos(Position(-4.5, -5))
assert(grid_pos.x == 1 and grid_pos.y == 0)
grid_pos = test_map.to_grid_pos(Position(-4.501, -5))
assert(grid_pos.x == 0 and grid_pos.y == 0)
grid_pos = test_map.to_grid_pos(Position(5, 5))
assert(grid_pos.x == 20 and grid_pos.y == 20)
grid_pos = test_map.to_grid_pos(Position(4.99, 4.99))
assert(grid_pos.x == 19 and grid_pos.y == 19)
print('OK')
def map_to_real_pos():
print('Test: map_to_real_pos')
lower_left_pos = Position(-5.0, -5.0)
upper_right_pos = Position(5.0, 5.0)
test_map = Map(lower_left_pos, upper_right_pos, 2.0)
assert(test_map.grid.shape == (20, 20))
real_pos = test_map.to_real_pos(Position(0, 0))
assert(real_pos.x == -5 and real_pos.y == -5)
real_pos = test_map.to_real_pos(Position(1, 0))
assert(real_pos.x == -4.5 and real_pos.y == -5)
real_pos = test_map.to_real_pos(Position(2, 0))
assert(real_pos.x == -4 and real_pos.y == -5)
real_pos = test_map.to_real_pos(Position(20, 20))
assert(real_pos.x == 5 and real_pos.y == 5)
real_pos = test_map.to_real_pos(Position(19, 19))
assert(real_pos.x == 4.5 and real_pos.y == 4.5)
print('OK')
def utils_bresenham_line():
print('Test: utils_bresenham_line')
line = bresenham_line(0, 0, 5, 5)
assert(line[0].x == 0 and line[0].y == 0)
assert(line[1].x == 1 and line[1].y == 1)
assert(line[2].x == 2 and line[2].y == 2)
assert(line[3].x == 3 and line[3].y == 3)
assert(line[4].x == 4 and line[4].y == 4)
assert(line[5].x == 5 and line[5].y == 5)
line = bresenham_line(5, 5, 0, 0)
assert(line[0].x == 5 and line[0].y == 5)
assert(line[1].x == 4 and line[1].y == 4)
assert(line[2].x == 3 and line[2].y == 3)
assert(line[3].x == 2 and line[3].y == 2)
assert(line[4].x == 1 and line[4].y == 1)
assert(line[5].x == 0 and line[5].y == 0)
line = bresenham_line(2, 5, 8, 9)
assert(line[0].x == 2 and line[0].y == 5)
assert(line[1].x == 3 and line[1].y == 6)
assert(line[2].x == 4 and line[2].y == 6)
assert(line[3].x == 5 and line[3].y == 7)
assert(line[4].x == 6 and line[4].y == 8)
assert(line[5].x == 7 and line[5].y == 8)
assert(line[6].x == 8 and line[6].y == 9)
print('OK')
def utils_filled_midpoint_circle():
print('Test: utils_filled_midpoint_circle')
circle = filled_midpoint_circle(5, 5, 5)
result = [' x: 0 y: 5',
' x: 1 y: 5',
' x: 2 y: 5',
' x: 3 y: 5',
' x: 4 y: 5',
' x: 5 y: 5',
' x: 6 y: 5',
' x: 7 y: 5',
' x: 8 y: 5',
' x: 9 y: 5',
' x: 10 y: 5',
' x: 0 y: 6',
' x: 1 y: 6',
' x: 2 y: 6',
' x: 3 y: 6',
' x: 4 y: 6',
' x: 5 y: 6',
' x: 6 y: 6',
' x: 7 y: 6',
' x: 8 y: 6',
' x: 9 y: 6',
' x: 10 y: 6',
' x: 0 y: 4',
' x: 1 y: 4',
' x: 2 y: 4',
' x: 3 y: 4',
' x: 4 y: 4',
' x: 5 y: 4',
' x: 6 y: 4',
' x: 7 y: 4',
' x: 8 y: 4',
' x: 9 y: 4',
' x: 10 y: 4',
' x: 0 y: 7',
' x: 1 y: 7',
' x: 2 y: 7',
' x: 3 y: 7',
' x: 4 y: 7',
' x: 5 y: 7',
' x: 6 y: 7',
' x: 7 y: 7',
' x: 8 y: 7',
' x: 9 y: 7',
' x: 10 y: 7',
' x: 0 y: 3',
' x: 1 y: 3',
' x: 2 y: 3',
' x: 3 y: 3',
' x: 4 y: 3',
' x: 5 y: 3',
' x: 6 y: 3',
' x: 7 y: 3',
' x: 8 y: 3',
' x: 9 y: 3',
' x: 10 y: 3',
' x: 3 y: 10',
' x: 3 y: 0',
' x: 4 y: 10',
' x: 4 y: 0',
' x: 5 y: 10',
' x: 5 y: 0',
' x: 6 y: 10',
' x: 6 y: 0',
' x: 7 y: 10',
' x: 7 y: 0',
' x: 1 y: 8',
' x: 2 y: 8',
' x: 3 y: 8',
' x: 4 y: 8',
' x: 5 y: 8',
' x: 6 y: 8',
' x: 7 y: 8',
' x: 8 y: 8',
' x: 9 y: 8',
' x: 1 y: 2',
' x: 2 y: 2',
' x: 3 y: 2',
' x: 4 y: 2',
' x: 5 y: 2',
' x: 6 y: 2',
' x: 7 y: 2',
' x: 8 y: 2',
' x: 9 y: 2',
' x: 2 y: 9',
' x: 2 y: 1',
' x: 3 y: 9',
' x: 3 y: 1',
' x: 4 y: 9',
' x: 4 y: 1',
' x: 5 y: 9',
' x: 5 y: 1',
' x: 6 y: 9',
' x: 6 y: 1',
' x: 7 y: 9',
' x: 7 y: 1',
' x: 8 y: 9',
' x: 8 y: 1']
for i in range(len(circle)):
assert(str(circle[i] == result[i]))
print('OK')
def position_properties():
print('Test: position_properties')
pos_1 = Position(x=1, y=2, angle=4)
pos_2 = Position(x=1, y=2, angle=4)
pos_3 = Position(x=1, angle=4, z=5, y=2)
assert(str(pos_1) == ' x: 1 y: 2 angle: 4')
assert(str(pos_2) == ' x: 1 y: 2 angle: 4')
assert(str(pos_3) == ' x: 1 y: 2 z: 5 angle: 4')
assert(pos_1 == pos_2)
assert(pos_1 != pos_3)
assert(pos_2 != pos_3)
poses = set([])
assert(len(poses) == 0)
poses.add(pos_1)
assert(len(poses) == 1)
poses.add(pos_3)
assert(len(poses) == 2)
poses.add(pos_2)
assert(len(poses) == 2)
print('OK')
if __name__ == '__main__':
map_to_grid_pos()
map_to_real_pos()
utils_bresenham_line()
utils_filled_midpoint_circle()
position_properties()
print('End of tests')
print('OK')
| true | true |
f73cad27089a75a5f21bee4d25a349744852b8dc | 442 | py | Python | dbcm2/_nbdev.py | barrypj/dbcm2 | 637921187d4359e77177aee657bcefc862d42925 | [
"Apache-2.0"
] | null | null | null | dbcm2/_nbdev.py | barrypj/dbcm2 | 637921187d4359e77177aee657bcefc862d42925 | [
"Apache-2.0"
] | null | null | null | dbcm2/_nbdev.py | barrypj/dbcm2 | 637921187d4359e77177aee657bcefc862d42925 | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"ConnectionError": "00_DBcm.ipynb",
"CredentialsError": "00_DBcm.ipynb",
"SQLError": "00_DBcm.ipynb",
"UseDatabase": "00_DBcm.ipynb"}
modules = ["DBcm.py"]
doc_url = "https://barrypj.github.io/dbcm2/"
git_url = "https://github.com/barrypj/dbcm2/tree/branch/"
def custom_doc_links(name): return None
| 26 | 61 | 0.667421 |
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"ConnectionError": "00_DBcm.ipynb",
"CredentialsError": "00_DBcm.ipynb",
"SQLError": "00_DBcm.ipynb",
"UseDatabase": "00_DBcm.ipynb"}
modules = ["DBcm.py"]
doc_url = "https://barrypj.github.io/dbcm2/"
git_url = "https://github.com/barrypj/dbcm2/tree/branch/"
def custom_doc_links(name): return None
| true | true |
f73cad93c0f5712bda49b85741dff479e6491c32 | 2,870 | py | Python | shadowlands/sl_contract/__init__.py | kayagoban/shadowlands | bf10f9d17788bcc49a8f515b3f5049c63227b670 | [
"MIT"
] | 140 | 2018-10-10T18:52:47.000Z | 2022-02-14T11:31:52.000Z | shadowlands/sl_contract/__init__.py | kayagoban/shadowlands | bf10f9d17788bcc49a8f515b3f5049c63227b670 | [
"MIT"
] | 12 | 2018-10-10T22:07:58.000Z | 2020-05-12T10:56:29.000Z | shadowlands/sl_contract/__init__.py | kayagoban/shadowlands | bf10f9d17788bcc49a8f515b3f5049c63227b670 | [
"MIT"
] | 9 | 2018-10-10T20:17:55.000Z | 2021-12-16T09:10:19.000Z | from eth_utils import decode_hex
class ContractConfigError(Exception):
pass
class InvalidW3Error(Exception):
pass
class SLContract():
def __init__(self, node, address=None, provided_abi=None):
self._contract = None
self._node = node
if not self._node.w3.isConnected():
raise InvalidW3Error('w3 is not connected in the node you passed in to the Contract constructor')
try:
if address is None:
address = self.__class__.__dict__[node.network_name.upper()]
if address is '':
raise
except:
raise ContractConfigError(
'No address given for contract on this network. Did you set the address constant for {}?'.format(
node.network_name.upper()
)
)
# If on MAINNET, Attempt to resolve
try:
# This is the best way to verify the hex string address is actually an address.
address = node.w3.toChecksumAddress(address)
except ValueError:
# if on mainnet, we can attempt to resolve the address if this is really an ENS name.
if node.network_name.upper() == 'MAINNET':
address = node.ens.address(address)
if address is None:
raise ContractConfigError('Attempt to resolve contract address from ENS failed.')
else:
raise ContractConfigError("Given contract address '{}' does not appear to be valid.".format(address))
if self.ABI is None and provided_abi is None:
raise ContractConfigError('Could not open the Dapp contract with the given address and ABI.')
if provided_abi is None:
self._contract = node.w3.eth.contract(address, abi=self.ABI)
else:
self._contract = node.w3.eth.contract(address, abi=provided_abi)
if self._contract == None:
raise ContractConfigError('Could not open the Dapp contract with the given address and ABI.')
@property
def w3(self):
return self._node.w3
@property
def node(self):
return self._node
@property
def sha3(self):
return self._node.w3.sha3
@property
def functions(self):
return self._contract.functions
@property
def address(self):
return self._contract.address
def toWei(self, amount, denomination):
return self._node.w3.toWei(amount, denomination)
def fromWei(self, amount, denomination):
return self._node.w3.fromWei(amount, denomination)
def bytes32(self, an_int):
return (an_int).to_bytes(32, byteorder='big')
def to_sol_addr(self, address):
return decode_hex(address.replace('0x',''))
def to_bytes_32(self, value):
return self.bytes32(value)
| 32.613636 | 117 | 0.618467 | from eth_utils import decode_hex
class ContractConfigError(Exception):
pass
class InvalidW3Error(Exception):
pass
class SLContract():
def __init__(self, node, address=None, provided_abi=None):
self._contract = None
self._node = node
if not self._node.w3.isConnected():
raise InvalidW3Error('w3 is not connected in the node you passed in to the Contract constructor')
try:
if address is None:
address = self.__class__.__dict__[node.network_name.upper()]
if address is '':
raise
except:
raise ContractConfigError(
'No address given for contract on this network. Did you set the address constant for {}?'.format(
node.network_name.upper()
)
)
try:
address = node.w3.toChecksumAddress(address)
except ValueError:
if node.network_name.upper() == 'MAINNET':
address = node.ens.address(address)
if address is None:
raise ContractConfigError('Attempt to resolve contract address from ENS failed.')
else:
raise ContractConfigError("Given contract address '{}' does not appear to be valid.".format(address))
if self.ABI is None and provided_abi is None:
raise ContractConfigError('Could not open the Dapp contract with the given address and ABI.')
if provided_abi is None:
self._contract = node.w3.eth.contract(address, abi=self.ABI)
else:
self._contract = node.w3.eth.contract(address, abi=provided_abi)
if self._contract == None:
raise ContractConfigError('Could not open the Dapp contract with the given address and ABI.')
@property
def w3(self):
return self._node.w3
@property
def node(self):
return self._node
@property
def sha3(self):
return self._node.w3.sha3
@property
def functions(self):
return self._contract.functions
@property
def address(self):
return self._contract.address
def toWei(self, amount, denomination):
return self._node.w3.toWei(amount, denomination)
def fromWei(self, amount, denomination):
return self._node.w3.fromWei(amount, denomination)
def bytes32(self, an_int):
return (an_int).to_bytes(32, byteorder='big')
def to_sol_addr(self, address):
return decode_hex(address.replace('0x',''))
def to_bytes_32(self, value):
return self.bytes32(value)
| true | true |
f73cae2fdf5cd48c33c248edc42c16dd21785fbf | 589 | py | Python | dynabuffers-python/tests/usecase/Schema07Test.py | leftshiftone/dynabuffers | c3e94c56989be3df87b50b8d9e17d1ea86199ede | [
"Apache-2.0"
] | 2 | 2019-10-28T12:28:01.000Z | 2020-07-07T12:25:40.000Z | dynabuffers-python/tests/usecase/Schema07Test.py | leftshiftone/dynabuffers | c3e94c56989be3df87b50b8d9e17d1ea86199ede | [
"Apache-2.0"
] | 1 | 2021-12-21T07:35:22.000Z | 2021-12-21T07:35:22.000Z | dynabuffers-python/tests/usecase/Schema07Test.py | leftshiftone/dynabuffers | c3e94c56989be3df87b50b8d9e17d1ea86199ede | [
"Apache-2.0"
] | 1 | 2020-03-19T09:19:43.000Z | 2020-03-19T09:19:43.000Z | import os
import unittest
from antlr4 import FileStream
from dynabuffers.Dynabuffers import Dynabuffers
class Schema05Test(unittest.TestCase):
root_dir = os.path.dirname(os.path.realpath(__file__))
def test_parse(self):
with open(self.root_dir + "/1.jpg", 'rb') as f:
data = b"".join(f.readlines())
engine = Dynabuffers.parse(FileStream(self.root_dir + "/schema07.dbs"))
map = engine.deserialize(engine.serialize({"image": data}))
self.assertEqual(map, {"image": data})
if __name__ == "__main__":
unittest.main()
| 25.608696 | 83 | 0.658744 | import os
import unittest
from antlr4 import FileStream
from dynabuffers.Dynabuffers import Dynabuffers
class Schema05Test(unittest.TestCase):
root_dir = os.path.dirname(os.path.realpath(__file__))
def test_parse(self):
with open(self.root_dir + "/1.jpg", 'rb') as f:
data = b"".join(f.readlines())
engine = Dynabuffers.parse(FileStream(self.root_dir + "/schema07.dbs"))
map = engine.deserialize(engine.serialize({"image": data}))
self.assertEqual(map, {"image": data})
if __name__ == "__main__":
unittest.main()
| true | true |
f73cae82e0878e186c75679681f3254c01370313 | 18 | py | Python | encoders/__init__.py | alexbires/webshellmanagement | ad12f5e8889c5e03bab7efadc5c3d65f7155290c | [
"MIT"
] | 1 | 2015-02-08T03:23:17.000Z | 2015-02-08T03:23:17.000Z | encoders/__init__.py | alexbires/webshellmanagement | ad12f5e8889c5e03bab7efadc5c3d65f7155290c | [
"MIT"
] | null | null | null | encoders/__init__.py | alexbires/webshellmanagement | ad12f5e8889c5e03bab7efadc5c3d65f7155290c | [
"MIT"
] | 2 | 2020-01-17T09:39:25.000Z | 2020-01-17T09:39:33.000Z | __all__ = ['php']
| 9 | 17 | 0.555556 | __all__ = ['php']
| true | true |
f73caf5ede9d719f5f69b4c939a834518f625245 | 554 | py | Python | meiduo_mall/meiduo_mall/apps/addresses/urls.py | lhz0707/meiduo | 5ba6bbb82a28f5c93e7c8d40cdab7ee41b6593d0 | [
"MIT"
] | null | null | null | meiduo_mall/meiduo_mall/apps/addresses/urls.py | lhz0707/meiduo | 5ba6bbb82a28f5c93e7c8d40cdab7ee41b6593d0 | [
"MIT"
] | null | null | null | meiduo_mall/meiduo_mall/apps/addresses/urls.py | lhz0707/meiduo | 5ba6bbb82a28f5c93e7c8d40cdab7ee41b6593d0 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from django.contrib import admin
from . import views
urlpatterns = [
url(r'^addresses',views.AddressView.as_view()),
# 获取省信息
url(r'^areas/$', views.AreasView.as_view()),
# 保存收貨地址
url(r'addresses/create/$',views.AddressCreateView.as_view()),
# 修改收貨地址
# url(r'addresses//(?P<pk>\d+)/$',views.AddressDefaultView.as_view()),
# url(r'^addresses/(?P<pk>\d+)/default/$', views.AddressDefaultView.as_view()),
# # 设置地址标题
# url(r'^addresses/(?P<pk>\d+)/title/$', views.AddressTitleView.as_view()),
]
| 32.588235 | 82 | 0.666065 | from django.conf.urls import url
from django.contrib import admin
from . import views
urlpatterns = [
url(r'^addresses',views.AddressView.as_view()),
url(r'^areas/$', views.AreasView.as_view()),
url(r'addresses/create/$',views.AddressCreateView.as_view()),
| true | true |
f73caf7955658402212e4b9c371c86bb9aac86e7 | 6,642 | py | Python | cocomo.py | SaulNunez/Estimacion-COCOMO | 58f504f5f096914a02961d360ab7537cc4ce2cde | [
"MIT"
] | null | null | null | cocomo.py | SaulNunez/Estimacion-COCOMO | 58f504f5f096914a02961d360ab7537cc4ce2cde | [
"MIT"
] | null | null | null | cocomo.py | SaulNunez/Estimacion-COCOMO | 58f504f5f096914a02961d360ab7537cc4ce2cde | [
"MIT"
] | null | null | null | from enum import Enum
#Constantes del método Cocomo
class Cocomo():
class Tipo(Enum):
ORGANICO = 1
SEMI_ACOPLADO = 2
EMPOTRADO = 3
class Modelo(Enum):
BASICO = 1
INTERMEDIO = 2
AVANZADO = 3
constantes = {
Modelo.BASICO : {
Tipo.ORGANICO : {
'a':2.40,
'b':1.05,
'c':2.50,
'd':0.38
},
Tipo.SEMI_ACOPLADO : {
'a':3.00,
'b':1.12,
'c':2.50,
'd':0.35
},
Tipo.EMPOTRADO : {
'a':3.60,
'b':1.20,
'c':2.50,
'd':0.33
}
},
Modelo.INTERMEDIO : {
Tipo.ORGANICO : {
'a':3.20,
'b':1.05,
'c':2.50,
'd':0.38
},
Tipo.SEMI_ACOPLADO : {
'a':3.00,
'b':1.12,
'c':2.50,
'd':0.35
},
Tipo.EMPOTRADO : {
'a':2.80,
'b':1.20,
'c':2.50,
'd':0.32
}
},
Modelo.AVANZADO : {
#Nota, estos valores no esta verificados
#Solo estan puestos para evitar fallas en ejecución
Tipo.ORGANICO : {
'a':3.20,
'b':1.05,
'c':2.50,
'd':0.38
},
Tipo.SEMI_ACOPLADO : {
'a':3.00,
'b':1.12,
'c':2.50,
'd':0.35
},
Tipo.EMPOTRADO : {
'a':2.80,
'b':1.20,
'c':2.50,
'd':0.32
}
}
}
indice_loc = {
'Ensamblador': 320,
'Macroensamblador': 213,
'C': 150,
'Fortran': 106,
'Cobol': 106,
'Pascal': 91,
'Cobol ANSI 85': 91,
'Basic': 91,
'RPG': 80,
'PL/I': 80,
'Ada': 71,
'Basic ANSI/Quick/Turbo': 64,
'Java': 53,
'Visual C++': 34,
'Foxpro 2,5': 34,
'Visual Basic': 32,
'Delphi': 29,
'C++': 29,
'Visual Cobol': 20,
'Clipper': 19,
'Power Builder': 16,
'Hoja de Calculo': 6,
'SQL' : 12,
'Prolog/LISP': 64
}
preguntas_grado_total_influencia = [
'1) ¿Requiere el sistema copias de seguridad y de recuperación fiables?',
'2) ¿Se requiere de comunicación de datos?',
'3) ¿Existen funciones de procesamiento distribuido?',
'4) ¿Es crítico el rendimiento?',
'5) ¿Se ejecutará el sistema en un entorno operativo existente y fuertemente utilizado?',
'6) ¿Requiere el sistema entrada de datos interactiva?',
'7) ¿Requiere la entrada de datos interactiva que las transacciones de entrada se lleven a cao sobre múltiples pantallas u operaciones?',
'8) ¿Se actualizan los archivos maestros de forma interactiva?',
'9) ¿Son complejas las entradas, salidas, archivos o las peticiones?',
'10) ¿Es complejo el procesamiento interno?',
'11) ¿Se ha diseñado el código para ser reutilizable?',
'12) ¿Estan incluidas en el diseño la conversión y la instalación?',
'13) ¿Se ha diseñado el sistema para soportar múltiples instalaciones en diferentes organizaciones?',
'14) ¿Se ha diseñado la aplicación para facilitar los cambios y para ser fácilmente utilizada por el usuario?'
]
conductores_coste_fae = {
'Flexibilidad requerida del software': {
'Muy Bajo':0.75,
'Bajo':0.88,
'Nominal':1.00,
'Alto':1.15,
'Muy Alto':1.40
},
'Tamaño de la base de datos': {
'Bajo':0.94,
'Nominal':1.00,
'Alto':1.08,
'Muy Alto':1.16
},
'Complejidad del producto': {
'Muy Bajo':0.70,
'Bajo':0.85,
'Nominal':1.00,
'Alto':1.15,
'Muy Alto':1.30,
'Extremandamente Alto':1.65
},
'Restricciones del teimpo de ejecución':{
'Nominal':1.00,
'Alto':1.11,
'Muy Alto':1.30,
'Extremandamente Alto':1.66
},
'Restricciones del almacenamiento principal': {
'Nominal':1.00,
'Alto':1.06,
'Muy Alto':1.21,
'Extremandamente Alto':1.56
},
'Volatilidad de la máquina virtual':{
'Bajo':0.87,
'Nominal':1.00,
'Alto':1.15,
'Muy Alto':1.30
},
'Tiempo de respuesta del ordenador':{
'Bajo':0.87,
'Nominal':1.00,
'Alto':1.07,
'Muy Alto':1.15
},
'Capacidad del analista':{
'Muy Bajo':1.46,
'Bajo':1.19,
'Nominal':1.00,
'Alto':0.86,
'Muy Alto':0.71
},
'Experiencia de la aplicación':{
'Muy Bajo':1.29,
'Bajo':1.13,
'Nominal':1.00,
'Alto':0.91,
'Muy Alto':0.82
},
'Capacidad de los programadores':{
'Muy Bajo':1.42,
'Bajo':1.17,
'Nominal':1.00,
'Alto':0.86,
'Muy Alto':0.70
},
'Experiencia en S.O utlizado':{
'Muy Bajo':1.21,
'Bajo':1.10,
'Nominal':1.00,
'Alto':0.90
},
'Experiencia en el lenguaje de programación':{
'Muy Bajo':1.14,
'Bajo':1.07,
'Nominal':1.00,
'Alto':0.95
},
'Prácticas de programación modernas':{
'Muy Bajo':1.24,
'Bajo':1.10,
'Nominal':1.00,
'Alto':0.91,
'Muy Alto':0.82
},
'Utilización de herramientas':{
'Muy Bajo':1.24,
'Bajo':1.10,
'Nominal':1.00,
'Alto':0.91,
'Muy Alto':0.83
},
'Limitaciones de planificación del proyecto':{
'Muy Bajo':1.23,
'Bajo':1.08,
'Nominal':1.00,
'Alto':1.04,
'Muy Alto':1.10
}
} | 29.651786 | 146 | 0.4053 | from enum import Enum
class Cocomo():
class Tipo(Enum):
ORGANICO = 1
SEMI_ACOPLADO = 2
EMPOTRADO = 3
class Modelo(Enum):
BASICO = 1
INTERMEDIO = 2
AVANZADO = 3
constantes = {
Modelo.BASICO : {
Tipo.ORGANICO : {
'a':2.40,
'b':1.05,
'c':2.50,
'd':0.38
},
Tipo.SEMI_ACOPLADO : {
'a':3.00,
'b':1.12,
'c':2.50,
'd':0.35
},
Tipo.EMPOTRADO : {
'a':3.60,
'b':1.20,
'c':2.50,
'd':0.33
}
},
Modelo.INTERMEDIO : {
Tipo.ORGANICO : {
'a':3.20,
'b':1.05,
'c':2.50,
'd':0.38
},
Tipo.SEMI_ACOPLADO : {
'a':3.00,
'b':1.12,
'c':2.50,
'd':0.35
},
Tipo.EMPOTRADO : {
'a':2.80,
'b':1.20,
'c':2.50,
'd':0.32
}
},
Modelo.AVANZADO : {
Tipo.ORGANICO : {
'a':3.20,
'b':1.05,
'c':2.50,
'd':0.38
},
Tipo.SEMI_ACOPLADO : {
'a':3.00,
'b':1.12,
'c':2.50,
'd':0.35
},
Tipo.EMPOTRADO : {
'a':2.80,
'b':1.20,
'c':2.50,
'd':0.32
}
}
}
indice_loc = {
'Ensamblador': 320,
'Macroensamblador': 213,
'C': 150,
'Fortran': 106,
'Cobol': 106,
'Pascal': 91,
'Cobol ANSI 85': 91,
'Basic': 91,
'RPG': 80,
'PL/I': 80,
'Ada': 71,
'Basic ANSI/Quick/Turbo': 64,
'Java': 53,
'Visual C++': 34,
'Foxpro 2,5': 34,
'Visual Basic': 32,
'Delphi': 29,
'C++': 29,
'Visual Cobol': 20,
'Clipper': 19,
'Power Builder': 16,
'Hoja de Calculo': 6,
'SQL' : 12,
'Prolog/LISP': 64
}
preguntas_grado_total_influencia = [
'1) ¿Requiere el sistema copias de seguridad y de recuperación fiables?',
'2) ¿Se requiere de comunicación de datos?',
'3) ¿Existen funciones de procesamiento distribuido?',
'4) ¿Es crítico el rendimiento?',
'5) ¿Se ejecutará el sistema en un entorno operativo existente y fuertemente utilizado?',
'6) ¿Requiere el sistema entrada de datos interactiva?',
'7) ¿Requiere la entrada de datos interactiva que las transacciones de entrada se lleven a cao sobre múltiples pantallas u operaciones?',
'8) ¿Se actualizan los archivos maestros de forma interactiva?',
'9) ¿Son complejas las entradas, salidas, archivos o las peticiones?',
'10) ¿Es complejo el procesamiento interno?',
'11) ¿Se ha diseñado el código para ser reutilizable?',
'12) ¿Estan incluidas en el diseño la conversión y la instalación?',
'13) ¿Se ha diseñado el sistema para soportar múltiples instalaciones en diferentes organizaciones?',
'14) ¿Se ha diseñado la aplicación para facilitar los cambios y para ser fácilmente utilizada por el usuario?'
]
conductores_coste_fae = {
'Flexibilidad requerida del software': {
'Muy Bajo':0.75,
'Bajo':0.88,
'Nominal':1.00,
'Alto':1.15,
'Muy Alto':1.40
},
'Tamaño de la base de datos': {
'Bajo':0.94,
'Nominal':1.00,
'Alto':1.08,
'Muy Alto':1.16
},
'Complejidad del producto': {
'Muy Bajo':0.70,
'Bajo':0.85,
'Nominal':1.00,
'Alto':1.15,
'Muy Alto':1.30,
'Extremandamente Alto':1.65
},
'Restricciones del teimpo de ejecución':{
'Nominal':1.00,
'Alto':1.11,
'Muy Alto':1.30,
'Extremandamente Alto':1.66
},
'Restricciones del almacenamiento principal': {
'Nominal':1.00,
'Alto':1.06,
'Muy Alto':1.21,
'Extremandamente Alto':1.56
},
'Volatilidad de la máquina virtual':{
'Bajo':0.87,
'Nominal':1.00,
'Alto':1.15,
'Muy Alto':1.30
},
'Tiempo de respuesta del ordenador':{
'Bajo':0.87,
'Nominal':1.00,
'Alto':1.07,
'Muy Alto':1.15
},
'Capacidad del analista':{
'Muy Bajo':1.46,
'Bajo':1.19,
'Nominal':1.00,
'Alto':0.86,
'Muy Alto':0.71
},
'Experiencia de la aplicación':{
'Muy Bajo':1.29,
'Bajo':1.13,
'Nominal':1.00,
'Alto':0.91,
'Muy Alto':0.82
},
'Capacidad de los programadores':{
'Muy Bajo':1.42,
'Bajo':1.17,
'Nominal':1.00,
'Alto':0.86,
'Muy Alto':0.70
},
'Experiencia en S.O utlizado':{
'Muy Bajo':1.21,
'Bajo':1.10,
'Nominal':1.00,
'Alto':0.90
},
'Experiencia en el lenguaje de programación':{
'Muy Bajo':1.14,
'Bajo':1.07,
'Nominal':1.00,
'Alto':0.95
},
'Prácticas de programación modernas':{
'Muy Bajo':1.24,
'Bajo':1.10,
'Nominal':1.00,
'Alto':0.91,
'Muy Alto':0.82
},
'Utilización de herramientas':{
'Muy Bajo':1.24,
'Bajo':1.10,
'Nominal':1.00,
'Alto':0.91,
'Muy Alto':0.83
},
'Limitaciones de planificación del proyecto':{
'Muy Bajo':1.23,
'Bajo':1.08,
'Nominal':1.00,
'Alto':1.04,
'Muy Alto':1.10
}
} | true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.