index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
986,600 | 62e00944d8d6579b88415dfc137bdfdc466bf41d | # Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The piptool module imports pip requirements into Bazel rules."""
import argparse
import ast
import atexit
import collections
import hashlib
import io
import itertools
import json
import os
import pkgutil
import pkg_resources
import re
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import shutil
import sys
import tempfile
import toposort
import zipfile
from six.moves import urllib
# Note: We carefully import the following modules in a particular
# order, since these modules modify the import path and machinery.
import pkg_resources
def extract_packages(package_names):
    """Extract zipfile contents to disk and add to import path.

    When running from a zipped par/zip file, pip/setuptools/wheel need real
    files on disk.  Each named package is extracted into a temporary
    directory, prepended to sys.path, and exported via PYTHONPATH so that
    subprocesses see the same packages.

    Args:
        package_names: requirement strings (e.g. 'pip') parseable by
            pkg_resources.Requirement.parse.
    """
    # Set a safe extraction dir; clean it up on interpreter exit.
    extraction_tmpdir = tempfile.mkdtemp()
    atexit.register(lambda: shutil.rmtree(
        extraction_tmpdir, ignore_errors=True))
    pkg_resources.set_extraction_path(extraction_tmpdir)
    # Extract each package to disk
    dirs_to_add = []
    for package_name in package_names:
        req = pkg_resources.Requirement.parse(package_name)
        extraction_dir = pkg_resources.resource_filename(req, '')
        dirs_to_add.append(extraction_dir)
    # Add extracted directories to import path ahead of their zip file
    # counterparts.
    sys.path[0:0] = dirs_to_add
    existing_pythonpath = os.environ.get('PYTHONPATH')
    if existing_pythonpath:
        dirs_to_add.extend(existing_pythonpath.split(':'))
    # Propagate to subprocesses (pip shells out during builds).
    os.environ['PYTHONPATH'] = ':'.join(dirs_to_add)
# Wheel, pip, and setuptools are much happier running from actual
# files on disk, rather than entries in a zipfile. Extract zipfile
# contents, add those contents to the path, then import them.
extract_packages(['pip', 'setuptools', 'wheel'])

# Defeat pip's attempt to mangle sys.path: import it against a copy,
# then restore the original list object afterwards.
saved_sys_path = sys.path
sys.path = sys.path[:]
import pip
import pip._internal
sys.path = saved_sys_path

import setuptools
import wheel
def pip_main(argv, env=None):
    """Invoke pip's internal CLI entry point with *argv*.

    Args:
        argv: pip arguments (subcommand first), without the program name.
        env: optional dict of environment variables applied for the
            duration of the call only.

    Returns:
        pip's integer exit status (0 on success).
    """
    # Extract the certificates from the PAR following the example of get-pip.py
    # https://github.com/pypa/get-pip/blob/04e994a41ff0a97812d6d2/templates/default.py#L167-L171
    cert_path = os.path.join(tempfile.mkdtemp(), "cacert.pem")
    with open(cert_path, "wb") as cert:
        cert.write(pkgutil.get_data("pip._vendor.certifi", "cacert.pem"))
    argv = ["--disable-pip-version-check", "--cert", cert_path] + argv
    # Snapshot the environment so *env* does not leak past this call.
    old_env = os.environ.copy()
    try:
        if env:
            os.environ.update(env)
        return pip._internal.main(argv)
    finally:
        # Restore the environment exactly as it was before the call.
        os.environ.clear()
        os.environ.update(old_env)
# Imported here (after the path bootstrap above) so the zip-extracted
# dependencies are already importable.
from rules_python.whl import Wheel

# Top-level CLI parser; each piptool subcommand registers itself below.
global_parser = argparse.ArgumentParser(
    description='Import Python dependencies into Bazel.')
subparsers = global_parser.add_subparsers()
def split_extra(s):
    """Split a requirement spec like ``name[extra]`` into its parts.

    Returns a ``(name, extra)`` tuple; ``extra`` is None when the spec
    carries no bracketed extra at all.
    """
    name, bracket, remainder = s.partition("[")
    if not bracket:
        # Plain requirement with no extra.
        return name, None
    # Keep only the first bracketed segment, minus its closing ']'.
    return name, remainder.split("[", 1)[0][:-1]
class CaptureOutput():
    """Context manager that tees sys.stdout.

    While active, everything written to stdout is echoed to the real
    stdout (the user still sees it) and also accumulated, encoded, in
    ``self.stdout`` (a BytesIO) so callers can scrape pip's output
    after the block exits.
    """
    def write(self, data):
        # Echo to the real stdout and keep an encoded copy for scraping.
        self.stdout_save.write(data)
        self.stdout.write(data.encode())
    def __getattr__(self, name):
        # Delegate everything else (flush, isatty, ...) to the real stdout.
        return getattr(self.stdout_save, name)
    def __enter__(self):
        self.stdout_save = sys.stdout
        self.stdout = io.BytesIO()
        sys.stdout = self
        return self
    def __exit__(self, *exc_details):
        # Restore the real stdout; the captured bytes stay in self.stdout.
        sys.stdout = self.stdout_save
# piptool build
# -------------
def build_wheel(distribution,
                version,
                directory,
                cache_key=None,
                build_dir=None,
                build_env=None,
                build_deps=None,
                sha256=None,
                pip_args=None,
                resolving=False):
    """Download or build one wheel deterministically via "pip wheel".

    Args:
        distribution: distribution name (canonicalized to lowercase).
        version: exact version to build.
        directory: output directory; must end up with exactly one .whl.
        cache_key: unused here (consumed by the CLI layer).
        build_dir: stable build directory for deterministic debug paths;
            a sibling "<build_dir>.home" is used as HOME.
        build_env: list of "KEY=VALUE" strings added to the build env.
        build_deps: paths of wheels to pre-extract as buildtime deps.
        sha256: expected digest of the result, if known.
        pip_args: extra arguments passed through to pip.
        resolving: True when called from "resolve"; enables the
            determinism double-build check instead of just warning.

    Returns:
        The hex sha256 digest of the built wheel.  Exits the process on
        pip failure or a detected non-deterministic build.
    """
    env = {}
    home = None
    if build_dir:
        home = build_dir.rstrip("/") + ".home"
        if os.path.isdir(home):
            shutil.rmtree(home)
        os.makedirs(home)
    else:
        home = tempfile.mkdtemp()
    cmd = ["wheel"]
    cmd += ["-w", directory]
    # Allowing "pip wheel" to download setup_requires packages with easy_install would
    # poke a hole to our wheel version locking scheme, making wheel builds non-deterministic.
    # Disable easy_install as instructed here:
    # https://pip.pypa.io/en/stable/reference/pip_install/#controlling-setup-requires
    # We set HOME to the current directory so pip will look at this file; see:
    # https://docs.python.org/2/install/index.html#distutils-configuration-files
    env["HOME"] = home
    with open(os.path.join(home, ".pydistutils.cfg"), "w") as f:
        f.write("[easy_install]\nallow_hosts = ''\n")
    for d in build_deps or []:
        Wheel(d).expand(home)
    # Process .pth files of the extracted build deps.
    with open(os.path.join(home, "sitecustomize.py"), "w") as f:
        f.write("import site; import os; site.addsitedir(os.path.dirname(__file__))")
    # Set PYTHONPATH so that all extracted buildtime dependencies are available.
    env["PYTHONPATH"] = ":".join(os.environ.get("PYTHONPATH", "").split(":") + [home])
    # Redact compiler date/time macros so native extensions build reproducibly.
    env["CFLAGS"] = " ".join([
        "-D__DATE__=\"redacted\"",
        "-D__TIMESTAMP__=\"redacted\"",
        "-D__TIME__=\"redacted\"",
        "-Wno-builtin-macro-redefine",
    ])
    # We don't want .pyc files end up in the built wheels!
    env["PYTHONDONTWRITEBYTECODE"] = "1"
    # Set any other custom env variables the user wants to add to the wheel build.
    env.update(dict([x.split("=", 1) for x in build_env or []]))
    # For determinism, canonicalize distribution name to lowercase here, since, lo and
    # behold, the wheel contents may be different depending on the case passed to
    # "pip wheel" command...
    cmd += ["%s==%s" % (distribution.lower(), version)]
    cmd += ["--no-cache-dir"]
    cmd += ["--no-deps"]
    # Build the wheel in a deterministic path so that any debug symbols have stable
    # paths and the resulting wheel has a higher chance of being deterministic.
    if build_dir:
        if os.path.isdir(build_dir):
            shutil.rmtree(build_dir)
        cmd += ["--build", build_dir]
    cmd += pip_args or []
    locally_built = False
    with CaptureOutput() as output:
        if pip_main(cmd, env):
            print("pip command failed: " + str(cmd))
            sys.exit(1)
    # pip only prints this when it compiled the wheel itself (vs. downloading).
    if re.search(r"Running setup\.py bdist_wheel", output.stdout.getvalue().decode()):
        locally_built = True
    wheels = wheels_from_dir(directory)
    assert len(wheels) == 1
    wheel = wheels[0]
    if locally_built:
        # The wheel was built locally. For determinism, we need to strip timestamps
        # from the zip-file.
        strip_wheel(wheel)
    computed_sha256 = digest(wheel.path())
    if sha256 and computed_sha256 != sha256:
        if resolving:
            if locally_built:
                # If we built the wheel locally and the sha256 had changed from the previous one,
                # build the wheel again to make sure we get the same digest again.
                os.rename(wheel.path(), wheel.path() + ".0")
                if pip_main(cmd, env):
                    sys.exit(1)
                strip_wheel(wheel)
                second_sha256 = digest(wheel.path())
                if computed_sha256 != second_sha256:
                    # Keep both artifacts around for debugging the diff.
                    os.rename(wheel.path(), wheel.path() + ".1")
                    print("Wheel build not deterministic:")
                    print(" %s.0: %s" % (wheel.path(), computed_sha256))
                    print(" %s.1: %s" % (wheel.path(), second_sha256))
                    sys.exit(1)
                os.remove(wheel.path() + ".0")
        else:
            # If the user supplied an expected sha256, the built wheel should match it.
            print("\033[0;33mWARNING:\033[0m Built wheel %s digest %s does not match expected digest %s." % (wheel.path(), computed_sha256, sha256))
    shutil.rmtree(home)
    return computed_sha256
def get_cache_url(args):
    """Build the remote wheel-cache URL for *args*.

    Returns "<BAZEL_WHEEL_CACHE>/<cache_key>", or None when either the
    BAZEL_WHEEL_CACHE environment variable or args.cache_key is missing.
    """
    cache_base = os.environ.get("BAZEL_WHEEL_CACHE")
    if cache_base and args.cache_key:
        return "{}/{}".format(cache_base, args.cache_key)
    return None
def get_remote_retry_attempts():
    """Number of remote-cache retry attempts, taken from the environment.

    Reads BAZEL_WHEEL_REMOTE_RETRY_ATTEMPTS; unset, empty, or '0' all
    mean no retries.
    """
    configured = os.environ.get("BAZEL_WHEEL_REMOTE_RETRY_ATTEMPTS")
    return int(configured) if configured and configured != '0' else 0
def local_fallback_enabled():
    """True iff BAZEL_WHEEL_LOCAL_FALLBACK is set to exactly '1'."""
    return os.environ.get("BAZEL_WHEEL_LOCAL_FALLBACK") == '1'
def requests_with_retry(retries):
    """Return a requests.Session that retries transient HTTP failures.

    Args:
        retries: total number of retry attempts per request.
    """
    session = requests.Session()
    # Retry on server and gateway errors as they may be intermittent.
    # Retry intervals are [0.0, 0.2, 0.4, 0.8, ...] seconds.
    retry = Retry(total=retries, backoff_factor=0.1, status_forcelist=(500, 502, 503, 504))
    adapter = HTTPAdapter(max_retries=retry)
    # Install the retrying adapter for both schemes.
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    return session
def build(args):
    """CLI handler for 'piptool build': forward all parsed flags to build_wheel."""
    build_wheel(**vars(args))
# Flag definitions for the 'build' subcommand; names mirror the
# build_wheel() keyword arguments (argparse converts '-' to '_').
parser = subparsers.add_parser('build', help='Download or build a single wheel, optionally checking from cache first')
parser.set_defaults(func=build)
parser.add_argument('--directory', action='store', default='.',
                    help=('The directory into which to put .whl file.'))
parser.add_argument('--cache-key', action='store',
                    help=('The cache key to use when looking up .whl file from cache.'))
parser.add_argument('--build-dir', action='store',
                    help=('A directory to build the wheel in, needs to be stable to keep the build deterministic (e.g. debug symbols).'))
parser.add_argument('--build-env', action='append', default=[],
                    help=('Environmental variables to set when building.'))
parser.add_argument('--build-deps', action='append', default=[],
                    help=('Wheels that are required to be installed when building.'))
parser.add_argument('--distribution', action='store',
                    help=('Name of the distribution to build.'))
parser.add_argument('--version', action='store',
                    help=('Version of the distribution to build.'))
parser.add_argument('--sha256', action='store',
                    help=('The expected sha256 digest of the built wheel.'))
parser.add_argument('--pip_arg', dest='pip_args', action='append', default=[],
                    help=('Extra arguments to send to pip.'))
# piptool extract
# ---------------
def extract(args):
    """CLI handler for 'piptool extract': unpack args.whl into args.directory."""
    Wheel(args.whl).expand(args.directory)
# Flag definitions for the 'extract' subcommand.
parser = subparsers.add_parser('extract', help='Extract a wheel')
parser.set_defaults(func=extract)
parser.add_argument('--whl', action='store', required=True,
                    help=('The .whl file we are expanding.'))
parser.add_argument('--directory', action='store', default='.',
                    help='The directory into which to expand things.')
# piptool genbuild
# ---------------
def genbuild(args):
    """CLI handler for 'piptool genbuild'.

    Writes a BUILD file for one wheel into args.directory: an
    extract_wheel rule, a py_library ':pkg' with the wheel's runtime
    dependencies, one py_library per requested extra, a py_binary stub
    per console_scripts entry point, and any user-supplied extra content.
    """
    whl = Wheel(args.whl)
    # Apply user-requested dependency additions/removals to the wheel's
    # declared dependency list.
    extra_deps = args.add_dependency or []
    drop_deps = {d: None for d in args.drop_dependency or []}
    external_deps = [d for d in itertools.chain(whl.dependencies(), extra_deps) if d not in drop_deps]
    # Verbatim BUILD-file snippets supplied via --add-build-content.
    contents = []
    add_build_content = args.add_build_content or []
    for name in add_build_content:
        with open(name) as f:
            contents.append(f.read() + '\n')
    contents = '\n'.join(contents)
    # whl.entrypoints() returns a ConfigParser over entry_points.txt (or None).
    parser = whl.entrypoints()
    entrypoints_build = ''
    if parser:
        if parser.has_section('console_scripts'):
            for name, location in parser.items('console_scripts'):
                # Assumes it doesn't depend on extras. TODO(conrado): fix
                entrypoint_file = 'entrypoint_%s.py' % name
                # location is "module:function"; generate a tiny launcher.
                with open(os.path.join(args.directory, entrypoint_file), 'w') as f:
                    f.write("""from %s import %s as main; main()""" % tuple(location.split(":")))
                attrs = []
                attrs += [("name", '"%s"' % name)]
                attrs += [("srcs", '["%s"]' % entrypoint_file)]
                attrs += [("main", '"%s"' % entrypoint_file)]
                if args.python_version:
                    attrs += [("python_version", '"%s"' % args.python_version)]
                attrs += [("deps", '[":pkg"]')]
                entrypoints_build += """
py_binary(
    {attrs}
)
""".format(attrs=",\n    ".join(['{} = {}'.format(k, v) for k, v in attrs]))
    # Attributes forwarded to the extract_wheel rule.
    attrs = []
    if args.patches:
        attrs += [("patches", '["%s"]' % '", "'.join(args.patches))]
    if args.patch_tool:
        attrs += [("patch_tool", '"%s"' % args.patch_tool)]
    if args.patch_args:
        attrs += [("patch_args", '["%s"]' % '", "'.join(args.patch_args))]
    if args.patch_cmds:
        attrs += [("patch_cmds", '["%s"]' % '", "'.join(args.patch_cmds))]
    attrs += [("distribution", '"%s"' % whl.distribution().lower())]
    attrs += [("version", '"%s"' % whl.version())]
    attrs += [("wheel_size", "%s" % os.path.getsize(args.whl))]
    if args.python_version:
        attrs += [("python_version", '"%s"' % args.python_version)]
    with open(os.path.join(args.directory, 'BUILD'), 'w') as f:
        f.write("""
package(default_visibility = ["//visibility:public"])
load("@io_bazel_rules_python//python:python.bzl", "extract_wheel")
load("@{repository}//:requirements.bzl", "requirement")
extract_wheel(
    name = "extracted",
    wheel = "{wheel}",
    {attrs}
)
py_library(
    name = "pkg",
    imports = ["extracted"],
    deps = [
        ":extracted",{dependencies}
    ],
)
{extras}
{entrypoints_build}
{contents}""".format(
            wheel = whl.basename(),
            repository=args.repository,
            # "@repo" labels are emitted verbatim; plain names go through
            # the requirement() helper from the generated requirements.bzl.
            dependencies=''.join([
                ('\n        "%s",' % d) if d[0] == "@" else ('\n        requirement("%s"),' % d)
                for d in sorted(external_deps)
            ]),
            attrs=",\n    ".join(['{} = {}'.format(k, v) for k, v in attrs]),
            extras='\n\n'.join([
                """py_library(
    name = "{extra}",
    deps = [
        ":pkg",{deps}
    ],
)""".format(extra=extra,
            deps=','.join([
                'requirement("%s")' % dep
                for dep in sorted(whl.dependencies(extra))
            ]))
                for extra in args.extras or []
            ]),
            entrypoints_build=entrypoints_build,
            contents=contents))
# Flag definitions for the 'genbuild' subcommand.
parser = subparsers.add_parser('genbuild', help='Extract one or more wheels as a py_library')
parser.set_defaults(func=genbuild)
parser.add_argument('--whl', action='store', required=True,
                    help=('The .whl file we are expanding.'))
parser.add_argument('--repository', action='store', required=True,
                    help='The pip_import from which to draw dependencies.')
parser.add_argument('--add-dependency', action='append',
                    help='Specify additional dependencies beyond the ones specified in the wheel.')
parser.add_argument('--drop-dependency', action='append',
                    help='Specify dependencies to ignore.')
parser.add_argument('--add-build-content', action='append',
                    help='Specify lines to add to the BUILD file.')
parser.add_argument('--directory', action='store', default='.',
                    help='The directory into which to expand things.')
parser.add_argument('--extras', action='append',
                    help='The set of extras for which to generate library targets.')
parser.add_argument('--python-version', action='store',
                    help='Specify python_version (PY2 or PY3) to set for the py_binary targets.')
# Patch-related flags are forwarded verbatim to the extract_wheel rule.
parser.add_argument('--patches', action='append')
parser.add_argument('--patch-tool', action='store')
parser.add_argument('--patch-args', action='append')
parser.add_argument('--patch-cmds', action='append')
# piptool resolve
# ---------------
def determine_possible_extras(whls):
    """Determines the list of possible "extras" for each .whl

    The possibility of an extra is determined by looking at its
    additional requirements, and determining whether they are
    satisfied by the complete list of available wheels.

    Args:
        whls: a list of Wheel objects

    Returns:
        a dict that is keyed by the Wheel objects in whls, and whose
        values are lists of possible extras.
    """
    whl_map = {
        whl.name(): whl
        for whl in whls
    }

    # TODO(mattmoor): Consider memoizing if this recursion ever becomes
    # expensive enough to warrant it.
    def is_possible(name, extra):
        # If we don't have the .whl at all, then this isn't possible.
        if name not in whl_map:
            return False
        whl = whl_map[name]
        # If we have the .whl, and we don't need anything extra then
        # we can satisfy this dependency.
        if not extra:
            return True
        # If we do need something extra, then check the extra's
        # dependencies to make sure they are fully satisfied.
        for extra_dep in whl.dependencies(extra=extra):
            req = pkg_resources.Requirement.parse(extra_dep)
            # Check that the dep and any extras are all possible.
            if not is_possible(req.project_name, None):
                return False
            for e in req.extras:
                if not is_possible(req.project_name, e):
                    return False
        # If all of the dependencies of the extra are satisfiable then
        # it is possible to construct this dependency.
        return True

    return {
        whl: [
            extra
            for extra in whl.extras()
            if is_possible(whl.name(), extra)
        ]
        for whl in whls
    }
def build_dep_graph(input_files, build_info):
    """Group requirement lines into buildtime-dependency order.

    Args:
        input_files: requirements.txt paths to read.
        build_info: the parsed "requirements_overrides" dict; only its
            "additional_buildtime_deps" entries are consulted.

    Returns:
        A list of sets of requirement lines: each set can be built once
        every earlier set is available (toposort batches).  Without any
        build_info, a single batch containing every line.
    """
    # Distribution name is the leading [a-zA-Z0-9_-]+ run of each line.
    pattern = re.compile('[a-zA-Z0-9_-]+')
    flatten = lambda l: [item for sublist in l for item in sublist]
    # Map distribution name -> all requirement lines for it.
    dist_to_lines = collections.defaultdict(list)
    for i in input_files:
        with open(i) as f:
            for l in f.readlines():
                l = l.strip()
                m = pattern.match(l)
                if m:
                    dist_to_lines[m.group()].append(l)
    if not build_info:
        # No overrides: everything can be built in one batch.
        return [flatten(dist_to_lines.values())]
    # deps maps a distribution name to the requirement lines of its
    # declared additional buildtime deps.
    deps = collections.defaultdict(list)
    for dist, info in build_info.items():
        for d in info.get("additional_buildtime_deps", []):
            deps[dist] += dist_to_lines[d]
    # Graph keyed per requirement line; toposort yields dependency batches.
    graph = {r: set(deps[n]) if n in deps else set() for n,rr in dist_to_lines.items() for r in rr}
    result = list(toposort.toposort(graph))
    return result
def wheels_from_dir(dir):
    """Return a Wheel for every .whl found under *dir*, sorted by name."""
    found = []
    # Trailing slash so a symlinked top-level directory is still walked.
    for root, _, filenames in os.walk(dir + "/"):
        for fname in filenames:
            if fname.endswith('.whl'):
                found.append(Wheel(os.path.join(root, fname)))
    return sorted(found, key=lambda w: w.name())
def strip_wheel(w):
    """Rewrite wheel *w* in place with normalized timestamps.

    Re-zips the wheel's contents in a deterministic order with every
    entry's mtime pinned, so locally built wheels hash reproducibly.
    """
    # Zip cannot represent dates before 1980, so use that as the epoch.
    ts = (1980, 1, 1, 0, 0, 0)
    tempdir = tempfile.mkdtemp()
    try:
        w.expand(tempdir)
        with zipfile.ZipFile(w.path(), 'w', zipfile.ZIP_DEFLATED) as zipf:
            for root, dirs, files in os.walk(tempdir):
                dirs.sort() # https://stackoverflow.com/questions/18282370/in-what-order-does-os-walk-iterates-iterate
                for f in sorted(files):
                    local_path = os.path.join(root, f)
                    with open(local_path, "rb") as ff:
                        info = zipfile.ZipInfo(os.path.relpath(local_path, start=tempdir), ts)
                        # Preserve the file's permission bits in the zip entry.
                        info.external_attr = (os.stat(local_path).st_mode & 0o777) << 16
                        zipf.writestr(info, ff.read())
    finally:
        shutil.rmtree(tempdir)
def digest(fname):
    """Return the hex-encoded sha256 digest of the file at *fname*."""
    hasher = hashlib.sha256()
    with open(fname, "rb") as fp:
        # Stream in fixed-size chunks to keep memory bounded.
        while True:
            chunk = fp.read(4096)
            if not chunk:
                break
            hasher.update(chunk)
    return hasher.hexdigest()
def resolve(args):
    """CLI handler for 'piptool resolve'.

    Resolves args.input (requirements.txt files) into args.output
    (a requirements.bzl wheel map): builds/downloads every wheel in
    buildtime-dependency order, optionally re-builds each one in
    isolation to pin a deterministic sha256, then serializes one map
    entry per wheel.
    """
    print("Generating %s from %s..." % (args.output, " and ".join(args.input)))
    print(args)
    # Parse build_info - this is the contents of "requirements_overrides" attribute
    # passed by the user, serialized to json.
    build_info = json.loads(args.build_info or '{}')
    ordering = build_dep_graph(args.input, build_info)
    tempdir = tempfile.mkdtemp()
    existing_pythonpath = os.environ.get('PYTHONPATH', '')
    env = {}
    env['PYTHONPATH'] = tempdir + ':' + existing_pythonpath
    env["HOME"] = tempdir
    with open(os.path.join(tempdir, ".pydistutils.cfg"), "w") as f:
        # WAR for macOS: https://github.com/Homebrew/brew/issues/837
        f.write("[install]\nprefix=\n")
    # Process .pth files of the extracted build deps.
    with open(os.path.join(tempdir, "sitecustomize.py"), "w") as f:
        f.write("import site; import os; site.addsitedir(os.path.dirname(__file__))")
    # Maps wheel basename -> URL pip fetched it from (empty for local builds).
    downloaded_wheel_urls = {}
    for i, o in enumerate(ordering):
        # Install the wheels since they can be dependent at build time
        for _, _, filelist in os.walk(args.directory):
            filelist = [f for f in filelist if f.endswith('.whl')]
            filelist = [os.path.join(args.directory, f) for f in filelist]
            if filelist:
                pip_args = ["install", "-q", "--upgrade", "-t", tempdir] + filelist
                if pip_main(pip_args, env):
                    shutil.rmtree(tempdir)
                    sys.exit(1)
        # f holds just this batch's requirements; f2 constrains against the
        # full resolved set so versions stay consistent across batches.
        with tempfile.NamedTemporaryFile(mode='w+') as f:
            with tempfile.NamedTemporaryFile(mode='w+') as f2:
                f.write('\n'.join(o))
                f.flush()
                f2.write('\n'.join(['\n'.join(c) for c in ordering]))
                f2.flush()
                pip_args = ["wheel"]
                #pip_args += ["--cache-dir", cache_dir]
                if args.directory:
                    pip_args += ["-w", args.directory]
                #if args.input:
                #    pip_args += ["--requirement=" + i for i in args.input]
                pip_args += ["--requirement=" + f.name]
                pip_args += ["--constraint=" + f2.name]
                pip_args += args.pip_args
                with CaptureOutput() as output:
                    if pip_main(pip_args, env):
                        print("pip command failed: " + str(pip_args))
                        shutil.rmtree(tempdir)
                        sys.exit(1)
                # Scrape pip's output to learn which wheels were downloaded
                # (vs. built locally) and from which URL.
                dls = re.findall(r'(?:Downloading|Using cached) (\S*\.whl)', output.stdout.getvalue().decode())
                downloaded_wheel_urls.update({
                    urllib.parse.unquote(url[url.rfind("/")+1 :]): url
                    for url in dls
                })
    shutil.rmtree(tempdir)
    # Enumerate the .whl files we downloaded.
    whls = wheels_from_dir(args.directory)
    possible_extras = determine_possible_extras(whls)

    def quote(string):
        return '"{}"'.format(string)

    whl_map = {
        whl.name(): whl
        for whl in whls
    }

    def transitive_deps(wheel, extra=None, collected=None, build_info=None):
        # Runtime dependency closure of *wheel* (plus override-added deps).
        deps = wheel.dependencies(extra)
        if build_info:
            deps |= set(build_info.get(wheel.name(), {}).get("additional_runtime_deps", []))
        if collected is None:
            collected = set()
        for dep in deps.copy():
            if dep not in collected:
                collected.add(dep)
                d, extra = split_extra(dep)
                deps |= transitive_deps(whl_map[d], extra, collected, build_info)
        return deps

    def transitive_build_deps(wheel, build_info):
        # Wheel objects needed on the path when building *wheel*.
        deps = set()
        for build_dep in build_info.get(wheel.name(), {}).get("additional_buildtime_deps", []):
            # Add any packages mentioned explicitly in "additional_buildtime_deps".
            deps |= {whl_map[build_dep]}
            # Add any runtime deps of such packages.
            for runtime_dep_of_build_dep in transitive_deps(whl_map[build_dep], build_info=build_info):
                deps |= {whl_map[runtime_dep_of_build_dep]}
        return deps

    # Seed wheel_digests with sha256 values from the previous output file,
    # so unchanged wheels don't have to be re-verified.
    wheel_digests = {}
    try:
        with open(args.output, 'r') as f:
            contents = f.read()
            contents = re.sub(r"^wheels = ", "", contents, flags=re.MULTILINE)
            # Need to use literal_eval, since this is bzl, not json (trailing commas, comments).
            wheel_info = ast.literal_eval(contents)
            wheel_digests.update({k: v["sha256"] for k, v in wheel_info.items() if "sha256" in v})
    except (ValueError, IOError):
        # If we can't parse the old wheel map, the remaining steps will be a bit slower.
        print("Failed to parse old wheel map, but this is OK.")
    # If user requested digests, we build each wheel again in isolation to get a
    # deterministic sha256.
    if args.digests:
        for w in whls:
            # If we downloaded a whl file instead of building it locally, we can use its digest as is.
            if w.basename() in downloaded_wheel_urls:
                wheel_digests[w.name()] = digest(w.path())
                continue
            # If the current (not-yet-updated) requirements.bzl already has a sha256 and it
            # matches with the sha of the wheel that we built during resolve (typical for
            # binary distributions), then we can just use that.
            resolved_digest = digest(w.path())
            if w.name() in wheel_digests:
                if resolved_digest == wheel_digests[w.name()]:
                    continue
            build_deps = {w.path() for w in transitive_build_deps(w, build_info)}
            build_env = build_info.get(w.name(), {}).get("additional_buildtime_env", [])
            tempdir = tempfile.mkdtemp()
            try:
                sha256 = build_wheel(
                    distribution=w.distribution(),
                    version=build_info.get(w.name(), {}).get("version", w.version()),
                    directory=tempdir,
                    # NOTE: The build-dir here must match the one that we use in the
                    # individual build_wheel() rules later, otherwise the sha256 that we
                    # compute here will not match the output of build_wheel() due to debug
                    # symbols.
                    build_dir="/tmp/pip-build/%s_wheel" % w.repository_name(prefix=args.name),
                    build_env=build_env,
                    build_deps=build_deps,
                    pip_args=args.pip_args,
                    sha256=wheel_digests.get(w.name(), None),
                    resolving=True,
                )
                wheel_digests[w.name()] = sha256
            finally:
                shutil.rmtree(tempdir)
    if args.output_format == 'download':
        # We are generating a checked-in version of requirements.bzl.
        # For determinism, avoid clashes with other pip_import repositories,
        # and prefix the current pip_import domain to the lib repo name.
        lib_repo = lambda w: w.repository_name(prefix=args.name)
        # Each wheel has its own repository that, refer to that.
        wheel_repo = lambda w: lib_repo(w) + '_wheel'
    else:
        # We are generating requirements.bzl to the bazel output area (legacy mode).
        # Use the good old 'pypi__' prefix.
        lib_repo = lambda w: w.repository_name(prefix='pypi')
        # Wheels are downloaded to the pip_import repository, refer to that.
        wheel_repo = lambda w: args.name

    def whl_library(wheel):
        # Serialize one wheel's entry of the generated "wheels" map.
        attrs = []
        attrs += [("name", quote(lib_repo(wheel)))]
        attrs += [("version", quote(wheel.version()))]
        attrs += [("wheel_name", quote(wheel.basename()))]
        if args.digests:
            attrs += [("sha256", quote(wheel_digests[wheel.name()]))]
        url = downloaded_wheel_urls.get(wheel.basename(), None)
        if url:
            attrs += [("urls", '[{}]'.format(quote(url)))]
        if args.output_format != 'download':
            attrs += [("wheel", '"@{}//:{}"'.format(args.name, wheel.basename()))]
        extras = ', '.join([quote(extra) for extra in sorted(possible_extras.get(wheel, []))])
        if extras != '':
            attrs += [("extras", '[{}]'.format(extras))]
        build_deps = {w.name() for w in transitive_build_deps(wheel, build_info)}
        build_deps = ', '.join([quote(dep) for dep in sorted(build_deps)])
        if build_deps != '':
            attrs += [("build_deps", '[{}]'.format(build_deps))]
        return """"{}": {{
        {},
    }},""".format(wheel.name(), ",\n        ".join(['"{}": {}'.format(k, v) for k, v in attrs]))

    with open(args.output, 'w') as f:
        f.write("""\
# Install pip requirements.
#
{comment}
wheels = {{
    {wheels}
}}
""".format(comment='\n'.join(['# Generated from ' + i for i in args.input]),
           wheels='\n    '.join(map(whl_library, whls))))
# Flag definitions for the 'resolve' subcommand.
# (Fixes the user-visible typo "bulit" -> "built" in the --digests help.)
parser = subparsers.add_parser('resolve', help='Resolve requirements.bzl from requirements.txt')
parser.set_defaults(func=resolve)
parser.add_argument('--name', action='store', required=True,
                    help=('The namespace of the import.'))
parser.add_argument('--build-info', action='store',
                    help=('Additional build info as a string-serialized python dict.'))
parser.add_argument('--input', action='append', required=True,
                    help=('The requirements.txt file(s) to import.'))
parser.add_argument('--output', action='store', required=True,
                    help=('The requirements.bzl file to export.'))
parser.add_argument('--output-format', choices=['refer', 'download'], default='refer',
                    help=('How whl_library rules should obtain the wheel.'))
parser.add_argument('--directory', action='store', default='.',
                    help=('The directory into which to put .whl files.'))
parser.add_argument('--digests', action='store_true',
                    help=('Emit sha256 digests for the built wheels, and ensure deterministic build.'))
parser.add_argument('--pip-arg', dest='pip_args', action='append', default=[],
                    help=('Extra arguments to send to pip.'))
def main():
    """Parse the command line and dispatch to the chosen subcommand."""
    args = global_parser.parse_args()
    handler = args.func
    # Strip 'func' so handlers that expand vars(args) (e.g. build) only
    # receive real flag values.
    del args.func
    handler(args)
# Allow running this module directly as a CLI tool.
if __name__ == '__main__':
    main()
|
986,601 | 0846c9ef18a892d6b0a0ec2a07b1fe4a07e22eeb | #!/usr/bin/env python
def main():
    """Find the index of the first Fibonacci term with 1000 digits.

    Prints the digit count reached and the 1-based term index
    (Project Euler problem 25).

    Fixed: the original used Python 2 print statements (`print x`),
    which are a syntax error on Python 3; converted to print().
    """
    a = 1
    while True:
        z = str(fibonaci(a))
        x = len(z)
        if x >= 1000:
            break
        else:
            a = a + 1
    print(x)
    print(a)
#def fibonaci(n) :
#if n == 1 :
#return 1
#elif n == 2 :
#return 1
#else :
#return (fibonaci(n-1) + fibonaci(n-2))
def fibonaci(n):
    """Return the n-th Fibonacci number (F(0)=0, F(1)=1), bottom-up."""
    values = [0, 1]
    # Extend the table one term at a time up to index n.
    for _ in range(2, n + 1):
        values.append(values[-1] + values[-2])
    return values[n]
# Run the search only when executed as a script.
if __name__ == "__main__" :
    main()
|
986,602 | 4c52455c36def26b14bc79d9d3b875f16cc1be45 | from config import *
import classifier_pool as classifiers
from sklearn import preprocessing
import pickle
def main():
    """Load saved feature matrices, optionally normalize them, run every
    enabled classifier, and append a report to LOG_FILE.

    Fixes two defects in the original:
    - logReport was *overwritten* (not appended) when both estimators
      were enabled, losing the SVM report;
    - logReport was undefined (NameError at the log write) when no
      estimator was enabled.
    Also closes the pickle file handles via context managers.
    """
    print("Loading Data.")
    with open(FEATURE_SAVE_PATH, 'rb') as f:
        data = pickle.load(f)
    featureMatrixTraining = data["featureMatrixTraining"]
    featureMatrixValidation = data["featureMatrixValidation"]
    targetsTraining = data["targetsTraining"]
    targetsValidation = data["targetsValidation"]
    with open(TEST_FEATURE_SAVE_PATH, 'rb') as f:
        test_data = pickle.load(f)
    featureMatrixTesting = test_data["featureMatrixTesting"]
    # Data Normalization: fit on training data only, then apply the same
    # transform to validation/test sets to avoid leakage.
    if NORMALIZE_DATA:
        print("Normalizing Data.")
        normalizer = preprocessing.StandardScaler()
        featureMatrixTraining = normalizer.fit_transform(featureMatrixTraining)
        featureMatrixValidation = normalizer.transform(featureMatrixValidation)
        featureMatrixTesting = normalizer.transform(featureMatrixTesting)
    print('Classification.')
    print(configSummary + ", " + str(featureMatrixTraining.shape))
    # Accumulate one report section per enabled estimator.
    logReport = ""
    if ESTIMATOR_POOL['svm'] is True:
        logReport += classifiers.svm(
            featureMatrixTraining, targetsTraining, featureMatrixValidation, featureMatrixTesting, targetsValidation)
        logReport = logReport + "\n"
    if ESTIMATOR_POOL['random_forest'] is True:
        logReport += classifiers.random_forest(
            featureMatrixTraining, targetsTraining, featureMatrixValidation, featureMatrixTesting, targetsValidation)
        logReport = logReport + "\n"
    with open(LOG_FILE, 'a+') as f:
        f.write("\n")
        f.write(configSummary + ", " + str(featureMatrixTraining.shape))
        f.write("\n")
        f.write(logReport)
# Entry point when run as a script.
if __name__ == '__main__':
    main()
|
986,603 | 50ab2d87b98858201d37cffe07d3ce1c2a80bf0c | import os
import shutil
if __name__ == '__main__':
    # Flatten per-sequence groundtruth files: move each
    # data/trainval/<name>/groundtruth.txt to data/gt/<name>.txt.
    base = 'data/trainval'
    gt = 'data/gt'
    for name in os.listdir(base):
        src = os.path.join(base, name, 'groundtruth.txt')
        dst = os.path.join(gt, name + '.txt')
        shutil.move(src, dst)
|
986,604 | ee67faf2ec6211dc2c30502aba64427d5fa97aea | import pandas as pd
import numpy as np
import matplotlib.pyplot as pl
'''dataset_tain = pd.read_excel('Final_Train')'''
# quoting=3 (QUOTE_NONE) keeps embedded double quotes in the reviews.
dataset = pd.read_csv('Restaurant_Reviews.tsv', delimiter='\t', quoting=3)

# Cleaning the dataset
import re
import nltk
# nltk.download('stopwords')  # one-time download of the stopword list
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer

# Hoisted out of the loop: the stemmer and the stopword set are the same
# for every review, and rebuilding them 1000 times was pure overhead.
ps = PorterStemmer()
stop_words = set(stopwords.words('english'))
corpus = []
for i in range(0, 1000):
    # Keep letters only, lowercase, and split into tokens.
    review = re.sub('[^a-zA-Z]', ' ', dataset['Review'][i])
    review = review.lower().split()
    # Drop stopwords and stem the rest (e.g. "loved" -> "love").
    review = [ps.stem(word) for word in review if word not in stop_words]
    corpus.append(' '.join(review))

# Applying the bag-of-words model to the corpus.
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer()
x = cv.fit_transform(corpus).toarray()
y = dataset.iloc[:, 1].values

# Dividing into test set and train set.
# BUG FIX: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
xtrain, xtest, ytrain, ytest = train_test_split(x, y, test_size=0.20, random_state=0)

# Training the model.
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
classifier.fit(xtrain, ytrain)

# Prediction and evaluation.
y_pred = classifier.predict(xtest)
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(ytest, y_pred)
|
986,605 | d55f61c9b8facb3d90197800543ea7cfb98862a9 | import xml.dom.minidom as Dom
from xml.dom import minidom
from flask import (
Blueprint, flash, g, redirect, render_template, request, session, url_for
)
# Flask blueprint so the XML-export routes can be mounted under /xml.
lb = Blueprint('writexml', __name__, url_prefix='/xml')
def _append_text_node(doc, parent, tag, text):
    """Create <tag>text</tag>, append it to *parent*, and return it."""
    node = doc.createElement(tag)
    node.appendChild(doc.createTextNode(text))
    parent.appendChild(node)
    return node

def writexml(annotations, filename, width, height):
    """Write a Pascal-VOC style annotation XML file for one image.

    The output file is "<filename stem>.xml" (pretty-printed, UTF-8).

    Args:
        annotations: iterable of dicts with keys "tag", "x", "y",
            "width", "height" — boxes given as top-left corner + size.
        filename: image file name recorded in <filename>/<relpath>.
        width, height: image dimensions in pixels.

    Improvements over the original:
    - the ~70 lines of copy-pasted createElement/createTextNode calls
      are factored into _append_text_node (same element order/content);
    - the stem is taken with rsplit so names like "a.b.png" no longer
      get truncated to "a.xml";
    - the output file is closed via a context manager.
    """
    doc = Dom.Document()
    root_node = doc.createElement("annotation")
    doc.appendChild(root_node)

    _append_text_node(doc, root_node, "folder", "cam")
    _append_text_node(doc, root_node, "filename", filename)
    _append_text_node(doc, root_node, "relpath", "../cam/" + filename)

    head = doc.createElement("source")
    _append_text_node(doc, head, "database", "Unknown")
    root_node.appendChild(head)

    size = doc.createElement("size")
    _append_text_node(doc, size, "width", str(width))
    _append_text_node(doc, size, "height", str(height))
    _append_text_node(doc, size, "depth", "1")
    root_node.appendChild(size)

    _append_text_node(doc, root_node, "segmented", "0")

    for ann in annotations:
        ob_node = doc.createElement("object")
        _append_text_node(doc, ob_node, "name", ann["tag"])
        _append_text_node(doc, ob_node, "pose", "Unspecified")
        _append_text_node(doc, ob_node, "truncated", "0")
        _append_text_node(doc, ob_node, "difficult", "0")
        # VOC stores corner coordinates, so convert x/y/width/height.
        bnd_node = doc.createElement("bndbox")
        _append_text_node(doc, bnd_node, "xmin", str(round(ann["x"])))
        _append_text_node(doc, bnd_node, "ymin", str(round(ann["y"])))
        _append_text_node(doc, bnd_node, "xmax", str(round(ann["x"] + ann["width"])))
        _append_text_node(doc, bnd_node, "ymax", str(round(ann["y"] + ann["height"])))
        ob_node.appendChild(bnd_node)
        root_node.appendChild(ob_node)

    print(doc.toxml("utf-8"))
    # Split on the LAST dot so multi-dot filenames keep their full stem.
    stem = filename.rsplit(".", 1)[0]
    with open(stem + ".xml", "wb") as f:
        f.write(doc.toprettyxml(indent="\t", newl="\n", encoding="utf-8"))
986,606 | 8c52708744ab5f4ac5f61f50179b256ab710db7f | class AioGeTuiException(Exception):
pass
class AuthSignFailed(AioGeTuiException):
    """Signature verification failed (original docstring: 验证签名失败)."""
    pass
|
986,607 | aaf49ca7049b274ccf0cad44bc5c8ab7d771011b | #POS tagger
# POS-tagging walkthrough: pre-trained tagger, DefaultTagger, NgramTagger,
# UnigramTagger, and finally a regexp/unigram backoff pipeline.
from nltk import word_tokenize
from nltk import pos_tag
sent1 = "The race officials refused to permit the team to race today"
print(pos_tag(word_tokenize(sent1)))
sent2 = "The gentleman wants some water to water the plants"
print(pos_tag(word_tokenize(sent2)))
text = word_tokenize("They refuse to permit us to obtain the refuse permit")
import nltk
print(nltk.pos_tag(text))
# Default tagger: tag every token with the single most frequent Brown tag.
import nltk
nltk.download('brown')
from nltk.corpus import brown
# getting the most common tag in the brown corpus
tags = [tag for (word,tag) in brown.tagged_words()]
most_common_tag = nltk.FreqDist(tags).max()
print(most_common_tag)
from nltk import DefaultTagger
barack = """Barack Hussein Obama (born August 4, 1961) is an American politician
who served as the 44th President of the United States from January 20, 2009, to January 20, 2017.
A member of the Democratic Party, he was the first African American to assume the presidency
and previously served as a United States Senator from Illinois (2005–2008)."""
tokenized_barack = word_tokenize(barack)
default_tagger = DefaultTagger(most_common_tag)
def_tagged_barack = default_tagger.tag(tokenized_barack)
print(def_tagged_barack)
# Lookup Tagger
# Ngram tagger (n=2): needs the preceding tag as context, so coverage of the
# reversed sentence below is limited to contexts seen in training.
message = "the quick brown fox jumped over the lazy dog"
training_tag = pos_tag(word_tokenize(message))
print(training_tag)
# training the ngram tagger
ngram_tagger = nltk.NgramTagger(n=2,train=[training_tag])
message2 = "the lazy dog jumped over the quick brown fox"
message2_tags = ngram_tagger.tag(word_tokenize(message2))
print(message2_tags)
print(list(nltk.ngrams(pos_tag(word_tokenize(message)),n=2)))
# Unigram tagger: trained on two tagged biographies, evaluated on a third.
barack = """Barack Hussein Obama II born August 4, 1961) is an American politician
who served as the 44th President of
the United States from January 20, 2009, to January 20, 2017.
A member of the Democratic Party, he was the
first African American to assume the presidency and previously
served as a United States Senator from Illinois (2005–2008)."""
bush = """George Walker Bush (born July 6, 1946) is an American politician who served as the 43rd President
of the United States from 2001 to 2009.
He had previously served as the 46th Governor of Texas from 1995 to 2000.
Bush was born New Haven, Connecticut, and grew up in Texas.
After graduating from Yale University in 1968 and Harvard Business School in 1975, he worked in the oil industry.
Bush married Laura Welch in 1977 and unsuccessfully ran for the U.S. House of Representatives shortly thereafter.
He later co-owned the Texas Rangers baseball team before defeating Ann Richards in the 1994 Texas gubernatorial election.
Bush was elected President of the United States in 2000 when he defeated Democratic incumbent
Vice President Al Gore after a close and controversial win that involved a stopped recount in Florida.
He became the fourth person to be elected president while receiving fewer popular votes than his opponent.
Bush is a member of a prominent political family and is the eldest son of Barbara and George H. W. Bush,
the 41st President of the United States.
He is only the second president to assume the nation's highest office after his father, following the footsteps
of John Adams and his son, John Quincy Adams.
His brother, Jeb Bush, a former Governor of Florida, was a candidate for the Republican presidential nomination
in the 2016 presidential election.
His paternal grandfather, Prescott Bush, was a U.S. Senator from Connecticut."""
pos_tag_barack = pos_tag(word_tokenize(barack))
pos_tag_bush = pos_tag(word_tokenize(bush))
trump = """Donald John Trump (born June 14, 1946) is the 45th and current President of the United States.
Before entering politics, he was a businessman and television personality.
Trump was born and raised in the New York City borough of Queens, and received an economics degree from the
Wharton School of the University of Pennsylvania.
He took charge of his family's real estate business in 1971, renamed it The Trump Organization, and expanded
it from Queens and Brooklyn into Manhattan.
The company built or renovated skyscrapers, hotels, casinos, and golf courses.
Trump later started various side ventures, including licensing his name for real estate and consumer products.
He managed the company until his 2017 inauguration.
He co-authored several books, including The Art of the Deal. He owned the Miss Universe and Miss USA beauty
pageants from 1996 to 2015, and he produced and hosted the reality television show The Apprentice from 2003 to 2015.
Forbes estimates his net worth to be $3.1 billion."""
unigram_tag = nltk.UnigramTagger(train=[pos_tag_barack,pos_tag_bush])
trump_tags = unigram_tag.tag(word_tokenize(trump))
print(trump_tags)
# Tagging pipeline and backoff: unigram -> regexp patterns -> 'NN' default.
default_tagger = DefaultTagger('NN')
patterns = [
            (r'.*\'s$','NN$'), # possessive nouns
            (r'.*es$','VBZ'), # 3rd singular present
            (r'^-?[0-9]+(.[0-9]+)?$','CD'), # cardinal numbers
            (r'[Aa][Nn][Dd]','CC'), # conjugate and
            (r'.*ed$','VBD'), # simple past
            (r',',','), # comma
            (r'.*ould$','MD'), # modals
            (r'.*ing$','VBG'), # gerunds
            (r'.*s$','NNS'), # plural nouns
            ]
regexp_tagger = nltk.RegexpTagger(patterns,backoff=default_tagger)
unigram_tag = nltk.UnigramTagger(train=[pos_tag_barack,pos_tag_bush],backoff=regexp_tagger)
trump_tags = unigram_tag.tag(word_tokenize(trump))
print(trump_tags)
|
986,608 | 501508afad6c1a42172c575ac6e1d37161f1d6ec | platform_info = {
'id' : 'hampton',
'location' : 'Hampton Shoals, Neuse River, NC',
'lat' : 35.0184, # degrees true (-) south, (+) north
'lon' : -76.9409, # degrees true (-) west, (+) east
'mvar' : -9.80, # degrees (-) west, (+) east
'institution' : 'nccoos',
#
'config_start_date' : '2005-08-12 00:00:00',
'config_end_date' : '2008-09-03 00:00:00', # None or yyyy-mm-dd HH:MM:SS
'packages' : ('avp', 'met'),
}
# Per-sensor-package processing configuration for this platform, keyed by the
# package names listed in platform_info['packages'] ('avp', 'met').
sensor_info = {
    'avp' : { 'id' : 'avp',
              'description' : 'Automated profiler data ctd and water quality',
              'raw_dir' : '/seacoos/data/nccoos/level0/hampton/avp/',
              'raw_file_glob' : '*.dat',
              'proc_dir' : '/seacoos/data/nccoos/level1/hampton/avp/',
              'process_module' : 'proc_avp_ysi_6600_v1_CDL2',
              'utc_offset' : 5.,         # hours offset to Eastern Standard
              'bin_size' : 0.1,          # meters
              'nbins' : 150,             # max number of samples in profile
              # 'latest_dir' : '/seacoos/data/nccoos/latest_v2.0',
              # 'latest_vars' : ('time','lat','lon','z','wtemp','salin'),
              },
    'met' : { 'id' : 'met',
              'description' : 'Wind Data at Automated Vertical Profiler Station',
              'raw_dir' : '/seacoos/data/nccoos/level0/hampton/met/',
              'raw_file_glob' : '*.wnd',
              'proc_dir' : '/seacoos/data/nccoos/level1/hampton/met/',
              'process_module' : 'proc_avp_ascii_met',
              'utc_offset' : 5.,         # hours offset to Eastern Standard
              'anemometer_height' : 2.,  # meters
              'latest_dir' : '/seacoos/data/nccoos/latest_v2.0',
              'latest_vars' : ('time','lat','lon','z','u','v','wspd', 'wdir'),
              },
    }
|
986,609 | 0b5f2d9628cbd140f264d0c676dfacc8600792e4 | import math
def round_d(number: float, decimals: int = 2):
    """Round *number* down (toward negative infinity) to `decimals` places.

    Raises:
        TypeError:  if `decimals` is not an int.
        ValueError: if `decimals` is negative.
    """
    if not isinstance(decimals, int):
        raise TypeError("decimal places must be an integer")
    if decimals < 0:
        raise ValueError("decimal places has to be 0 or more")
    if decimals == 0:
        # math.floor returns an int here, matching the original contract.
        return math.floor(number)
    scale = 10 ** decimals
    return math.floor(number * scale) / scale
class Trade:
    """A single trade plan: entry price, profit target, stop exit, and a risk
    budget.

    `risk` is expected to be a NEGATIVE dollar amount (the most you accept
    losing); update_risk() negates its argument to enforce that convention.
    `_taker_fee` is applied by market_fee() and `_maker_fee` by limit_fee().
    NOTE(review): get_position_size() charges taker fees on both entry and
    exit, while exit_return() charges a maker fee on the exit price — confirm
    which order type the stop actually uses.

    Fixes vs. the previous version: get_position_size() was defined twice
    (the second definition silently shadowed the first; the direction-guarded
    one is kept), and the constructor/update() now accept the
    (entry, target, exit, risk) arguments that __main__ already passes,
    with defaults so the old no-argument calls still work.
    """

    _taker_fee = 0.0007  # market-order fee rate
    _maker_fee = 0.0002  # limit-order fee rate

    def __init__(self, entry=0, target=0, exit=0, risk=0):
        """Build a plan; all fields optional so Trade() stays valid."""
        self.entry = entry
        self.target = target
        self.exit = exit
        self.risk = risk
        self.direction = None
        self.set_dir()  # no-op until all four fields are non-zero

    def update(self, entry=None, target=None, exit=None, risk=None):
        """Update any subset of the plan's fields, then refresh direction."""
        if entry is not None:
            self.entry = entry
        if target is not None:
            self.target = target
        if exit is not None:
            self.exit = exit
        if risk is not None:
            self.risk = risk
        self.set_dir()

    def print_all(self):
        """Dump the current plan to stdout."""
        print({"entry": self.entry, "target": self.target, "exit": self.exit, "risk": self.risk, "direction": self.direction})

    def update_target(self, x):
        self.target = x
        self.set_dir()

    def update_exit(self, x):
        self.exit = x
        self.set_dir()

    def update_entry(self, x):
        self.entry = x

    def update_risk(self, x):
        # Stored negated: callers pass the positive dollar amount at risk.
        self.risk = -x

    def check_step(self):
        """True once entry, target, exit and risk are all set (non-zero)."""
        return all(i for i in [self.entry, self.target, self.exit, self.risk])

    def set_dir(self):
        """Infer LONG/SHORT from where the stop sits relative to entry."""
        if self.check_step():
            if self.entry > self.exit:
                self.direction = "LONG"   # stop below entry
            else:
                self.direction = "SHORT"  # stop above entry

    def get_dir(self):
        return self.direction

    def get_rr(self):
        """Reward:risk ratio once the plan is complete, else None."""
        if self.check_step():
            return round_d(-1*self.target_return()/self.exit_return(), 4)

    def get_position_size(self):
        """Units sized so that a stop-out loses exactly `risk` dollars
        (fees included); None until the plan and direction are known."""
        if self.check_step():
            if self.direction:
                if self.direction == "LONG":
                    return round_d(self.risk/(self.exit - self.entry - self.market_fee(self.exit) - self.market_fee(self.entry)), 4)
                else:
                    return round_d(self.risk/(self.entry - self.exit - self.market_fee(self.exit) - self.market_fee(self.entry)), 4)
        return None

    def target_return(self):
        """Net profit (after fees) if the target limit order fills."""
        if self.check_step():
            if self.get_dir() == "LONG":
                return round_d(self.get_position_size() * (self.target - self.entry - self.limit_fee(self.target) - self.market_fee(self.entry)), 4)
            else:
                return round_d(self.get_position_size() * (self.entry - self.target - self.limit_fee(self.target) - self.market_fee(self.entry)), 4)

    def exit_return(self):
        """Net result (after fees) if the stop exit is hit; the realized risk."""
        if self.check_step():
            if self.get_dir() == "LONG":
                return round_d(self.get_position_size() * (self.exit - self.entry - self.limit_fee(self.exit) - self.market_fee(self.entry)), 4)
            else:
                return round_d(self.get_position_size() * (self.entry - self.exit - self.limit_fee(self.exit) - self.market_fee(self.entry)), 4)

    def market_fee(self, price):
        """Taker fee charged on a market order at `price`."""
        return price * self._taker_fee

    def limit_fee(self, price):
        """Maker fee charged on a limit order at `price`."""
        return price * self._maker_fee
if __name__ == "__main__":
    # Demo: size and evaluate a long plan, then flip it to a short.
    # NOTE(review): these calls require a Trade class whose constructor and
    # update() accept (entry, target, exit, risk); verify the class
    # definition above matches — the positional order used here is
    # entry, target, exit, risk, with risk already negative.
    trade = Trade(5900,6000,5800,-3.30)
    print(trade.get_position_size())
    print(trade.get_dir())
    print(trade.target_return())
    print(trade.exit_return())
    print(trade.get_rr())
    print()
    trade.update(5900,5800,6000,-3.30)
    print(trade.get_position_size())
    print(trade.get_dir())
    print(trade.target_return())
    print(trade.exit_return())
    print(trade.get_rr())
    #print("short risk = $3.30 entry = 5900, exit = 6000")
    #print(get_position_size(-3.3,5900,6000))
    #print("long risk = $3.30 entry = 5900, exit = 5800")
    #print(get_position_size(-3.3,5900,5800))
986,610 | ca2a55d4728b76dd0d5ecff5d663c4b953c9ef6e | from flask import Flask, request, jsonify
from flask_httpauth import HTTPBasicAuth
import io
from flask_restplus import Api, Resource, fields, Namespace
from flask_cors import CORS
import requests
import os
from .otstamp import TimeStamp
app = Flask(__name__)
auth = HTTPBasicAuth()
# Swagger security scheme: clients pass a JWT in a "token" request header.
authorizations = {
    'token': {
        'type': 'apiKey',
        'in': 'header',
        'name': 'token'}
}
# Deployment-specific settings; raises KeyError at import time if unset.
API_TITLE = os.environ['API_TITLE']
API_DESCRIPTION = os.environ['API_DESCRIPTION']
api = Api(app, version='.1', title=API_TITLE,
          description=API_DESCRIPTION,
          authorizations=authorizations
          )
# Routes
import os
import jwt
from functools import wraps
from flask import make_response, jsonify
# Public key used by requires_auth() to verify JWT signatures (RS256).
PUBLIC_KEY = os.environ['PUBLIC_KEY']
def requires_auth(roles):
    """Decorator factory: require a JWT (from the 'Token' header, or a
    'token' field in the JSON body as a fallback) whose decoded 'roles'
    claim shares at least one role with `roles`; otherwise respond 401."""
    def requires_auth_decorator(f):
        @wraps(f)
        def decorated(*args, **kwargs):
            def decode_token(token):
                # Verifies the signature against the module-level PUBLIC_KEY;
                # raises on expiry/tamper/missing-claims.
                return jwt.decode(token.encode("utf-8"), PUBLIC_KEY, algorithms='RS256')
            try:
                decoded = decode_token(str(request.headers['Token']))
            except Exception as e:
                # Header missing or invalid: try the JSON body instead.
                post_token = False
                if request.json != None:
                    if 'token' in request.json:
                        try:
                            decoded = decode_token(request.json.get('token'))
                            post_token=True
                        except Exception as e:
                            return make_response(jsonify({'message': str(e)}),401)
                if not post_token:
                    return make_response(jsonify({'message': str(e)}), 401)
            # Role check: reject when the token's roles and the required
            # roles have an empty intersection.
            if set(roles).isdisjoint(decoded['roles']):
                return make_response(jsonify({'message': 'Not authorized for this endpoint'}),401)
            return f(*args, **kwargs)
        return decorated
    return requires_auth_decorator
ns_token = Namespace('test', description='Tests')
@ns_token.route('/auth/')
class ResourceRoute(Resource):
    # Authenticated probe endpoint: any of the three roles may call it.
    @ns_token.doc('token_resource',security='token')
    @requires_auth(['user','moderator','admin'])
    def get(self):
        """Return a success message when the caller's JWT is valid."""
        return jsonify({'message': 'Success'})
@ns_token.route('/')
class TestRoute(Resource):
    # Unauthenticated liveness probe.
    @ns_token.doc('resource',security='token')
    def get(self):
        """Return a success message without requiring authentication."""
        return jsonify({'message': 'Success'})
ns_save = Namespace('timestamp', description='Timestamp')
# Request schema: a single 'url' string to be timestamped.
save_model = ns_save.model("collection", {
    "url": fields.String(),
})
@ns_save.route('/')
class SaveRoute(Resource):
    @ns_save.doc('save',security='token')
    @requires_auth(['moderator','admin'])
    @ns_save.expect(save_model)
    def post(self):
        """Create a TimeStamp for the submitted URL string.

        NOTE(review): this timestamps the URL text itself (its UTF-8
        bytes wrapped in a BytesIO), not the content behind the URL —
        confirm that is the intended behaviour.
        """
        url = request.get_json()['url']
        timestamp = TimeStamp(io.BytesIO(url.encode('utf-8')))
        print(timestamp)
        return 'woo'
###
api.add_namespace(ns_token)
api.add_namespace(ns_save)
|
986,611 | 8f4bec513cbf5a706d71913d1a88e287fca1e980 | from pathlib import Path
import numpy as np
def read_lut(path):
    """Parse a .cube LUT file into a float32 numpy array.

    Supports both 3D LUTs (``LUT_3D_SIZE n`` -> shape (n, n, n, 3)) and
    1D LUTs (``LUT_1D_SIZE n`` -> shape (n, 3)). The previous version
    parsed the 1D header but then reshaped the table as if it were 3D,
    which raised for any real 1D LUT; it also produced a confusing
    reshape error when no size header was present.

    Args:
        path: path to a .cube file (str or Path).

    Returns:
        np.ndarray of dtype float32.

    Raises:
        AssertionError: if `path` does not exist.
        ValueError: if the file has no LUT_1D_SIZE / LUT_3D_SIZE header.
    """
    assert Path(path).exists()
    data = []
    size = -1
    kind = None
    for line in Path(path).read_text().split("\n"):
        if line.strip() == "":
            continue
        elif line[0] == "#":
            continue
        elif line.split()[0] == "LUT_1D_SIZE":
            size = int( line.split()[1] )
            kind = "1D"
        elif line.split()[0] == "LUT_3D_SIZE":
            size = int( line.split()[1] )
            kind = "3D"
        else:
            data.append( [float(part) for part in line.split()] )
    if kind is None:
        raise ValueError("no LUT_1D_SIZE or LUT_3D_SIZE header found")
    table = np.array(data).astype(np.float32)
    if kind == "1D":
        return table.reshape(size, 3)
    return table.reshape(size, size, size, 3)
if __name__ == "__main__":
    # Smoke test against a sample LUT shipped with the test resources.
    lut = read_lut("../../tests/resources/AlexaV3_K1S1_LogC2Video_Rec709_EE_aftereffects3d.cube")
    print(lut.shape)
986,612 | ee9603baaed271fdf0b99f3a33dc42992dc5f299 | """This module contains error handler utility functions."""
from flask import Blueprint
from flask.json import jsonify
errors = Blueprint('errors', __name__)
@errors.app_errorhandler(Exception)
def handle_exceptions(e):
    """Catch-all exception handler.

    Bug fix: the previous version returned a bare dict, which Flask
    serves with status 200 even though an exception occurred. Route it
    through generic_errors() like the other handlers so the client gets
    a 500 and the same {"error": ...} payload shape.
    """
    return generic_errors("exception occured", code=500)
@errors.app_errorhandler(404)
def not_found(e):
    """Render the standard JSON error body with status 404."""
    status = 404
    return generic_errors("Not found", status)
@errors.app_errorhandler(500)
def server_error(e):
    """Render the standard JSON error body with status 500."""
    status = 500
    return generic_errors("server error", status)
@errors.app_errorhandler(405)
def method_error(e):
    """Render the standard JSON error body with status 405."""
    status = 405
    return generic_errors("method not allowed", status)
@errors.app_errorhandler(ValueError)
def handle_invalid_arguments(e):
    """Handle malformed request data errors with a 400 response.

    Bug fix: Python 3 exceptions have no ``.message`` attribute, so the
    old ``e.message`` raised AttributeError inside the handler; ``str(e)``
    is the portable way to get the exception text.
    """
    return generic_errors(str(e), code=400)
def generic_errors(error, code):
    """Assemble a JSON error response: `error` under the "error" key,
    served with HTTP status `code`."""
    response = jsonify({"error": error})
    response.status_code = code
    return response
|
986,613 | b7dd5c6ec6a0ce54533cde1b526334a03f8b0e7e | from gym.envs.registration import register
# Expose the custom environment so callers can do gym.make('ps-v0').
register(
    id='ps-v0',
    entry_point='gym_ps.envs:PSenv',
)
986,614 | 5d608079a33ac4e3fa789271712e5c4677ba34e0 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 29 16:07:30 2018
@author: fabiangnegel
"""
# Batch-solves pre-generated CPLEX .lp instances, one per
# (size, variable-type, seed, suffix) combination, with a 1-hour time limit
# each; per-run solver output goes to logs/, and run times / seeds / MIP gaps
# are saved to .mat files under times/.
# NOTE: Python 2 syntax throughout (print statements, dict.iteritems,
# `except E, e` clauses).
# NOTE(review): `import scipy` does not reliably make `scipy.io` available —
# confirm scipy.io is importable in the target environment.
import numpy as np
import scipy
import cplex
from cplex.exceptions import CplexSolverError
seeds=range(1,6)
dupRanges={'small':[5,15],'medium':[15,25],'large':[20,50]}
#dupRanges={'small':[5,15],'large':[20,50]}
#bounds={'cat':[10,1,0],'mixed':[10,1,8]}
solDict={}
solTimes={}
#seeds=range(7,12)
#dupRanges={'medium':[15,25]}
bounds={'num':[0,0,8],'cat':[6,1,0],'mixed':[3,1,6]}
#bounds={'cat':[6,1,0]}
#bounds={'num':[0,0,8]}
#dupRanges={'large':[20,50]}
for ending in ['_friction_DA_I']:
    for key1,dupRange in dupRanges.iteritems():
        for key2,boundAmount in bounds.iteritems():
            times=[]
            seedList=[]
            gapList=[]
            seeds=range(1,6)
            #if key1=='small' and (key2=='cat' or key2 == 'mixed'):
            #    seeds=range(2,7)
            #if key1=='medium' and (key2=='cat' or key2 == 'mixed'):
            #    seeds=[1,3,8,9,10]
            #if key1=='large' and (key2=='cat' or key2 == 'mixed'):
            #    seeds=[1,2,3,4,7]
            for seed in seeds:
                model=cplex.Cplex()
                #filename=key1+"_"+key2+"_seed_%d" % seed+'_no_friction'
                filename=key1+"_"+key2+"_seed_%d" % seed+ending
                model.read("lpfiles/"+filename+".lp")
                model.set_results_stream('logs/'+filename+'.rlog')
                model.parameters.timelimit.set(3600.0)
                #model.parameters.threads(1)
                start=model.get_time()
                try:
                    model.solve()
                    end=model.get_time()
                    status=model.solution.get_status()
                    duration=end-start
                    fh = open('logs/'+filename+'.rlog','a')
                    fh.write("\n"+model.solution.get_status_string())
                    fh.close()
                    # statuses 101/102: presumably CPLEX's MIP optimal /
                    # optimal-within-tolerance — confirm for the CPLEX
                    # version in use.
                    if (status == 101 or status== 102):
                        times.append(duration)
                        gapList.append(model.solution.MIP.get_mip_relative_gap())
                    else:
                        # Negative time encodes "stopped early without an
                        # optimal solution"; gap -1 marks it invalid.
                        if duration < 3600:
                            times.append(-duration)
                            gapList.append(-1)
                        else:
                            times.append(duration)
                            gapList.append(model.solution.MIP.get_mip_relative_gap())
                    seedList.append(seed)
                except CplexSolverError, exc:
                    print "** Exception: ",exc
            #fh = open('logs/'+filename+'.rlog','a')
            #fh.write("\n"+str(model.solution.get_objective_value()))
            #fh.close()
            gapArray=np.transpose(np.array([gapList]))
            seedArray=np.transpose(np.array([seedList]))
            timeArray=np.transpose(np.array([times]))
            scipy.io.savemat('times/'+key1+'_'+key2+ending+'2',dict([('times2',timeArray),('seeds2',seedArray),('gaps2',gapArray)]))
            #scipy.io.savemat('/Users/fabiangnegel/MIPDECO/Feuerprojekt/Results/contaStateFullxn%dtn%ds%d.mat' % (xn,tn,5), dict([('x_k',x),('duration',duration),('objective',model.solution.get_objective_value()),('gap',model.solution.MIP.get_mip_relative_gap())]))
986,615 | e73f6f57f8718365f4f11d0d0a78ea2068e47824 | import asyncio
import time
async def coroutine_A():
    """Print a start marker, pause 3 s without blocking the loop, then finish."""
    print('coroutine_A starts!')
    await asyncio.sleep(3)
    print('coroutine_A completed!')
async def coroutine_B():
    """Print a start marker, pause 2 s without blocking the loop, then finish."""
    print('coroutine_B starts!')
    await asyncio.sleep(2)
    print('coroutine_B completed!')
def main():
    """Run coroutine_A and coroutine_B concurrently and wait for both.

    Updated from the deprecated ``get_event_loop()`` /
    ``run_until_complete()`` / ``close()`` pattern to ``asyncio.run()``
    (Python 3.7+, and the file already uses 3.6+ f-strings). Behaviour is
    unchanged: both coroutines run concurrently, so the total runtime is
    ~3 s (the longer sleep), not 5 s.
    """
    async def _runner():
        # gather() schedules both coroutines on the running loop.
        await asyncio.gather(
            coroutine_A(),
            coroutine_B()
        )
    asyncio.run(_runner())
# Wall-clock the whole run; the 3 s and 2 s sleeps overlap, so this
# prints roughly 3 seconds, not 5.
time_start = time.time()
main()
time_end = time.time()
print(f'Took {time_end - time_start}')
986,616 | 29285a8dc1ed6644ac679c733a2b9c41920ebe28 | from multiprocessing.managers import BaseManager
import NBM2_functions as nbmf
import step0_functions as s0f
import sqlalchemy as sal
import geopandas as gpd
import pandas as pd
import traceback
import pickle
import json
import time
import csv
import os
import gc
def changeTogeom(db_config, config, start_time):
    """
    changes the name of the "GEOMETRY" COLUMN in the block table to "geom"
    so it can be processed by SQLAlchemy (which appears to have a hard time
    managing anything other than "geom" for geometry names)

    Arguments In:
        db_config:      a dictionary that contains the configuration
                        information for the database and queue
        config:         a dictionary that contains the configuration
                        information of various steps of NMB2 data
                        processing
        start_time:     a time structure variable that indicates when
                        the current step started

    Arguments Out:
        continue_run:   a boolean variable that indicates if the routine
                        successfully completed and whether the next
                        steps should be exectuted
    """
    try:
        temp_time = time.localtime()
        # make the connection string
        connection_string = 'postgresql://%s:%s@%s:%s/%s' %\
                (db_config['db_user'], db_config['db_password'],
                db_config['db_host'], db_config['db_port'], db_config['db'])
        engine = sal.create_engine(connection_string)
        with engine.connect() as conn, conn.begin():
            # probe information_schema first so the rename is idempotent
            # across re-runs of this step
            sql_string ="""
                SELECT column_name
                FROM information_schema.columns
                WHERE table_schema = '%s'
                AND table_name='nbm2_block_%s'
                AND column_name='geom';
                """ % (db_config['db_schema'],config['census_vintage'])
            column_exists = conn.execute(sql_string)
            if len([c[0] for c in column_exists]) == 0:
                # rename only when the 'geom' column does not exist yet
                sql_string = """
                    ALTER TABLE {0}.nbm2_block_{1}
                    RENAME COLUMN "GEOMETRY" TO geom; COMMIT;
                    """.format(db_config['db_schema'], config['census_vintage'])
                conn.execute(sql_string)
        my_message = """
            INFO - STEP 0 (MASTER): TASK 6 OF 13 - CHANGED "GEOMETRY" TO geom
            """
        my_message = ' '.join(my_message.split())
        print(nbmf.logMessage(my_message, temp_time, time.localtime(),
            time.mktime(time.localtime())-time.mktime(start_time)))
        del engine
        gc.collect()
        return True
    except:
        # bare except by design: any failure is logged with its traceback
        # and reported to the step-runner as False
        my_message = """
            ERROR - STEP 0 (MASTER): TASK 6 OF 13 - FAILED TO CHANGE "GEOMETRY"
            TO geom
            """
        my_message = ' '.join(my_message.split()) + '\n' + traceback.format_exc()
        print(nbmf.logMessage(my_message, temp_time, time.localtime(),
            time.mktime(time.localtime())-time.mktime(start_time)))
        return False
def changeToGEOMETRY(config, db_config, start_time):
    """
    changes the name of the "geom" COLUMN in the block table back to
    "GEOMETRY" so it is consistent with the other tables.

    Arguments In:
        db_config:      a dictionary that contains the configuration
                        information for the database and queue
        config:         a dictionary that contains the configuration
                        information of various steps of NMB2 data
                        processing
        start_time:     a time structure variable that indicates when
                        the current step started

    Arguments Out:
        continue_run:   a boolean variable that indicates if the routine
                        successfully completed and whether the next
                        steps should be exectuted
    """
    try:
        temp_time = time.localtime()
        # make the connection string
        connection_string = 'postgresql://%s:%s@%s:%s/%s' %\
                (db_config['db_user'], db_config['db_password'],
                db_config['db_host'], db_config['db_port'], db_config['db'])
        engine = sal.create_engine(connection_string)
        with engine.connect() as conn, conn.begin():
            # inverse of changeTogeom(): rename back only if 'geom' exists
            sql_string ="""
                SELECT column_name
                FROM information_schema.columns
                WHERE table_schema = '%s'
                AND table_name='nbm2_block_%s'
                AND column_name='geom';
                """ % (db_config['db_schema'],config['census_vintage'])
            column_exists = conn.execute(sql_string)
            if len([c[0] for c in column_exists]) == 1:
                sql_string = """
                    ALTER TABLE {0}.nbm2_block_{1}
                    RENAME COLUMN geom TO "GEOMETRY"; COMMIT;
                    """.format(db_config['db_schema'], config['census_vintage'])
                conn.execute(sql_string)
        my_message = """
            INFO - STEP 0 (MASTER): TASK 6 OF 13 - CHANGED geom BACK TO
            "GEOMETRY"
            """
        my_message = ' '.join(my_message.split())
        print(nbmf.logMessage(my_message, temp_time, time.localtime(),
            time.mktime(time.localtime())-time.mktime(start_time)))
        del engine
        gc.collect()
        return True
    except:
        # bare except by design: log the traceback and signal failure
        my_message = """
            ERROR - STEP 0 (MASTER): TASK 6 OF 13 - FAILED TO CHANGE geom BACK
            TO "GEOMETRY"
            """
        my_message = ' '.join(my_message.split()) + '\n' + traceback.format_exc()
        print(nbmf.logMessage(my_message, temp_time, time.localtime(),
            time.mktime(time.localtime())-time.mktime(start_time)))
        return False
def getCounty_fips(config, start_time):
    """
    reads the county FIPS codes (first column, one per row) from the
    county_fips.csv file written earlier in the pipeline

    Arguments In:
        config:         a dictionary that contains the configuration
                        information of various steps of NMB2 data
                        processing
        start_time:     a time structure variable that indicates when
                        the current step started

    Arguments Out:
        continue_run:   True on success, False on any failure
        county_fips:    list of county FIPS code strings (None on failure)
    """
    try:
        temp_time = time.localtime()
        fips_path = config['temp_csvs_dir_path'] + 'county_fips.csv'
        with open(fips_path, 'r') as fips_file:
            county_fips = [record[0] for record in csv.reader(fips_file)]
        my_message = """
            INFO - STEP 0 (MASTER): TASK 6 OF 13 - ESTABLISHED LIST OF COUNTY
            FIPS
            """
        my_message = ' '.join(my_message.split())
        print(nbmf.logMessage(my_message, temp_time, time.localtime(),
            time.mktime(time.localtime()) - time.mktime(start_time)))
        gc.collect()
        return True, county_fips
    except:
        my_message = """
            ERROR - STEP 0 (MASTER): TASK 6 OF 13 - FAILED TO FIND LIST OF
            COUNTY FIPS
            """
        my_message = ' '.join(my_message.split()) + '\n' + traceback.format_exc()
        print(nbmf.logMessage(my_message, temp_time, time.localtime(),
            time.mktime(time.localtime()) - time.mktime(start_time)))
        return False, None
def loadBlockQueue(input_queue, county_fips, config, start_time):
    """
    queues one work item per county FIPS code so the distributed workers
    can build the county-level block geojson files

    Arguments In:
        input_queue:    a multiprocessing queue shared across servers and
                        cores; receives one entry per county
        county_fips:    a list of county FIPS codes
        config:         processing configuration dictionary (kept for a
                        uniform call signature)
        start_time:     a time structure variable marking the start of
                        this step

    Arguments Out:
        continue_run:   True on success, False on failure
        county_counter: number of counties queued (None on failure)
    """
    try:
        temp_time = time.localtime()
        for fips in county_fips:
            input_queue.put(fips)
        county_counter = len(county_fips)
        my_message = """
            INFO - STEP 0 (MASTER): TASK 6 OF 13 - COMPLETED LOADING INPUT
            QUEUE WITH COUNTY DATA
            """
        my_message = ' '.join(my_message.split())
        print(nbmf.logMessage(my_message, temp_time, time.localtime(),
            time.mktime(time.localtime()) - time.mktime(start_time)))
        return True, county_counter
    except:
        my_message = """
            ERROR - STEP 0 (MASTER): TASK 6 OF 13 - FAILED TO LOADING QUEUE WITH
            COUNTY DATA
            """
        my_message = ' '.join(my_message.split()) + '\n' + traceback.format_exc()
        print(nbmf.logMessage(my_message, temp_time, time.localtime(),
            time.mktime(time.localtime()) - time.mktime(start_time)))
        return False, None
def makeBlockTablePickle(config, db_config, start_time):
    """
    creates a pickle (serialized object) of the block data so that
    distributed processes can quickly read the data instead of going to the
    database

    Arguments In:
        config:         a dictionary that contains the configuration
                        information of various steps of NMB2 data
                        processing
        db_config:      a dictionary that contains the configuration
                        information for the database and queue
        start_time:     a time structure variable that indicates when
                        the current step started

    Arguments Out:
        continue_run:   a boolean variable that indicates if the routine
                        successfully completed and whether the next
                        steps should be exectuted
    """
    # Bug fix: block_df is pre-bound so the except block can always clean it
    # up. Previously the handler did an unconditional `del block_df`, which
    # raised NameError whenever the failure happened before the query
    # succeeded (e.g. a bad connection string), masking the `return False`
    # that the step-runner protocol relies on.
    block_df = None
    try:
        temp_time = time.localtime()
        # make the connection string
        connection_string = 'postgresql://%s:%s@%s:%s/%s' %\
            (db_config['db_user'], db_config['db_password'],
            db_config['db_host'], db_config['db_port'], db_config['db'])
        # build the query that will make the block table pickle
        engine = sal.create_engine(connection_string)
        sql_string = """
            SELECT CAST("BLOCK_FIPS" AS TEXT) as geoid{0}, "ALAND{0}", geom
            FROM {1}.nbm2_block_{2}
            """.format( config['census_vintage'][2:], db_config['db_schema'],
                        config['census_vintage'])
        # load the data into a dataframe
        starting_crs={'init':'epsg:%s' % db_config['SRID']}
        block_df = gpd.read_postgis(sql_string, engine, crs=starting_crs)
        my_message = """
            INFO - STEP 0 (MASTER): TASK 6 OF 13 - READ IN BLOCK DATA TABLE
            """
        my_message = ' '.join(my_message.split())
        print(nbmf.logMessage(my_message, temp_time, time.localtime(),
            time.mktime(time.localtime())-time.mktime(start_time)))
        # write out the pickle
        temp_time = time.localtime()
        with open(config['temp_pickles']+'block_df.pkl','wb') as my_pickle:
            pickle.dump(block_df, my_pickle)
        my_message = """
            INFO - STEP 0 (MASTER): TASK 6 OF 13 - PICKLED OFF THE BLOCK_TABLE
            """
        my_message = ' '.join(my_message.split())
        print(nbmf.logMessage(my_message, temp_time, time.localtime(),
            time.mktime(time.localtime())-time.mktime(start_time)))
        # release the (potentially very large) dataframe before returning
        block_df = None
        del block_df
        engine = None
        del engine
        my_pickle = None
        del my_pickle
        gc.collect()
        return True
    except:
        my_message = """
            ERROR - STEP 0 (MASTER): TASK 6 OF 13 - COULD NOT PICKLE OFF THE
            BLOCK_TABLE
            """
        my_message = ' '.join(my_message.split()) + '\n' +\
                    traceback.format_exc()
        print(nbmf.logMessage(my_message, temp_time, time.localtime(),
            time.mktime(time.localtime())-time.mktime(start_time)))
        # block_df is always bound here (see pre-binding above)
        block_df = None
        gc.collect()
        return False
def breakOutBlockData(input_queue, output_queue, message_queue, config,
                    db_config, start_time):
    """
    main subroutine that manages the creation of the county level block data
    files

    Arguments In:
        input_queue:    a multiprocessing queue that can be shared across
                        multiple servers and cores.  All information to be
                        processed is loaded into the queue
        output_queue:   a multiprocessing queue that can be shared across
                        multiple servers and cores.  All results from the
                        various processes are loaded into the queue
        message_queue:  a multiprocessing queue variable that is used to
                        communicate between the master and servants
        config:         a dictionary that contains the configuration
                        information of various steps of NMB2 data
                        processing
        db_config:      a dictionary that contains the configuration
                        information for the database and queue
        start_time:     a time structure variable that indicates when
                        the current step started

    Arguments Out:
        continue_run:   a boolean variable that indicates if the routine
                        successfully completed and whether the next
                        steps should be exectuted
    """
    try:
        temp_time = time.localtime()
        # each sub-step gates the next via the continue_run flag; the first
        # failure short-circuits the rest and reports False
        continue_run, county_fips = getCounty_fips(config, start_time)
        if continue_run:
            continue_run = changeTogeom(db_config, config, start_time)
        if continue_run:
            continue_run = makeBlockTablePickle(config, db_config, start_time)
        if continue_run:
            # tell every listening server to load the freshly written pickle
            for _ in range(config['number_servers']):
                message_queue.put('parse_blockdf')
            continue_run, county_counter = loadBlockQueue(input_queue, county_fips,
                                        config, start_time)
        if continue_run:
            continue_run = s0f.processWork(config, input_queue, output_queue,
                                        county_counter, start_time)
        if continue_run:
            # restore the original column name once the workers are done
            continue_run = changeToGEOMETRY(config, db_config, start_time)
        if continue_run:
            my_message = """
                INFO - STEP 0 (MASTER): TASK 6 OF 13 - COMPLETED CREATING COUNTY
                LEVEL GEOJSON BLOCK FILES
                """
            my_message = ' '.join(my_message.split())
            print(nbmf.logMessage(my_message,temp_time, time.localtime(),
                time.mktime(time.localtime()) - time.mktime(start_time)))
            gc.collect()
            return True
        else:
            my_message = """
                ERROR - STEP 0 (MASTER): TASK 6 OF 13 - FAILED TO CREATE COUNTY
                LEVEL GEOJSON BLOCK FILES
                """
            my_message = ' '.join(my_message.split()) + '\n' + traceback.format_exc()
            print(nbmf.logMessage(my_message,temp_time, time.localtime(),
                time.mktime(time.localtime()) - time.mktime(start_time)))
            return False
    except:
        my_message = """
            ERROR - STEP 0 (MASTER): TASK 6 OF 13 - FAILED TO CREATE COUNTY
            LEVEL GEOJSON BLOCK FILES
            """
        my_message = ' '.join(my_message.split()) + '\n' + traceback.format_exc()
        print(nbmf.logMessage(my_message,temp_time, time.localtime(),
            time.mktime(time.localtime()) - time.mktime(start_time)))
        return False
986,617 | 82035eed3bfa9ff12ed432584b483d34f859cbaa | # -*- coding: utf-8 -*-
# LeetCode 1078-Bigram分词
"""
Created on Sun Dec 26 17:45 2021
@author: _Mumu
Environment: py38
"""
from typing import List
class Solution:
    def findOcurrences(self, text: str, first: str, second: str) -> List[str]:
        """Return every word that immediately follows an occurrence of the
        bigram "first second" in `text` (LeetCode 1078).

        Words are taken by splitting on single spaces, matching the
        original implementation's tokenization.
        """
        words = text.split(' ')
        # Slide a window of three words: whenever the two preceding words
        # form the bigram, the current word is part of the answer.
        return [words[i] for i in range(2, len(words))
                if words[i - 2] == first and words[i - 1] == second]
|
986,618 | 326a3b6bb8e33a2df2244afcfe54fad407c2f92a | import sys
from collections import deque
input = sys.stdin.readline
# solution
def bfs():
    """Breadth-first search from the queued start position until K is
    reached; returns the minimum step count.

    Relies on module-level globals: q (deque frontier), K (goal),
    visited (per-position distance table).
    """
    while q:
        pos = q.popleft()
        if pos == K:
            return visited[pos]
        # the three moves, relaxed in the same order as before
        for nxt in (pos + 1, pos - 1, pos * 2):
            pushnext(nxt, pos)
def pushnext(nxt, cur):
    """Relax the edge cur -> nxt (cost 1): record the shorter distance and
    enqueue nxt when it is in range and the new path improves on it."""
    if 0 <= nxt < maxsize and visited[cur] + 1 < visited[nxt]:
        visited[nxt] = visited[cur] + 1
        q.append(nxt)
# main
if __name__ == "__main__":
    # N = start position, K = target position, read from stdin (one line).
    N, K = map(int, input().split())
    maxsize = 100001  # positions are confined to [0, 100000]
    visited = [maxsize]*maxsize  # distance table; maxsize acts as "infinity"
    visited[N] = 0
    q = deque([N])
    print(bfs())
986,619 | 14bae44bbbafde0bbb656cb7bb891e3a3c9402d0 | import mariadb
import dbcreds
# def noticeTweetLike(user_id):
# conn = None
# cursor = None
# try:
# conn = mariadb.connect(user=dbcreds.user, password=dbcreds.password, host=dbcreds.host, port=dbcreds.port, database=dbcreds.database)
# cursor = conn.cursor()
# print(user_id)
# cursor.execute("SELECT id FROM tweets WHERE user_id=?", [user_id])
# rows = cursor.fetchall()
# tweets=[]
# for row in rows:
# cursor.execute("SELECT COUNT(*) FROM tweet_like WHERE tweet_id=? AND notice=1", [row[0]])
# count = cursor.fetchall()
# tweet={
# "tweet_id": row[0],
# "newLike": count[0][0]
# }
# tweets.append(tweet)
# except mariadb.ProgrammingError:
# print("program error...")
# except mariadb.DataError:
# print("Data error...")
# except mariadb.DatabaseError:
# print("Database error...")
# except mariadb.OperationalError:
# print("connect error...")
# finally:
# if(cursor != None):
# cursor.close()
# if(conn != None):
# conn.rollback()
# conn.close()
# return tweets
# def editTweetLike(tweet_id, likeUser_id):
# conn = None
# cursor = None
# row = None
# user_id = None
# amount = None
# try:
# conn = mariadb.connect(user=dbcreds.user, password=dbcreds.password, host=dbcreds.host, port=dbcreds.port, database=dbcreds.database)
# cursor = conn.cursor()
# cursor.execute("SELECT user_id FROM token WHERE token=?", [token,])
# user_id = cursor.fetchone()[0]
# if user_id != None:
# cursor.execute("UPDATE tweet_like SET notice = 0 WHERE tweet_id=? AND user_id=?", [tweet_id, likeUser_id])
# row = cursor.rowcount
# except mariadb.ProgrammingError:
# print("program error...")
# except mariadb.DataError:
# print("Data error...")
# except mariadb.DatabaseError:
# print("Database error...")
# except mariadb.OperationalError:
# print("connect error...")
# finally:
# if(cursor != None):
# cursor.close()
# if(conn != None):
# conn.rollback()
# conn.close()
# if row == 1:
# return True
# else:
# return False
def clearFollowNotice(token, following_id):
    """Mark a follow notification as read (notice=0) for the token's user.

    Returns True when exactly one row was updated, False otherwise
    (unknown token, no matching row, or a database error).
    """
    conn = None
    cursor = None
    row = None
    user_id = None
    try:
        conn = mariadb.connect(user=dbcreds.user, password=dbcreds.password, host=dbcreds.host, port=dbcreds.port, database=dbcreds.database)
        cursor = conn.cursor()
        cursor.execute("SELECT user_id FROM token WHERE token=?", [token,])
        # Fix: fetchone() returns None for an unknown token; subscripting it
        # directly raised TypeError instead of failing gracefully.
        token_row = cursor.fetchone()
        user_id = token_row[0] if token_row else None
        if user_id != None:
            # NOTE(review): binds id=following_id and following_id=user_id;
            # the column/value pairing looks swapped -- confirm against schema.
            cursor.execute("UPDATE user_follows SET notice=0 WHERE id=? AND following_id=?", [following_id, user_id])
            conn.commit()
            row = cursor.rowcount
    except mariadb.ProgrammingError:
        print("program error...")
    except mariadb.DataError:
        print("Data error...")
    # Fix: OperationalError must precede its base class DatabaseError,
    # otherwise its handler is unreachable (PEP 249 exception hierarchy).
    except mariadb.OperationalError:
        print("connect error...")
    except mariadb.DatabaseError:
        print("Database error...")
    finally:
        if(cursor != None):
            cursor.close()
        if(conn != None):
            # No-op after a successful commit; discards partial work on error.
            conn.rollback()
            conn.close()
    return row == 1
def clearCommentNotice(token, comment_id):
    """Mark a comment notification as read (notice=0) for the token's user.

    Returns True when exactly one row was updated, False otherwise.
    """
    conn = None
    cursor = None
    row = None
    user_id = None
    try:
        conn = mariadb.connect(user=dbcreds.user, password=dbcreds.password, host=dbcreds.host, port=dbcreds.port, database=dbcreds.database)
        cursor = conn.cursor()
        cursor.execute("SELECT user_id FROM token WHERE token=?", [token,])
        # Fix: guard against an unknown token (fetchone() -> None) instead of
        # crashing with TypeError on the subscript.
        token_row = cursor.fetchone()
        user_id = token_row[0] if token_row else None
        if user_id != None:
            cursor.execute("UPDATE comments SET notice=0 WHERE id=?", [comment_id,])
            conn.commit()
            row = cursor.rowcount
    except mariadb.ProgrammingError:
        print("program error...")
    except mariadb.DataError:
        print("Data error...")
    # Fix: OperationalError must precede its base class DatabaseError,
    # otherwise its handler is unreachable (PEP 249 exception hierarchy).
    except mariadb.OperationalError:
        print("connect error...")
    except mariadb.DatabaseError:
        print("Database error...")
    finally:
        if(cursor != None):
            cursor.close()
        if(conn != None):
            # No-op after a successful commit; discards partial work on error.
            conn.rollback()
            conn.close()
    return row == 1
def clearLikeNotice(token, like_id):
    """Mark a like notification as read (notice=0) for the token's user.

    Returns True when exactly one row was updated, False otherwise.
    """
    conn = None
    cursor = None
    row = None
    user_id = None
    try:
        conn = mariadb.connect(user=dbcreds.user, password=dbcreds.password, host=dbcreds.host, port=dbcreds.port, database=dbcreds.database)
        cursor = conn.cursor()
        cursor.execute("SELECT user_id FROM token WHERE token=?", [token,])
        # Fix: guard against an unknown token (fetchone() -> None).
        token_row = cursor.fetchone()
        user_id = token_row[0] if token_row else None
        if user_id != None:
            cursor.execute("UPDATE tweet_like SET notice=0 WHERE id=?", [like_id,])
            conn.commit()
            row = cursor.rowcount
    except mariadb.ProgrammingError:
        print("program error...")
    except mariadb.DataError:
        print("Data error...")
    # Fix: OperationalError must precede its base class DatabaseError,
    # otherwise its handler is unreachable (PEP 249 exception hierarchy).
    except mariadb.OperationalError:
        print("connect error...")
    except mariadb.DatabaseError:
        print("Database error...")
    finally:
        if(cursor != None):
            cursor.close()
        if(conn != None):
            # No-op after a successful commit; discards partial work on error.
            conn.rollback()
            conn.close()
    return row == 1
def clearNestcommentNotice(token, com_comment_id):
    """Mark a nested-comment notification as read (notice=0) for the token's user.

    Returns True when exactly one row was updated, False otherwise.
    """
    conn = None
    cursor = None
    row = None
    user_id = None
    try:
        conn = mariadb.connect(user=dbcreds.user, password=dbcreds.password, host=dbcreds.host, port=dbcreds.port, database=dbcreds.database)
        cursor = conn.cursor()
        cursor.execute("SELECT user_id FROM token WHERE token=?", [token,])
        # Fix: guard against an unknown token (fetchone() -> None).
        token_row = cursor.fetchone()
        user_id = token_row[0] if token_row else None
        if user_id != None:
            cursor.execute("UPDATE com_comment SET notice=0 WHERE id=?", [com_comment_id,])
            conn.commit()
            row = cursor.rowcount
    except mariadb.ProgrammingError:
        print("program error...")
    except mariadb.DataError:
        print("Data error...")
    # Fix: OperationalError must precede its base class DatabaseError,
    # otherwise its handler is unreachable (PEP 249 exception hierarchy).
    except mariadb.OperationalError:
        print("connect error...")
    except mariadb.DatabaseError:
        print("Database error...")
    finally:
        if(cursor != None):
            cursor.close()
        if(conn != None):
            # No-op after a successful commit; discards partial work on error.
            conn.rollback()
            conn.close()
    return row == 1
986,620 | 2643a49c01528d9214e307d4538b70bba5df9d6f | country = input().split(', ')
capital = input().split(', ')
pairs = {country: capital for country, capital in zip(country, capital)}
print('\n'. join([f'{country} -> {capital}' for country, capital in pairs.items()]))
|
986,621 | 48ef10851623b47a3fa6454da1c81d5565696980 | class Hotel:
def __init__(self, rooms, floors, Room):
self.rooms = rooms
self.floors = floors
rooms_per_floor = int(self.rooms/self.floors)
last_floor_rooms = self.rooms % self.floors
room_numbers = [int(room) for room in range(1, self.rooms+1)]
self.List_Room = []
self.Structure = []
for floor in range(self.floors):
floors_here = []
for room in range(0, rooms_per_floor):
# floors_here.append(room_numbers.pop(0))
self.List_Room.append(Room(room_numbers.pop(0), floor))
self.Structure.append(floors_here)
last_floor = []
for floor in range(0, last_floor_rooms):
self.List_Room.append(Room(room_numbers.pop(0), self.floors-1))
self.Structure[len(self.Structure)-1].extend(last_floor)
def get_List_Room(self):
return self.List_Room
class Room:
    """A single hotel room: number, floor label and current guest (0 = vacant).

    Floor 0 is displayed as "Parter" (ground floor). Constructing a Room
    prints its status line, matching the original behaviour.
    """

    def __init__(self, room_nr, floor):
        self.room_nr = room_nr
        self.floor = "Parter" if floor == 0 else floor
        self.Guest = 0
        print("Number pokoju :", self.room_nr, " Piętro :", self.floor, "Gość:", self.Guest)

    def show_rooms(self):
        """Print this room's status line; vacant rooms show dashes."""
        to_print = "-----" if self.Guest == 0 else str(self.Guest)
        print("Number pokoju :", self.room_nr, " Piętro :", self.floor, " Gość:", to_print)

    def leave_room(self):
        """Vacate the room and announce it."""
        self.Guest = 0
        print("Pokój został zwolniony")

    def __str__(self):
        return f"Number pokoju : {self.room_nr} Piętro : {self.floor} Gość: {self.Guest}"
class Person(Hotel):
    """A hotel guest.

    Inherits Hotel as in the original code, although no Hotel state is
    initialised here -- the base class is effectively unused.
    """

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return self.name

    def take_room(self, room, room_nr):
        """Occupy `room` if vacant; `room_nr` is the 0-based index used for display."""
        self.room_nr = room_nr
        if room.Guest != 0:
            print(self.name, "ten pokój jest zajęty")
        else:
            room.Guest = self.name
            print(self.name, "wynajął pokój", self.room_nr+1)
def Menu():
    """Interactive console loop: build the hotel, then add guests, rent,
    vacate and list rooms until the user chooses to exit (Polish prompts)."""
    floors = int(input("Piętra: "))
    rooms = int(input("Pokoje: "))
    if floors > rooms:
        # Every floor must hold at least one room.
        print("Piętro nie może być puste !")
        exit()
    else:
        h = Hotel(rooms, floors, Room)
        room_list = Hotel.get_List_Room(h)
        Guest_list = []
        while True:
            print("1. Dodaj gościa")
            print("2. Wynajmij pokój")
            print("3. Wymelduj się z pokóju")
            print("4. Wyjście z Hotelu")
            choice = int(input("Wybierz: "))
            if choice == 1:
                # Register a new guest.
                name = input("Podaj nazwę gościa: ")
                Guest_list.append(Person(name))
            elif choice == 2:
                # Rent a room: list guests (1-based), then rooms, then assign.
                i = 1
                for name in Guest_list:
                    print(i, name)
                    i += 1
                Guest = int(input("Komu chcesz wynająć pokój (id): "))
                for room in room_list:
                    room.show_rooms()
                room_to_give = int(input(f"Który pokój ma wziąć: "))-1
                Guest_list[Guest -1].take_room(room_list[room_to_give], room_to_give)
            elif choice == 3:
                # Vacate a room chosen by its 1-based position in the list.
                for room in room_list:
                    room.show_rooms()
                room_to_leave = int(input("Który pokój chcesz zwolnić: "))
                room_list[room_to_leave-1].leave_room()
            elif choice == 4:
                print("Naraska ")
                break
        # Final status dump after leaving the loop.
        for i in (room_list):
            print(i)
Menu()
986,622 | 052070d6371a3a70e383aa016378a53dd8c26732 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-13 03:28
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for the `search` app: Host, HostGroup
    and the HostGroupRelevance through-table for their many-to-many link.

    Generated code -- do not hand-edit field definitions; already-applied
    migrations must stay in sync with the database schema.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Host',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('hostname', models.CharField(max_length=25)),
                ('ip', models.GenericIPAddressField()),
                # NOTE(review): max_length is not a valid IntegerField option
                # (Django ignores it here; newer versions raise a check error)
                # -- fix in the model and a follow-up migration, not here.
                ('port', models.IntegerField(max_length=5)),
                ('group', models.CharField(max_length=25, verbose_name='\u4e3b\u673a\u7ec4')),
                ('description', models.CharField(default='nothing...', max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name='HostGroup',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('group_name', models.CharField(max_length=25)),
            ],
        ),
        migrations.CreateModel(
            name='HostGroupRelevance',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='search.HostGroup')),
                ('host', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='search.Host')),
            ],
        ),
        migrations.AddField(
            model_name='hostgroup',
            name='group_member',
            field=models.ManyToManyField(through='search.HostGroupRelevance', to='search.Host'),
        ),
    ]
|
986,623 | d0454e4b7773ad209cd8e210c0fe2157e986b42f | #!/usr/bin/env python
"""
Name: add_land_locked_cells_to_mask.py
Author: Mark Petersen, Adrian Turner
Find ocean cells that are land-locked, and alter the cell
mask so that they are counted as land cells.
"""
import os
import shutil
from netCDF4 import Dataset
import numpy as np
import argparse
def removeFile(fileName):
    """Best-effort delete: remove fileName, ignoring a missing file."""
    try:
        os.remove(fileName)
    except OSError:
        # Already absent (or otherwise not removable) -- treat as success.
        pass
parser = \
argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-f", "--input_mask_file", dest="input_mask_filename",
help="Mask file that includes cell and edge masks.",
metavar="INPUTMASKFILE", required=True)
parser.add_argument("-o", "--output_mask_file", dest="output_mask_filename",
help="Mask file that includes cell and edge masks.",
metavar="OUTPUTMASKFILE", required=True)
parser.add_argument("-m", "--mesh_file", dest="mesh_filename",
help="MPAS Mesh filename.", metavar="MESHFILE",
required=True)
parser.add_argument("-l", "--latitude_threshold", dest="latitude_threshold",
help="Minimum latitude, in degrees, for transect widening.",
required=False, type=float, default=43.0)
parser.add_argument("-n", "--number_sweeps", dest="nSweeps",
help="Maximum number of sweeps to search for land-locked cells.",
required=False, type=int, default=10)
args = parser.parse_args()
latitude_threshold_radians = args.latitude_threshold*3.1415/180.
# Obtain mesh variables
meshFile = Dataset(args.mesh_filename, "r")
nCells = len(meshFile.dimensions["nCells"])
maxEdges = len(meshFile.dimensions["maxEdges"])
cellsOnCell = meshFile.variables["cellsOnCell"][:, :]
nEdgesOnCell = meshFile.variables["nEdgesOnCell"][:]
latCell = meshFile.variables["latCell"][:]
lonCell = meshFile.variables["lonCell"][:]
meshFile.close()
removeFile(args.output_mask_filename)
shutil.copyfile(args.input_mask_filename,args.output_mask_filename)
# Obtain original cell mask from input file
inputMaskFile = Dataset(args.input_mask_filename, "r")
nRegions = len(inputMaskFile.dimensions["nRegions"])
regionCellMasks = inputMaskFile.variables["regionCellMasks"][:, :]
# set landMask = flattened regionCellMasks
landMask = np.amax(regionCellMasks, axis=1)
inputMaskFile.close()
# Open output file
outputMaskFile = Dataset(args.output_mask_filename, "a")
landMaskDiagnostic = outputMaskFile.createVariable("landMaskDiagnostic", "i", dimensions=("nCells"))
print "Running add_land_locked_cells_to_mask.py. Total number of cells: ", nCells
# use np.array, as simple = makes a pointer
landMaskNew = np.array(landMask)
activeEdgeSum = np.zeros(maxEdges, dtype="i")
# Removable cells are ocean cells outside of latitude threshold
removableCellIndex = np.zeros(nCells, dtype="i")
nRemovableCells = 0
print "Step 1: Searching for land-locked cells. Remove cells that only have isolated active edges."
landLockedCounter = 0
for iCell in range(nCells):
landMaskDiagnostic[iCell] = landMask[iCell]
# skip if outside latitude threshold or if this is already a land cell
if abs(latCell[iCell]) < latitude_threshold_radians or landMask[iCell] == 1:
continue
removableCellIndex[nRemovableCells] = iCell
nRemovableCells += 1
activeEdgeSum[:] = 0
for iEdgeOnCell in range(nEdgesOnCell[iCell]):
# check if neighbor is an ocean cell (landMask=0)
# subtract 1 to convert 1-base to 0-base:
if landMask[cellsOnCell[iCell, iEdgeOnCell]-1] == 0:
activeEdgeSum[iEdgeOnCell] += 1
# % is modulo operator:
iP1 = (iEdgeOnCell + 1) % nEdgesOnCell[iCell]
activeEdgeSum[iP1] += 1
if np.amax(activeEdgeSum[0:nEdgesOnCell[iCell]]) == 1:
outputMaskFile['regionCellMasks'][iCell, 1] = 1
landLockedCounter += 1
landMaskNew[iCell] = 1
landMaskDiagnostic[iCell] = 2
landMask[:] = landMaskNew[:]
print " Number of landLocked cells: ", landLockedCounter
print "Step 2: Searching for land-locked cells. Remove cells that have any isolated active edges."
# Iterate until a sweep removes nothing (or the sweep limit is reached),
# since removing one cell can land-lock its neighbors.
for iSweep in range(args.nSweeps):
    landLockedCounter = 0
    for iRemovableCell in range(0, nRemovableCells):
        iCell = removableCellIndex[iRemovableCell]
        if landMask[iCell] == 1:
            continue
        for iEdgeOnCell in range(nEdgesOnCell[iCell]):
            # check if neighbor is an ocean cell (landMask=0)
            # subtract 1 to convert 1-base to 0-base:
            if landMask[cellsOnCell[iCell, iEdgeOnCell]-1] == 0:
                # % is modulo operator:
                iP1 = (iEdgeOnCell + 1) % nEdgesOnCell[iCell]
                iM1 = (iEdgeOnCell - 1) % nEdgesOnCell[iCell]
                # Is this neighbor's two neighbors to left and right land?
                # if so, sum of masks is two.
                # subtract 1 to convert 1-base to 0-base:
                if (landMask[cellsOnCell[iCell, iP1]-1]
                    + landMask[cellsOnCell[iCell, iM1]-1]) == 2:
                    landLockedCounter += 1
                    landMaskNew[iCell] = 1
                    outputMaskFile['regionCellMasks'][iCell, 1] = 1
                    landMaskDiagnostic[iCell] = 3
                    # once we remove this cell, we can quit checking over edges
                    break
    landMask[:] = landMaskNew[:]
    print " Sweep: ", iSweep+1, "Number of landLocked cells removed: ", landLockedCounter
    if landLockedCounter == 0:
        break
print "Step 3: Perform flood fill, starting from open ocean."
floodFill = np.zeros(nCells, dtype="i")
floodableCellIndex = np.zeros(nCells, dtype="i")
nFloodableCells = 0
floodFill[:] = -1
d2r = 3.1415/180.0
# init flood fill to 0 for water, -1 for land, 1 for known open ocean regions
# Seed boxes below are hand-picked lat/lon rectangles known to be open ocean.
for iRemovableCell in range(0, nRemovableCells):
    iCell = removableCellIndex[iRemovableCell]
    if (landMaskDiagnostic[iCell] == 0):
        floodFill[iCell] = 0
        if (latCell[iCell] > 84.0*d2r # North Pole
            or lonCell[iCell] > 160.0*d2r and lonCell[iCell] < 230.0*d2r and latCell[iCell] > 73.0*d2r # Arctic
            or lonCell[iCell] > 315.0*d2r and lonCell[iCell] < 340.0*d2r and latCell[iCell] > 15.0*d2r and latCell[iCell] < 45.0*d2r # North Atlantic
            or lonCell[iCell] > 290.0*d2r and lonCell[iCell] < 300.0*d2r and latCell[iCell] > 72.0*d2r and latCell[iCell] < 75.0*d2r # North Atlantic
            or lonCell[iCell] > 0.0*d2r and lonCell[iCell] < 10.0*d2r and latCell[iCell] > 70.0*d2r and latCell[iCell] < 75.0*d2r # North Atlantic 2
            or lonCell[iCell] > 150.0*d2r and lonCell[iCell] < 225.0*d2r and latCell[iCell] > 0.0*d2r and latCell[iCell] < 45.0*d2r # North Pacific
            or lonCell[iCell] > 0.0*d2r and lonCell[iCell] < 5.0*d2r and latCell[iCell] > -60.0*d2r and latCell[iCell] < 0.0*d2r # South Atlantic
            or lonCell[iCell] > 180.0*d2r and lonCell[iCell] < 280.0*d2r and latCell[iCell] > -60.0*d2r and latCell[iCell] < -10.0*d2r # South Pacific
            or lonCell[iCell] > 0.0*d2r and lonCell[iCell] < 165.0*d2r and latCell[iCell] > -60.0*d2r and latCell[iCell] < -45.0*d2r): # Southern Ocean
            floodFill[iCell] = 1
            landMaskDiagnostic[iCell] = 5 # indicates seed region
        else:
            floodableCellIndex[nFloodableCells] = iCell
            nFloodableCells += 1
print " Initial number of flood cells: ", nFloodableCells
# sweep over neighbors of known open ocean points
# (nCells sweeps is a safe upper bound; the loop exits early when stable.)
for iSweep in range(0, nCells):
    newFloodCellsThisSweep = 0
    for iFloodableCell in range(0, nFloodableCells):
        iCell = floodableCellIndex[iFloodableCell]
        if (floodFill[iCell] == 0):
            for iCellOnCellSweep in range(0, nEdgesOnCell[iCell]):
                iCellNeighbor = cellsOnCell[iCell, iCellOnCellSweep]-1
                if (floodFill[iCellNeighbor] == 1):
                    floodFill[iCell] = 1
                    newFloodCellsThisSweep += 1
                    break
    print " Sweep ", iSweep, " new flood cells this sweep: ", newFloodCellsThisSweep
    if (newFloodCellsThisSweep == 0):
        break
# Collapse the flood-fill result into a simple 0/1 ocean mask.
oceanMask = np.zeros(nCells, dtype="i")
for iCell in range(0, nCells):
    if (floodFill[iCell] == 1):
        oceanMask[iCell] = 1
print "Step 4: Searching for land-locked cells, step 3: revert cells with connected active edges"
for iSweep in range(args.nSweeps):
    landLockedCounter = 0
    for iRemovableCell in range(0, nRemovableCells):
        iCell = removableCellIndex[iRemovableCell]
        # only remove a cell that was added in last round (red cells)
        if landMaskDiagnostic[iCell] == 3:
            for iEdgeOnCell in range(nEdgesOnCell[iCell]):
                # check if neighbor is an ocean cell (landMask=0)
                # subtract 1 to convert 1-base to 0-base:
                if oceanMask[cellsOnCell[iCell, iEdgeOnCell]-1] == 1:
                    # % is modulo operator:
                    iP1 = (iEdgeOnCell + 1) % nEdgesOnCell[iCell]
                    iM1 = (iEdgeOnCell - 1) % nEdgesOnCell[iCell]
                    # Is either of this neighbor's two neighbors to left and right ocean?
                    # if so, sum of masks is two.
                    # subtract 1 to convert 1-base to 0-base:
                    if (landMask[cellsOnCell[iCell, iP1]-1] == 0
                        or landMask[cellsOnCell[iCell, iM1]-1] == 0):
                        landLockedCounter += 1
                        landMaskNew[iCell] = 0
                        outputMaskFile['regionCellMasks'][iCell, 1] = 0
                        landMaskDiagnostic[iCell] = 4
                        oceanMask[iCell] = 1
                        # once we remove this cell, we can quit checking over edges
                        break
    landMask[:] = landMaskNew[:]
    print " Sweep: ", iSweep+1, "Number of land-locked cells returned: ", landLockedCounter
    if landLockedCounter == 0:
        break
outputMaskFile.close()
|
986,624 | ee9f8d3134fb2c07b484750a856db6bea48ad6b7 | """
Key bindings, for scrolling up and down through pages.
This are separate bindings, because GNU readline doesn't have them, but
they are very useful for navigating through long multiline buffers, like in
Vi, Emacs, etc...
"""
from __future__ import unicode_literals
from prompt_toolkit.layout.utils import find_window_for_buffer_name
from six.moves import range
# Public API of this module.
# NOTE(review): scroll_page_down/scroll_page_up (defined below) are not
# listed here -- confirm whether that omission is intentional.
__all__ = (
    'scroll_forward',
    'scroll_backward',
    'scroll_half_page_up',
    'scroll_half_page_down',
    'scroll_one_line_up',
    'scroll_one_line_down',
)
def _current_window_for_event(event):
    """
    Return the `Window` for the currently focussed Buffer.
    """
    return find_window_for_buffer_name(event.cli, event.cli.current_buffer_name)
def scroll_forward(event, half=False):
    """
    Scroll window down.

    With half=True, scroll only half the window height. The cursor is moved
    down by as many document lines as fit in that vertical space; wrapped
    lines can occupy several screen rows, hence get_height_for_line below.
    """
    w = _current_window_for_event(event)
    b = event.cli.current_buffer

    if w and w.render_info:
        info = w.render_info
        ui_content = info.ui_content

        # Height to scroll.
        scroll_height = info.window_height
        if half:
            scroll_height //= 2

        # Calculate how many lines is equivalent to that vertical space.
        y = b.document.cursor_position_row + 1
        height = 0
        while y < ui_content.line_count:
            line_height = info.get_height_for_line(y)

            if height + line_height < scroll_height:
                height += line_height
                y += 1
            else:
                break

        b.cursor_position = b.document.translate_row_col_to_index(y, 0)
def scroll_backward(event, half=False):
    """
    Scroll window up.

    Mirror image of scroll_forward: move the cursor up by as many document
    lines as fit in (half) the window height, accounting for wrapped lines.
    """
    w = _current_window_for_event(event)
    b = event.cli.current_buffer

    if w and w.render_info:
        info = w.render_info

        # Height to scroll.
        scroll_height = info.window_height
        if half:
            scroll_height //= 2

        # Calculate how many lines is equivalent to that vertical space.
        y = max(0, b.document.cursor_position_row - 1)
        height = 0
        while y > 0:
            line_height = info.get_height_for_line(y)

            if height + line_height < scroll_height:
                height += line_height
                y -= 1
            else:
                break

        b.cursor_position = b.document.translate_row_col_to_index(y, 0)
def scroll_half_page_down(event):
    """
    Same as ControlF, but only scroll half a page.
    """
    scroll_forward(event, half=True)
def scroll_half_page_up(event):
    """
    Same as ControlB, but only scroll half a page.
    """
    scroll_backward(event, half=True)
def scroll_one_line_down(event):
    """
    scroll_offset += 1

    Scroll the window down one line; the cursor is pushed down too when it
    would otherwise leave the configured top scroll offset.
    """
    w = find_window_for_buffer_name(event.cli, event.cli.current_buffer_name)
    b = event.cli.current_buffer

    if w:
        # When the cursor is at the top, move to the next line. (Otherwise, only scroll.)
        if w.render_info:
            info = w.render_info

            if w.vertical_scroll < info.content_height - info.window_height:
                if info.cursor_position.y <= info.configured_scroll_offsets.top:
                    b.cursor_position += b.document.get_cursor_down_position()

                w.vertical_scroll += 1
def scroll_one_line_up(event):
    """
    scroll_offset -= 1

    Scroll the window up one line; the cursor is pulled up when it would
    otherwise leave the configured bottom scroll offset.
    """
    w = find_window_for_buffer_name(event.cli, event.cli.current_buffer_name)
    b = event.cli.current_buffer

    if w:
        # When the cursor is at the bottom, move to the previous line. (Otherwise, only scroll.)
        if w.render_info:
            info = w.render_info

            if w.vertical_scroll > 0:
                first_line_height = info.get_height_for_line(info.first_visible_line())

                cursor_up = info.cursor_position.y - (info.window_height - 1 - first_line_height -
                                                      info.configured_scroll_offsets.bottom)

                # Move cursor up, as many steps as the height of the first line.
                # TODO: not entirely correct yet, in case of line wrapping and many long lines.
                for _ in range(max(0, cursor_up)):
                    b.cursor_position += b.document.get_cursor_up_position()

                # Scroll window
                w.vertical_scroll -= 1
def scroll_page_down(event):
    """
    Scroll page down. (Prefer the cursor at the top of the page, after scrolling.)
    """
    w = _current_window_for_event(event)
    b = event.cli.current_buffer

    if w and w.render_info:
        # Scroll down one page.
        # max() guarantees we always advance at least one line.
        line_index = max(w.render_info.last_visible_line(), w.vertical_scroll + 1)
        w.vertical_scroll = line_index

        b.cursor_position = b.document.translate_row_col_to_index(line_index, 0)
        b.cursor_position += b.document.get_start_of_line_position(after_whitespace=True)
def scroll_page_up(event):
    """
    Scroll page up. (Prefer the cursor at the bottom of the page, after scrolling.)
    """
    w = _current_window_for_event(event)
    b = event.cli.current_buffer

    if w and w.render_info:
        # Put cursor at the first visible line. (But make sure that the cursor
        # moves at least one line up.)
        line_index = max(0, min(w.render_info.first_visible_line(),
                                b.document.cursor_position_row - 1))

        b.cursor_position = b.document.translate_row_col_to_index(line_index, 0)
        b.cursor_position += b.document.get_start_of_line_position(after_whitespace=True)

        # Set the scroll offset. We can safely set it to zero; the Window will
        # make sure that it scrolls at least until the cursor becomes visible.
        w.vertical_scroll = 0
986,625 | 6a0be84af25c8692bbc99f3579eab874bdfad99e | # https://leetcode.com/problems/isomorphic-strings/
import unittest
import string
class Solution:
    def is_isomorphic(self, str1, str2):
        """Return True when str1 and str2 are isomorphic ASCII-letter strings.

        Two strings are isomorphic when a one-to-one character mapping turns
        str1 into str2. As in the original implementation, inputs of unequal
        length or containing non-ASCII-letter characters yield False.

        Improvements over the original: the per-character `in hmap.values()`
        scan (O(n) each, O(n^2) overall) is replaced by an O(1) set lookup,
        and the dead commented-out duplicate of the loop body is removed.
        """
        if len(str1) != len(str2):
            return False
        letters = set(string.ascii_letters)
        if not all(c in letters for c in str1 + str2):
            return False
        forward = {}   # char of str1 -> char of str2
        mapped = set()  # chars of str2 already used as targets
        for a, b in zip(str1, str2):
            if a in forward:
                if forward[a] != b:
                    return False
            else:
                if b in mapped:
                    # Two different source chars would map to b.
                    return False
                forward[a] = b
                mapped.add(b)
        return True
# test
# s = Solution()
#
# test_cases = [["egg", "add"],["foo", "bar"],["paper", "title"]]
# for t1, t2 in test_cases:
# print(t1, t2, s.jewels_stones(t1,t2))
class TestInt(unittest.TestCase):
    """Unit tests for Solution.is_isomorphic over three sample pairs."""
    def test_integer(self):
        s = Solution()
        test_cases = [["egg", "add"],["foo", "bar"],["paper", "title"]]
        test_results = [True, False, True]
        for i, test_case in enumerate(test_cases):
            self.assertEqual(s.is_isomorphic(test_case[0], test_case[1]), test_results[i])

if __name__ == '__main__':
    unittest.main()
|
986,626 | 1feaa2167bbee90ab7f7da3f7a9814f2a1c830ff | import numpy as np
import Option
import util
import Convert
import numexpr as ne
def solve(Df, bf, alpha, opt):
    '''
    Solve (D^t D + alpha*I) x = b in the Fourier domain via the
    Sherman-Morrison/Woodbury identity.

    Df    : filter spectra -- assumed shape (opt.M, opt.N); TODO confirm
    bf    : right-hand-side spectrum
    alpha : regularisation weight (scalar)
    opt   : options object providing at least N

    Fix: the original preallocated DDtf/ansf with
    np.zeros(..., dtype=np.complex) and immediately overwrote them -- dead
    work that also crashes on NumPy >= 1.24, where the np.complex alias was
    removed. The allocations are simply dropped; results are unchanged.
    '''
    I_N = np.ones(opt.N)
    Dtf = np.conj(Df)
    # Diagonal of D D^t in the Fourier domain (sum over the filter axis).
    DDtf = np.sum(Df * Dtf, axis=0)
    tmp = 1 / (I_N + DDtf / alpha)
    tmp2 = ne.evaluate("tmp * Df")
    c = util.create_Dxf(tmp2, bf, opt)
    tmp3 = ne.evaluate("Dtf * c")
    ansf = (bf / alpha) - (tmp3 / (alpha * alpha))
    ans = Convert.IFFT_NMvector(ansf, opt)
    return ans
986,627 | 669e9510d0b5aa5f1406d2a23bb365502f615781 | from django.conf.urls import url
from django.urls import path
from app import views
# URL routes for the app: home page, EIOU claim page, and the upload endpoint.
urlpatterns = [
    path('', views.HomeView.as_view(), name='home'),
    path('eiou_claim/', views.EIOUClaimViews.as_view(), name='eiou_claim'),
    path('upload/', views.upload, name='upload'),
]
|
986,628 | 79f5abc3912d0a4cb936a089c4da81b07be1c751 | import requests, json
# Fetch a private GitHub repo's metadata and save it as pretty-printed JSON.
file = 'aPrivateOne.json'
# NOTE(review): 'apiKey' looks like a placeholder; a real key must never be
# committed -- load it from an environment variable instead.
apiKey = 'apiKey'
url = 'https://api.github.com/repos/datarepresentationstudent/aPrivateOne'
# NOTE(review): basic-auth pair is ('token', apiKey) -- for GitHub token auth
# the token normally goes in the password field; confirm this works as intended.
response = requests.get(url, auth=('token', apiKey))
repoJSON = response.json()
# Save the response body as a formatted JSON file.
with open(file,"w") as new_file:
    json.dump(repoJSON, new_file, indent=4)
986,629 | 577dec6938532c91892f88f3eac661bc4380f55f | def pesquisa_binaria(lista, item):
baixo = 0
alto = len(lista) - 1
while baixo <= alto:
meio = (baixo + alto) // 2
chute = lista[meio]
if chute == item:
return meio
if chute > item:
alto = meio - 1
else:
baixo = meio + 1
return None
# Demo: search the sorted sample list for 29 (prints its index, 14).
minha_lista = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29]
print(pesquisa_binaria(minha_lista, 29))
|
986,630 | e7cb029ba4749c7edb25a1d503d0fc9036bd80ac | def myfun(a,b,c):
print("a-->",a)
print("b-->", b)
print("c-->", c)
# Positional arguments.
myfun(1,2,3)
# Sequence unpacking: *s spreads the sequence into positional args, matched in order.
s = [11,22,33]
myfun(*s)
s2 =(1.1,2.2,3.3)
myfun(*s2)
s3="abc"
myfun(*s3)
print("#"*20, 'key word parameters', "#"*20)
# Keyword arguments: order does not matter.
myfun(a='a1', b='a2',c='a3')
print("*"*20)
myfun(c='a1', a='a2', b=' a3')
print("*"*20)
# **dict unpacks the mapping into keyword arguments.
d = {'a': 10, 'b': 20, 'c': 30}
myfun(**d)
print("#"*20, 'composite parameters', "#"*20)
# Mixing positional args with * and ** unpacking.
myfun(100, *(200, 300))
print("*"*20)
myfun(100, **{'b': 200, 'c': 300})
print("*"*20)
myfun(*[110], 220, *(330,))
|
986,631 | 00e4a413552363350f2300dd3597d5d59aab86ef | #! /usr/bin/env python3
'''
This script decrypt the encrypted content using a rsa private key
'''
import sys
import argparse
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa, padding
from cryptography.hazmat.primitives import serialization, hashes
def rsaGenerate(filename='rsa'):
    """Generate a 2048-bit RSA key pair and write it to disk.

    Writes the unencrypted private key (PEM, traditional OpenSSL format) to
    '<filename>.pem' and the public key (SubjectPublicKeyInfo) to
    '<filename>_pub.pem'.

    Fix: the `filename` parameter was previously ignored -- the f-strings
    contained the literal text '(unknown)' instead of interpolating it.
    """
    private_key = rsa.generate_private_key(public_exponent=65537,key_size=2048,backend=default_backend())
    public_key = private_key.public_key()
    private_key_bytes = private_key.private_bytes(encoding=serialization.Encoding.PEM,format=serialization.PrivateFormat.TraditionalOpenSSL,encryption_algorithm=serialization.NoEncryption())
    public_key_bytes = public_key.public_bytes(encoding=serialization.Encoding.PEM,format=serialization.PublicFormat.SubjectPublicKeyInfo)
    with open(f'{filename}.pem','wb') as f:
        f.write(private_key_bytes)
    with open(f'{filename}_pub.pem','wb') as f:
        f.write(public_key_bytes)
def rsaDecrypt(cipher, privatekey_bytes):
    """Decrypt `cipher` with a PEM-encoded private key using OAEP(MGF1-SHA256/SHA256)."""
    private_key = serialization.load_pem_private_key(privatekey_bytes, backend=default_backend(), password=None)
    oaep = padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA256()), algorithm=hashes.SHA256(), label=None)
    return private_key.decrypt(cipher, oaep)
if __name__=="__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-g',help="Generate RSA key pair",action="store_true")
    parser.add_argument('file',help="Private key to decrypt or name of the RSA keys with -g")
    args = parser.parse_args()
    if args.g:
        # Fix: use the parsed positional argument instead of sys.argv[2],
        # which raised IndexError when only '-g <name>' semantics varied and
        # bypassed argparse entirely. `file` is required, so it is always set.
        rsaGenerate(args.file)
        sys.exit()
    print("Paste the encrypted content in hex format")
    try:
        cipher = bytes.fromhex(input(">> "))
    # Fix: catch only the error bytes.fromhex actually raises for bad hex,
    # instead of a bare except that also swallowed KeyboardInterrupt etc.
    except ValueError:
        print('[Error] invalid format')
        sys.exit()
    with open(args.file,'rb') as f:
        key_bytes = f.read()
    recover = rsaDecrypt(cipher,key_bytes)
    print(f"Key used:\n{recover.hex()}")
986,632 | 775fbecc46df832c5e1e0bad022fdfbb0a2040ff | from django.http.response import HttpResponseRedirect
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.contrib.auth.mixins import LoginRequiredMixin
from polls.models import Choice, Question
class IndeXView(LoginRequiredMixin, ListView):
"""Render 5 most recent polls """
login_url = 'login'
template_name = 'polls/index.html'
context_object_name = 'latest_question_list'
def get_queryset(self):
return Question.objects.order_by('-pub_date')[:5]
class DetailView(LoginRequiredMixin, DetailView):
""" Render details of a single poll"""
login_url = 'login'
model = Question
template_name = 'polls/detail.html'
class ResultView(DetailView):
""" Render result of a single poll"""
model = Question
template_name = 'polls/result.html'
def vote(request, question_id):
""" Performs voting for a poll """
user = request.user
if not user.is_authenticated:
return redirect('login')
question = get_object_or_404(Question, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
context = {
'question' : question,
'error_message' : "You didn't select a choice."
}
return render(request, 'polls/detail.html',context)
else:
selected_choice.votes += 1
selected_choice.save()
return HttpResponseRedirect(reverse('results',args=(question.id,)))
|
986,633 | 861a0ee5f5c1e0f2e01c3c65762e1a3e48476050 | #!/usr/bin/env python3
#
# https://www.opavote.com/help/overview#blt-file-format
import sys
import urllib.parse
def foo(fname, out):
rankings = []
with open(fname, 'rt') as fin:
fi = iter(fin)
line = next(fi)
candidates, seats = [int(x.strip()) for x in line.split()]
for line in fi:
vparts = [int(x.strip()) for x in line.split()]
if vparts[0] == 0:
break
if vparts[0] != 1:
sys.stderr.write("can't deal with weight !=1: %r\n", line)
sys.exit(1)
if vparts[-1] != 0:
sys.stderr.write("vote doesn't end with 0: %r\n", line)
sys.exit(1)
rankings.append(vparts[1:-1])
cnames = []
title = None
for line in fin:
line = line.strip()
if len(cnames) < candidates:
if line[0] == '"' and line[-1] == '"':
line = line[1:-1]
cnames.append(line)
elif title is None:
title = line
else:
sys.stderr.write("unexpected line at end: %r\n", line)
sys.exit(1)
for r in rankings:
rate = candidates + 1
parts = []
for ci in r:
parts.append((cnames[ci-1], rate))
rate -= 1
out.write(urllib.parse.urlencode(parts) + '\n')
def main():
import argparse
ap = argparse.ArgumentParser()
ap.add_argument('infile', nargs='?')
ap.add_argument('-i', '--in', dest='inname', default=None)
ap.add_argument('-o', '--out', default=None)
args = ap.parse_args()
inname = args.infile or args.inname
outname = args.out
if outname is None or outname == '-':
out = sys.stdout
else:
out = open(outname, 'wt')
foo(inname, out)
return
if __name__ == '__main__':
main()
|
986,634 | 83fb62802671755304b28ea53a8316fdefe78c28 | # -*- coding: utf-8 -*-
from openerp import models, api
class account_tax(models.Model):
    """
    OverWrite Tax Account

    Overrides tax computation so that, under the company's 'round_globally'
    setting, per-line tax amounts are computed at a higher precision before
    summation (old OpenERP v7-style API: cr/uid/pool plumbing).
    """
    _inherit = 'account.tax'
    @api.model
    def _get_tax_calculation_rounding_method(self, taxes):
        # Rounding method comes from the first tax's company; False when there
        # are no taxes or no configured method.
        if taxes and taxes[0].company_id.tax_calculation_rounding_method:
            return taxes[0].company_id.tax_calculation_rounding_method
        else:
            return False
    @api.v7
    def compute_all(self, cr, uid, taxes, price_unit, quantity, product=None, partner=None, force_excluded=False, context=None):
        # By default, for each tax, tax amount will first be computed
        # and rounded at the 'Account' decimal precision for each
        # PO/SO/invoice line and then these rounded amounts will be
        # summed, leading to the total amount for that tax. But, if the
        # company has tax_calculation_rounding_method = round_globally,
        # we still follow the same method, but we use a much larger
        # precision when we round the tax amount for each line (we use
        # the 'Account' decimal precision + 5), and that way it's like
        # rounding after the sum of the tax amounts of each line
        precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
        tax_compute_precision = precision
        rounding_method = self._get_tax_calculation_rounding_method(cr, uid, taxes, context)
        if taxes and rounding_method == 'round_globally':
            tax_compute_precision += 5
        totalin = totalex = round(price_unit * quantity, precision)
        # Split taxes into price-included (tin) and price-excluded (tex).
        tin = []
        tex = []
        for tax in taxes:
            if not tax.price_include or force_excluded:
                tex.append(tax)
            else:
                tin.append(tax)
        tin = self.compute_inv(cr, uid, tin, price_unit, quantity, product=product, partner=partner, precision=tax_compute_precision)
        for r in tin:
            totalex -= r.get('amount', 0.0)
        totlex_qty = 0.0
        try:
            totlex_qty = totalex/quantity
        except:
            # NOTE(review): bare except presumably guards quantity == 0
            # (ZeroDivisionError) and leaves totlex_qty at 0.0, but it also
            # hides any other failure -- consider narrowing.
            pass
        tex = self._compute(cr, uid, tex, totlex_qty, quantity, product=product, partner=partner, precision=tax_compute_precision)
        for r in tex:
            totalin += r.get('amount', 0.0)
        return {
            'total': totalex,
            'total_included': totalin,
            'taxes': tin + tex
        }
    # @api.v8
    # def compute_all(self, price_unit, quantity, product=None,
    #                 partner=None, force_excluded=False):
    #     return self._model.compute_all(
    #         self._cr, self._uid, self, price_unit, quantity,
    #         product=product, partner=partner, force_excluded=force_excluded,
    #         context=self._context)
|
986,635 | 0ab5afccc44002ea921f79570cfbe4a4f2c4b212 | #!/usr/bin/env python3
from fateadm_api import FateadmApi
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.select import Select
import traceback
import requests
import sys
import time
import json
from datetime import datetime, timedelta
from bot import Bot
# Browser/session globals shared by every crawl function below.
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36'
options = Options()
options.add_argument('--headless')
options.add_argument(f'--user-agent="{USER_AGENT}"')
driver = webdriver.Firefox(options=options)
wait = WebDriverWait(driver, 5, 0.5)  # default explicit wait: 5 s timeout, 0.5 s poll
config = json.load(open('config.json'))
fateadm_api = FateadmApi(app_id=None, app_key=None, pd_id=config['fateadm_id'], pd_key=config['fateadm_key'])
bot = Bot(config['telegram_token'])
captcha_cache = {}    # image URL -> (request_id, recognized text)
refunded_captcha = {} # image URLs whose failed recognitions were already refunded
def recognize_captcha(image_url):
    """Resolve a captcha image to text via the fateadm API.

    Results are cached per image URL; returns (request_id, text) and
    raises when the recognition service reports an error.
    """
    cached = captcha_cache.get(image_url)
    if cached is not None:
        print('Captcha in cache')
        return cached
    print('Using fateadm to recognize captcha')
    response = requests.get(image_url, headers={"User-Agent": USER_AGENT})
    prediction = fateadm_api.Predict("20400", response.content)
    if prediction.ret_code != 0:
        raise Exception(f'Captcha API request failed: {prediction.ret_code} {prediction.err_msg}')
    entry = (prediction.request_id, prediction.pred_rsp.value)
    captcha_cache[image_url] = entry
    return entry
def fill_credentials():
    """Type the configured NEEA username and password into the login form."""
    print('Filling credentials...')
    for field_id, value in (('userName', config['neea_username']),
                            ('textPassword', config['neea_password'])):
        field = wait.until(EC.presence_of_element_located((By.ID, field_id)))
        field.clear()
        field.send_keys(value)
def get_captcha():
    """Trigger the captcha widget and wait for its image URL to load.

    :return: the captcha image URL.
    :raises Exception: when the image stays in the "loading" state too long.
    """
    print('Getting captcha...')
    captcha_input = wait.until(EC.presence_of_element_located((By.ID, 'verifyCode')))
    captcha_input.click()
    wait.until(EC.presence_of_element_located((By.ID, "chkImg")))
    attempts = 0
    time.sleep(5)
    url = "loading"
    # Poll until the src attribute no longer points at a loading placeholder.
    while url is None or 'loading' in url:
        url = driver.find_element_by_id('chkImg').get_attribute("src")
        attempts += 1
        if attempts == 10:
            raise Exception('Fetching captcha timeout')
        time.sleep(1)
    return url
def fill_captcha_and_login(captcha):
    """Submit the login form with *captcha* filled in.

    :return: True when the post-login URL (containing the username) is
        reached within the wait timeout, False otherwise.
    """
    print(f'Trying to login with captcha {captcha}...')
    captcha_field = wait.until(EC.presence_of_element_located((By.ID, 'verifyCode')))
    captcha_field.clear()
    captcha_field.send_keys(captcha)
    wait.until(EC.presence_of_element_located((By.ID, 'btnLogin'))).click()
    try:
        wait.until(EC.url_contains(config['neea_username']))
        # wait.until(EC.text_to_be_present_in_element((By.XPATH, '//div[@class="myhome_info_cn"]/span[2]'), '21073302'))
    except:
        print('Login failed')
        traceback.print_exc()
        return False
    print('Login succeeded')
    return True
def crawl_toefl_info():
    """Log into the NEEA TOEFL site and crawl seat vacancy information.

    :return: dict mapping each city to the earliest vacant seat record
        (or None when the city currently has no vacancy). Also dumps the
        full vacancy table to a timestamped JSON file under data/.
    :raises Exception: when login keeps failing after several captchas.
    """
    driver.delete_all_cookies()
    driver.get('https://toefl.neea.cn/login')
    retry_count = 0
    req_id = None
    captcha_url = None
    captcha = None
    while captcha is None or not fill_captcha_and_login(captcha):
        # Ask for a refund for a mis-recognized captcha (once per URL).
        if req_id is not None and captcha_url not in refunded_captcha:
            fateadm_api.Justice(req_id)
            refunded_captcha[captcha_url] = True
        # Try again with a fresh captcha.
        fill_credentials()
        captcha_url = get_captcha()
        print(f'Captcha URL: {captcha_url}')
        req_id, captcha = recognize_captcha(captcha_url)
        retry_count += 1
        if retry_count > 5:
            raise Exception('Retry to login for too many times, aborting...')
    seat_button = wait.until(EC.element_to_be_clickable((By.LINK_TEXT, '考位查询')))
    seat_button.click()
    time.sleep(1)
    # The following two queries must be executed prior to queryTestSeats.
    testDays = list(driver.execute_script('return $.getJSON("testDays")'))
    print(f'Test days: {testDays}')
    provinces = driver.execute_script('return $.getJSON("/getTestCenterProvinceCity")')
    print(provinces)
    cities = config['city_list']
    if cities is None:  # null in config means fetching all cities
        cities = []
        for province in provinces:
            # BUG FIX: iterate the cities of the current province; the
            # original indexed the whole list (provinces['cities']).
            for city in province['cities']:
                cities.append(city['cityNameEn'])
    vacancies = {}
    earliest_vacancies = {}

    def process_items(items):
        # Parse one result table's rows into vacancy records for the
        # current (city, date) pair; remember the first vacant seat per city.
        for item in items:
            info = {}
            info["city"] = item.find_element_by_xpath('./td[1]').text
            info["location"] = item.find_element_by_xpath('./td[2]').text
            info["fee"] = item.find_element_by_xpath('./td[3]').text
            info["vacant"] = item.find_element_by_xpath('./td[4]').text != '名额暂满'
            vacancies[city][date].append(info)
            print(info)
            # mark it
            if info["vacant"] and earliest_vacancies[city] is None:
                info["date"] = date
                earliest_vacancies[city] = info

    for city in cities:
        vacancies[city] = {}
        earliest_vacancies[city] = None
        print(f'Checking city {city}')
        for date in testDays:
            vacancies[city][date] = []
            print(f'Test day {date}')
            # Crawl by mimicking the form interaction.
            Select(driver.find_element_by_id('centerProvinceCity')).select_by_value(city)
            Select(driver.find_element_by_id('testDays')).select_by_value(date)
            time.sleep(0.5)
            query_button = wait.until(EC.element_to_be_clickable((By.ID, 'btnQuerySeat')))
            while True:
                try:
                    query_button.click()
                    WebDriverWait(driver, 2, 0.1).until(EC.text_to_be_present_in_element((By.XPATH, '//div[@id="qrySeatResult"]/h4'), '考位查询结果'))
                    # The click/wait may fail for several times before succeeding.
                    break
                except:
                    # retry
                    print('Result not crawled')
            items = driver.find_elements_by_xpath('//table[@class="table table-bordered table-striped"][1]/tbody/tr')
            process_items(items)
            try:
                # Sometimes there are two exams on one day (a second table).
                items = driver.find_elements_by_xpath('//table[@class="table table-bordered table-striped"][2]/tbody/tr')
                print('multiple exam times detected')
                # BUG FIX: the original passed the function object itself
                # (process_items(process_items)) instead of the rows.
                process_items(items)
            except:
                pass
    json.dump(vacancies, open(f'data/{datetime.now().strftime("%Y%m%d-%H%M%S")}.json', 'w'), indent=4, ensure_ascii=False)
    return earliest_vacancies
if __name__ == '__main__':
    # Crawl forever on a fixed interval, pushing a summary to Telegram and
    # pinning the message whenever the earliest vacancies changed.
    last_earliest = {}
    interval = config['interval']
    time_format = "%Y/%m/%d %H:%M:%S"
    while True:
        try:
            next_time = datetime.now() + timedelta(seconds=interval)
            next_time_str = next_time.strftime(time_format)
            print('Start crawling...')
            earliest_vacancies = crawl_toefl_info()
            # Format the bot message and send it.
            s = f'爬取时间:{datetime.now().strftime(time_format)}\n'
            s += '最早空余考位'
            if earliest_vacancies != last_earliest:
                notification = True
                s += '(有变化):\n'
            else:
                notification = False
                s += '(未变化):\n'
            for city, info in earliest_vacancies.items():
                if info is not None:
                    s += f'{info["city"]}:{info["date"]} {info["location"]}\n'
                else:
                    # BUG FIX: info is None in this branch, so the original
                    # f'{info["city"]}' raised TypeError; use the dict key.
                    s += f'{city}:无\n'
            s += f'fateadm 余额:{fateadm_api.QueryBalc().cust_val}\n'
            s += f'下次爬取时间:{next_time_str}'
            bot.earliest_reply = s
            last_earliest = earliest_vacancies
            message = bot.send_message(s, config['telegram_chat_id'], notification)
            if notification:  # pin latest version
                bot.bot.pin_chat_message(chat_id=config['telegram_chat_id'], message_id=message.message_id)
        except:
            traceback.print_exc()
            s = f'Excecption occurred: {traceback.format_exc()})'
            bot.send_message(s, config['telegram_chat_id'])
        print(f'Next crawl time: {next_time_str}')
        delta = next_time - datetime.now()
        time.sleep(delta.seconds)
    driver.quit()
|
986,636 | 1a5818195d8f56aafcb740a4864764dd39bad168 | #!/usr/bin/env python
from math import *
import numpy as np
import multiprocessing
import time
import srcprop as srcprop
import srclensprop as slp
from input_qg import *
###########################################################
## INPUTS:
## Foreground galaxy catalog which has Ra,Dec,Redshift,Magnitudes,
## major-axis,minor-axis,position angle of the ellipticity
##
## PURPOSE:
## Use galaxy catalog to determine which of the galaxies will be potential lenses and
## generate background source magnitudes and redshifts for all sources behind a
## given lensing galaxy.
## Note that background source number density is artificially increased to
## increase the chances of lensing due to the respective foreground galaxy
###########################################################
def worker(num,nproc):
    # Worker `num` of `nproc` (Python 2): scans its slice of the foreground
    # galaxy catalog, generates background sources, and writes candidate
    # quasar (fp2) and galaxy (fp3) lens systems to per-worker files.
    np.random.seed(num*10+23424);
    srcprop.setlogfile("LOG.%02d.txt"%(num),"GALLENSES.%02d.txt"%(num),"QSOLENSES.%02d.txt"%(num));
    ## Read galaxy lens catalog
    gra,gdec,zd,gu,gg,gr,gi1,gy,gz,majax,minax,ell_pa=np.loadtxt(lenscatalog,usecols=(1,2,5,8,9,10,11,12,13,24,25,22),unpack=True);
    indx,gid,gfld=np.loadtxt(lenscatalog,dtype={'names':('indx','gid','gfld'),'formats':('S3','i8','S13')},usecols=(0,4,3),unpack=True);
    ## Combine i-y band into one: fall back to y where i is invalid (<0)
    gi=gi1*1.0;
    for jj in range (gi1.size):
        if(gi1[jj]<0.):
            gi[jj]=gy[jj];
    ## Lens ellipticity (1 - axis ratio); qgal is the axis ratio b/a
    ell=1. - minax/majax;
    qgal=1-ell;
    nqso=0;
    ngal=0;
    # This worker handles catalog rows [iimin, iimax); the last worker
    # also takes any remainder rows.
    iimin=np.int(zd.size*num/nproc);
    iimax=np.int(zd.size*(num+1)/nproc);
    if(num==nproc-1):
        iimax=zd.size;
    for ii in range(iimin,iimax):
        if(ii%10000==0):
            # Periodic progress heartbeat to stdout and the log file.
            print "Iter",ii,num;
            srcprop.fp1.write("LENSES: %d %d %d %d: \n"%(num,ii,nqso,ngal));
        if(gg[ii]>0 and gr[ii]>0 and zd[ii] <0.9):
            ## Extract Lens Id, Lens Velocity Dispersion, Einstein Radius, Source Magnitude, Source Redshift
            ## For Quasars
            nnew,listmag,listz,rands,vdisp=srcprop.Nsrc_qso(gg[ii],gr[ii],zd[ii],qgal[ii]);
            if(nnew>0 and vdisp>0.):
                for kk in range(nnew):
                    reinst=slp.getreinst(zd[ii],listz[kk],vdisp);
                    ## Keep lenses with Reinst between 1.2 and 5 arcsec only
                    # (206264.8 converts radians to arcseconds)
                    if(reinst*206264.8>=1.2 and reinst*206264.8 <=5.):
                        srcprop.fp2.write("%d %f %f %f %f \n"%(ii,vdisp,reinst*206264.8,listmag[kk],listz[kk]));
                        nqso=nqso+1;
            ## For Galaxies
            nnew,listmag,listz,rands,vdisp=srcprop.Nsrc_gal(gg[ii],gr[ii],zd[ii],qgal[ii]);
            if(nnew>0 and vdisp>0.):
                for kk in range(nnew):
                    reinst=slp.getreinst(zd[ii],listz[kk],vdisp);
                    ## Keep lenses with Reinst between 1.2 and 5 arcsec only
                    if(reinst*206264.8>=1.2 and reinst*206264.8 <=5.):
                        srcprop.fp3.write("%d %f %f %f %f \n"%(ii,vdisp,reinst*206264.8,listmag[kk],listz[kk]));
                        ngal=ngal+1;
    print "Total number of quasar lenses identified by slave:%d in the survey are %d"%(num,nqso)
    print "Total number of galaxy lenses identified by slave:%d in the survey are %d"%(num,ngal)
    srcprop.fp2.close();
    srcprop.fp3.close();
    srcprop.fp1.write("Total number of quasar lenses identified by slave:%d in the survey are %d\n"%(num,nqso));
    tm=time.localtime()
    srcprop.fp1.write("Hour:%d Min:%d Sec:%d"%(tm.tm_hour,tm.tm_min,tm.tm_sec));
    srcprop.fp1.close();
## Run this code faster by specifying Nproc (no. of processors)
# Spawn one worker process per processor (Nproc comes from input_qg);
# each worker handles an equal slice of the lens catalog (see worker()).
jobs=[];
for i in range(Nproc):
    p = multiprocessing.Process(target=worker,args=(i,Nproc))
    jobs.append(p)
    p.start()
print "#####################################################################";
|
986,637 | 6895911122e9efb46407eaee779c6f79c7ef3bc9 | #!/usr/bin/python3
import itertools
from intcode09 import Intcode, MissingInputError
def read_input(fname="day11.in"):
    """
    Read the input file and return the list of numbers that it contains.
    :param fname: A string name of the file to read.
    :return: A list of `int` values contained in the first line of the file
        (comma-separated).
    """
    with open(fname) as fobj:
        first_line = next(fobj)
    return [int(token.strip()) for token in first_line.split(",")]
def paint_panels(mem, init_col):
    """
    Paint the panels.
    :param mem: A program (a `list` of `int` values).
    :param init_col: The initial color of the first panel.
    :return: Painted panels, as a `dict` associating `(x, y)` positions to
    their colors.
    """
    result = dict()
    inp = list()
    out = list()
    prog = Intcode(mem[:], inp, out)
    # Directions
    dirs = [(0, -1), (1, 0), (0, 1), (-1, 0)]
    # Direction index (i.e., index in `dirs`)
    di = 0
    # Current position
    pos = (0, 0)
    for i in itertools.count(0):
        # Feed the current panel color; the very first panel (i == 0)
        # starts at init_col, unpainted panels default to 0 (black).
        inp.append(result.get(pos, 0 if i else init_col))
        try:
            prog.run()
        except MissingInputError:
            # Program paused waiting for more input: keep looping.
            pass
        else:
            # Program halted normally: stop (after the finally block runs).
            break
        finally:
            # NOTE: runs even on the final iteration, before `break` takes
            # effect — consume the two outputs: the color to paint, then
            # the turn (0 = left, 1 = right), and step forward.
            result[pos] = out.pop(0)
            di = (di - 1 + 2 * out.pop(0)) % len(dirs)
            pos = tuple(p + d for p, d in zip(pos, dirs[di]))
    return result
def part1(mem):
    """
    Solve part 1 of the puzzle for the given program.
    """
    painted = paint_panels(mem, 0)
    return len(painted)
def part2(mem):
    """
    Solve part 2 of the puzzle for the given program.
    """
    panels = paint_panels(mem, 1)
    # Bounding box of all painted positions.
    min_x = min(k[0] for k in panels.keys())
    min_y = min(k[1] for k in panels.keys())
    max_x = max(k[0] for k in panels.keys())
    max_y = max(k[1] for k in panels.keys())
    width = max_x - min_x + 1
    drawing = [[" "] * width for _ in range(min_y, max_y + 1)]
    # Render white panels as full blocks.
    for (x, y), col in panels.items():
        if col:
            drawing[y][x] = "\u2588"
    return "\n".join(f" {''.join(row)}" for row in drawing)
if __name__ == "__main__":
mem = read_input()
print("Part 1:", part1(mem))
print("Part 2:", part2(mem), sep="\n")
|
986,638 | 606c99846e5b58e848a9840eb9bc50c7c4d51ffd | import json
import os
import re
import sys
import time
from collections import OrderedDict
""" Hearthstone Pack Statistics Tracker """
# Mac Paths
stats_file = os.path.expanduser('~/Documents/hs_pack_stats.txt')
hs_log_dir = os.path.expanduser('~/Library/Preferences/Blizzard/Hearthstone/Logs')
if not os.path.exists(hs_log_dir):
    sys.exit('Error: Hearthstone log dir does not exist: ' + hs_log_dir)
# By default only read latest log file
log = sorted(
    [os.path.join(hs_log_dir, f) for f in os.listdir(hs_log_dir)],
    key=lambda l: os.path.getmtime(l),
    reverse=True
)[0]
# Windows Paths (untested)
# IMPORTANT: Comment out all lines above this
# stats_file = os.path.expanduser('~\\My Documents\\hs_pack_stats.txt')
# log = os.path.expanduser('~\\AppData\\Local\\Blizzard\\Hearthstone\\Logs\\Power.log')
# Re-organize Hearthstone json data by ids: hs_data maps card id -> card dict
hs_data = {}
with open('AllSets.json', 'r') as f:
    data = json.load(f)
    for key in data:
        for d in data[key]:
            hs_data[d.pop('id')] = d
# Matches a "card gained" log line; groups: name, card id, premium flag, owned count
pack_re = re.compile('.+\[Achievements\] NotifyOfCardGained: \[name=(.+?) cardId=(.+?) type=.+Premium=(.+?)\] (.+)')
# Counters rendered by format_stats(); nested dicts are per-value tallies,
# plain ints are totals.
stats = OrderedDict([
    ('rarity', {}),
    ('playerClass', {}),
    ('type', {}),
    ('race', {}),
    ('bling', {
        'plain': 0,
        'golden': 0,
    }),
    ('dups', 0),
    ('total', 0)
])
def update_hs_stats(card_id):
    """Tally one card's attributes into the global `stats` counters."""
    card = hs_data[card_id]
    for category in stats.keys():
        if category in card:
            value = card[category]
        elif category == 'playerClass':
            # Cards without an explicit class count as Neutral.
            value = 'Neutral'
        else:
            value = None
        if value:
            bucket = stats[category]
            if value not in bucket:
                bucket[value] = 0
            bucket[value] += 1
# Comparator for category names: plain alphabetical order, except that
# 'Neutral' (the class fallback) is always sorted to the end.
def custom_sort(a, b):
    key_a = 'ZZZ' if a == 'Neutral' else a
    key_b = 'ZZZ' if b == 'Neutral' else b
    return 1 if key_a > key_b else -1
def format_stats(stats, fs, total, level=0):
    # Recursively render the nested stats dict into the list of lines `fs`,
    # right-padding counts to a fixed width and appending the percentage of
    # `total`. Returns `fs`. NOTE: Python 2 only (`long`, list.sort(cmp=...)).
    length = 14
    keys = stats.keys()
    if level > 0:
        # Sub-categories sort alphabetically with 'Neutral' forced last.
        keys.sort(cmp=custom_sort)
    for k in keys:
        if k == 'playerClass':
            name = 'Class'
        else:
            name = k.capitalize()
        if isinstance(stats[k], (int, long)):
            # Leaf counter: emit "Name     count (pct%)", indented by level.
            indent = 2 * level
            pad = length - len(name) - indent
            pct = 0
            if total > 0:
                pct = (float(stats[k]) / total) * 100
            fs.append(' ' * indent + '{name}{v: {pad}} ({pct:.2f}%)'.format(
                name=name, v=stats[k], pad=pad, pct=pct
            ))
        else:
            # Nested dict: emit a heading, then recurse one level deeper.
            fs.append(name)
            format_stats(stats[k], fs, total, level+1)
    if level == 0:
        # Trailing blank line at the end of the whole report.
        fs.append('')
    return fs
# Update stats file when hs log file is updated (Python 2 main loop):
# poll the log's mtime once a second, parse any new "card gained" lines,
# and rewrite the stats report file.
file_mod = 0    # last observed mtime of the log
file_pos = -1   # byte offset in the log already processed
while True:
    if os.path.getmtime(log) > file_mod:
        file_mod = os.path.getmtime(log)
        with open(log, 'r') as f:
            if file_pos > 0:
                # Resume from where the previous pass stopped.
                f.seek(file_pos)
            for line in f:
                m = pack_re.match(line)
                if m:
                    name, card_id, golden, owned = (
                        m.group(1),
                        m.group(2),
                        True if m.group(3) == 'GOLDEN' else False,
                        int(m.group(4))
                    )
                    # Filter out cards not received from packs
                    card_set, _ = card_id.split('_')
                    if card_set not in ('EX1', 'GVG', 'AT'):
                        continue
                    if card_set == 'EX1' and \
                            'howToGet' in hs_data[card_id] or \
                            'howToGetGold' in hs_data[card_id]:
                        continue
                    # Update stats using Hearthstone data
                    update_hs_stats(card_id)
                    # Update stats outside of Hearthstone data
                    if golden:
                        stats['bling']['golden'] += 1
                    else:
                        stats['bling']['plain'] += 1
                    if owned > 2:
                        stats['dups'] += 1
                    stats['total'] += 1
            file_pos = f.tell()
        fs = []
        fs = '\n'.join(format_stats(stats, fs, stats['total']))
        print fs
        with open(stats_file, 'w') as f:
            f.write(fs)
    time.sleep(1)
|
986,639 | 4cce9aee92d4aab52cf67e83b7cc59aff6e84002 | import torch
import numpy as np
import tqdm as tq
from torch import nn
'''
Transformation 'np.array' to 'torch.tensor'
'''
def To_tensor(np_array, dtype=np.float32):
    """Convert a numpy array to a torch tensor, casting to *dtype* first."""
    converted = np_array if np_array.dtype == dtype else np_array.astype(dtype)
    return torch.from_numpy(converted)
'''
Initialize weights of Layers
'''
# Initialize method could affects performance
def Init_Layers(layers):
    """Initialize each layer in place: N(0, 0.1) weights, zero biases."""
    for module in layers:
        nn.init.normal_(module.weight, mean=0.0, std=0.1)
        nn.init.constant_(module.bias, 0.)
'''
Push : Gradient of Local_A2C ---> Gradient of Global_A2C
Pull : Parameter of Local_A2C <--- Parameter of Global_A2C
'''
def Push_and_Pull(Optimizer, Local_A2C, Global_A2C, done, next_state, state_batch, action_batch, reward_batch, discount_factor) :
    """Sync a local A3C worker with the shared global network.

    Push: gradients of the local network's loss are copied onto the global
    network's parameters and applied via `Optimizer`.
    Pull: the local network then reloads the global network's parameters.

    :param done: whether `next_state` is terminal (its value is then 0).
    :param state_batch/action_batch/reward_batch: rollout minibatch.
    :param discount_factor: gamma used to bootstrap the TD targets.
    """
    '''
    Push : Gradient of Local_A2C ---> Gradient of Global_A2C
    '''
    # when a next_state_value is Terminal state, the value of it must be 0.
    # 'np.array[None, :]' : adds 1 more-dimension to the first dim index
    # 'np.array[:, None]' : adds 1 more-dimension to the second dim index
    next_state_value = 0. if done else Local_A2C.forward(To_tensor(next_state[None, :]))[-1].data.numpy()[0, 0]
    # Calculate TD-targets and append them to a buffer
    # (walk the rewards backwards, then reverse to restore batch order)
    td_target_buffer = []
    for reward in reward_batch[::-1] :
        next_state_value = reward + discount_factor * next_state_value
        td_target_buffer.append(next_state_value)
    td_target_buffer.reverse()
    # Calculate total loss of mini-batches (buffers)
    # (discrete actions keep int64 dtype; continuous actions get stacked)
    loss = Local_A2C.loss_function(
        To_tensor(np.vstack(state_batch)),
        To_tensor(np.array(action_batch), dtype=np.int64) if action_batch[0].dtype == np.int64 else To_tensor(np.vstack(action_batch)),
        To_tensor(np.array(td_target_buffer)[:, None])
    )
    Optimizer.zero_grad() # Initialize Optimizer
    loss.backward() # Calculate gradient of loss function
    torch.nn.utils.clip_grad_norm_(Local_A2C.parameters(), 20) # Gradient clipping
    # 'x.grad' : brings x-partial gradient value of loss function
    # Copy each local gradient onto the corresponding global parameter.
    for local_para, global_para in zip(Local_A2C.parameters(), Global_A2C.parameters()) :
        global_para._grad = local_para.grad
    # Do Optimize the loss function
    Optimizer.step()
    '''
    Pull : Parameter of Local_A2C <--- Parameter of Global_A2C
    '''
    Local_A2C.load_state_dict(Global_A2C.state_dict())
    return
'''
Test the trained model
'''
def test_model(Global_Model, env):
    """Run 30 evaluation episodes of the trained model, then plot rewards."""
    print('\n >> Test Begin...')
    NUM_EPI = 30
    MAX_STEPS_EPI = 200
    episode_rewards = []
    for _ in tq.tqdm(range(NUM_EPI)):
        state = env.reset()
        total_reward = 0
        for step in range(1, MAX_STEPS_EPI):
            env.render()
            # Actions are clipped to the environment's [-2, 2] range.
            action = Global_Model.action_selection(To_tensor(state[None, :]))
            next_state, reward, done, _ = env.step(action.clip(-2, 2))
            total_reward += reward
            if done or step == MAX_STEPS_EPI - 1:
                episode_rewards.append(total_reward)
                break
            state = next_state
    Show_Result(episode_rewards)
'''
Plotting Result
'''
def Show_Result(res):
    """Plot episode rewards and save the figure under a random file name."""
    import matplotlib.pyplot as plt
    import pandas as pd
    import uuid
    # Random suffix so repeated runs never overwrite each other.
    file_name = 'Result - ' + str(uuid.uuid4())[:8]
    plt.figure(figsize=(27, 13))
    plt.ylabel('Epi- Reward', size=15)
    plt.xlabel('Step', size=15)
    plt.plot(res, marker='^')
    plt.gcf().savefig(file_name + '.png')
    plt.clf()
    plt.close()
|
986,640 | ce42b472970912a02dd47bdf3e7025e3bab0868a | #This script preps the required .RData: pwy_neuroNetwork_prep.R
#Load the .RData (based on: https://stackoverflow.com/questions/21288133/loading-rdata-files-into-python)
#Do this in terminal: pip install pyreadr
import numpy as np
import pandas as pd
import pyreadr
import os
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix

os.chdir("/Users/peterwu/Google Drive File Stream/Shared Drives/HuLab/Nichols_Data_mining/")
# Load the R data frames exported for machine learning in Python.
objects = pyreadr.read_r('Data/phenoForPythonMachineLearning.RData')
pwyNewPheno=objects["pwyNewPheno"]
pwyLabel=objects["pwyLabel"]
type(pwyNewPheno) #pandas.core.frame.DataFrame
type(pwyLabel) #pandas.core.frame.DataFrame
pwyNewPheno.shape #(1954, 324)
pwyLabel.shape #(2344, 1)
#create traning set and test set
X=pwyNewPheno
y=pwyLabel
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state=42,stratify=y) #not sure if stratify=y works when there are more than 2 classes for the labels
#instantiate the model + fit the data
##I want to try dicision tree first
dt = DecisionTreeClassifier(random_state=43)
dt.fit(X_train, y_train)
#predict the test data and calculate accuracy, sensitivity...etc
y_pred=dt.predict(X_test)
acc = accuracy_score(y_test, y_pred) #0.018 (completely failed)
#Use label matrix
#===============================================================================
#Do this in terminal: pip install pyreadr
import pyreadr
import os
# Second experiment: use the NA-imputed full data set together with the
# DecisionTreePipeline helper defined in the sourced script below.
exec(open("/Users/peterwu/Google Drive File Stream/Shared Drives/HuLab/Nichols_Data_mining/test/machine_learning/DecisionTreePipeline.py").read()) #import the function I defined
os.chdir("/Users/peterwu/Google Drive File Stream/Shared Drives/HuLab/Nichols_Data_mining/")
objects = pyreadr.read_r('Data/sourced/All_Data_NAimputed.RData')
NewPheno=objects["All_Data_NAimputed"]
objects = pyreadr.read_r('Data/pwyLabel_df.RData')
Label=objects["pwyLabel_df"]
type(NewPheno) #pandas.core.frame.DataFrame
type(Label) #pandas.core.frame.DataFrame
NewPheno.shape
Label.shape
# Run the pipeline and persist the resulting metrics for inspection.
metrics_df=DecisionTreePipeline(NewPheno,Label)
metrics_df.to_csv("test/machine_learning/pathway.csv")
986,641 | 3fe09b6fb3a9334cb454fb40cc10d7a01de58add | # String
# Problem: 20291
# Memory: 42440KB
# Time: 2440ms
from collections import Counter

# Read N lines, tally the substring after the first '.' on each line,
# and print "<key> <count>" pairs in lexicographic key order.
N = int(input())
ans = Counter(input().split('.')[1] for _ in range(N))
for k, v in sorted(ans.items()):
    print(k + ' ' + str(v))
|
986,642 | 6d3a3895bc8f01370a245e97a55ca4761f9822ed | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module docstring: One line description of what your program does.
"""
__author__ = "Your Github Username"
import sys
def main(args):
    """Add your code here"""
    return None


if __name__ == '__main__':
    main(sys.argv)
|
986,643 | a513c23d5b9c9fa866a1f311b292158bb5edde33 | """
循环队列
"""
class CircleQueue(object):
    """A FIFO queue on a circular (ring) buffer backed by a Python list.

    The buffer starts at max(10, capacity) slots and grows by roughly
    1.5x whenever it fills up.
    """

    def __init__(self, capacity=10):
        # Physical index of the logical head element.
        self.__front = 0
        # Number of stored elements.
        self.__size = 0
        # Backing storage; enforce a minimum capacity of 10.
        self.__elements = [None] * max(10, capacity)

    def __str__(self):
        body = ','.join(str(slot) for slot in self.__elements)
        return ('capcacity=' + str(len(self.__elements))
                + ', size=' + str(self.__size)
                + ', 起始索引 front = ' + str(self.__front)
                + ', [' + body + ']')

    def size(self) -> int:
        """Return the number of elements currently stored."""
        return self.__size

    def clear(self):
        """Drop all elements and reset the head index."""
        self.__elements = [None for _ in self.__elements]
        self.__front = 0
        self.__size = 0

    def is_empty(self) -> bool:
        """Return True when the queue holds no elements."""
        return self.__size == 0

    def en_queue(self, element):
        """Append *element* at the tail, growing the buffer if needed."""
        self.__ensure_capacity(self.__size + 1)
        self.__elements[self.__index(self.__size)] = element
        self.__size += 1

    def de_queue(self):
        """Remove and return the head element."""
        head = self.__elements[self.__front]
        self.__elements[self.__front] = None
        self.__front = self.__index(1)
        self.__size -= 1
        return head

    def front(self):
        """Return (without removing) the head element."""
        return self.__elements[self.__front]

    def __ensure_capacity(self, capacity):
        # Grow the backing list to ~1.5x its size when *capacity* exceeds
        # it, unrolling the ring so the head lands back at index 0.
        old_capacity = len(self.__elements)
        if old_capacity >= capacity:
            return
        new_capacity = old_capacity + (old_capacity >> 1)
        rebuilt = [None] * new_capacity
        # Copy elements in logical order (head first) into the new buffer.
        rebuilt[:self.__size] = [self.__elements[self.__index(i)] for i in range(self.__size)]
        self.__elements = rebuilt
        self.__front = 0

    def __index(self, index):
        # Map a logical offset from the head to a physical buffer index.
        return (self.__front + index) % len(self.__elements)
|
986,644 | db843a58dcec7d3edba68811ea695cba20b5ad8c | #!/usr/bin/python
import sys
import os
import csv
from math import radians, cos, sin, asin, sqrt
def usage():
    # Print a usage hint to stderr and exit (Python 2 print syntax).
    # BUG FIX: THISFILENAME was never defined anywhere in the script, so
    # calling usage() raised NameError; use the invoked script name instead.
    print >> sys.stderr, '\n' + str(sys.argv[0]) + " [csv file]" + '\n'
    sys.exit(0)
#CLIENT:
if __name__ == '__main__':
    # Split rows of a CSV into four output files bucketed by the speed
    # value in column index 3: <6, 6-20, 21-30 and >30.
    #SIGINT
    #signal.signal(signal.SIGINT, signal_handler)
    if( len(sys.argv) < 3 ):
        usage()
    elif( len(sys.argv) == 3 ):
        file = sys.argv[1]   # input CSV path
        file1 = sys.argv[2]  # output file name prefix
        data = []
        #data1 = []
        #data2 = []
        data5 = []    # rows with speed < 6
        data20 = []   # rows with speed 6..20
        data30 = []   # rows with speed 21..30
        datag30 = []  # rows with speed > 30
        datafile1 = open(file, 'r')
        readerg = csv.reader(datafile1)
        for row in readerg:
            data.append(row)
        #rtt1 = 0.0
        #rtt2 = 0.0
        for d3 in data:
            speed = int(d3[3])
            if(speed < 6):
                data5.append(d3)
            elif(speed < 21):
                data20.append(d3)
            elif(speed < 31):
                data30.append(d3)
            else:
                datag30.append(d3)
        # Append (not overwrite) each bucket to its own output CSV.
        with open(file1 + "5.csv", "a") as f5:
            w5 = csv.writer(f5)
            w5.writerows(data5)
        with open(file1 + "20.csv", "a") as f20:
            w20 = csv.writer(f20)
            w20.writerows(data20)
        with open(file1 + "30.csv", "a") as f30:
            w30 = csv.writer(f30)
            w30.writerows(data30)
        with open(file1 + ">30.csv", "a") as fg30:
            wg30 = csv.writer(fg30)
            wg30.writerows(datag30)
    else:
        usage()
|
986,645 | f48ef8a67a6155e311d25c2c45716d992ffea327 | #!/usr/bin/env python
try:
    from setuptools import setup
except ImportError:
    # Fall back to distutils on environments without setuptools.
    from distutils.core import setup

setup(
    name='iohitman',
    version='3.0',
    description='Parallel I/O tester',
    author='stevec7',
    packages=['iohitman', 'iohitman.testsuite'],
    package_dir={
        'iohitman': 'iohitman',
        'iohitman.testsuite': 'iohitman/testsuite',
    },  # BUG FIX: the comma after this dict was missing, making the whole
        # file a SyntaxError ("package_dir = {...} scripts = [...]").
    scripts=[
        'contrib/bin/run_iohitman.py',
    ],
)
|
986,646 | f32cad1cf8fdc711ed80cdd6b8bbf84111ffbf20 | int_h, int_w = map(int, input().split())
# Read the H-row grid of '.'/'#' cells (H parsed above) and print its
# bounding box around all '#' cells: rows and columns without any '#'
# are dropped.
masu = [list(input()) for _ in range(int_h)]
# Transpose, keep only columns containing '#'.
kept_cols = [col for col in map(list, zip(*masu)) if "#" in col]
# Transpose back and print only rows containing '#'.
for row in map(list, zip(*kept_cols)):
    if "#" in row:
        print("".join(row))
|
986,647 | f6d3a4f45c54e636188f741289390fcef3d6702f | '''
Description:
Version: 2.0
Author: xuchaoxin
Date: 2021-03-06 20:17:12
LastEditors: xuchaoxin
LastEditTime: 2021-03-07 12:58:14
'''
""" 堆排序
堆中定义以下几种操作:
○ 最大堆调整(Max Heapify堆积化):
§ 将堆的末端子节点作调整,使得子节点永远小于父节点
○ 创建最大堆(Build Max Heap):(核心算法)
§ 将堆中的所有数据重新排序
○ 堆排序(HeapSort):
§ 移除位在第一个数据的根节点,并做最大堆调整的递归运算 (可以不递归)
"""
import generate_randomInt
def big_endian(arr, start, end):
    """Sift the element at *start* down into its max-heap position.

    Restores the max-heap property for the subtree rooted at index
    *start*, considering only indices <= *end* (0-based indexing; the
    children of node i are 2*i+1 and 2*i+2). Used both to build the
    initial heap (called bottom-up over every internal node) and to
    re-heapify after the root has been swapped out during the sort.

    :param arr: list being heapified in place
    :param start: root index of the subtree to fix
    :param end: last valid index of the heap region
    """
    root = start
    child = root * 2 + 1  # left child index
    if child <= end:  # root is not a leaf
        # BUG FIX: the right child must also be considered when its index
        # equals *end*; the original `child + 1 < end` skipped it, so a
        # larger right child sitting exactly at the boundary was ignored
        # (the commented-out iterative version correctly used `<=`).
        if child + 1 <= end and arr[child] < arr[child + 1]:
            child += 1  # pick the larger of the two children
        if arr[child] > arr[root]:
            # Swap down and keep sifting from the child's position.
            arr[child], arr[root] = arr[root], arr[child]
            big_endian(arr, child, end)
""" build_heap()不是必须,可以直接写在sort中 """
def build_heap(arr):
    """Arrange *arr* into a max heap in place (bottom-up heapify)."""
    last = len(arr) - 1
    # Sift down every internal node, from the last parent up to the root.
    for node in range(len(arr) // 2 - 1, -1, -1):
        big_endian(arr, node, last)
def heap_sort(arr):
    """Sort *arr* ascending in place via max-heap heapsort; return *arr*."""
    build_heap(arr)
    # Repeatedly move the heap maximum to the end of the shrinking region,
    # then restore the heap property from the root down.
    for tail in range(len(arr) - 1, 0, -1):
        arr[0], arr[tail] = arr[tail], arr[0]
        big_endian(arr, 0, tail - 1)
    return arr
def main():
    """Demo: heap-sort 50 random integers and print the result."""
    # generate_randomInt is expected to be provided elsewhere in the project.
    data = generate_randomInt.generate(50)
    print(heap_sort(data))
# Entry-point guard restored: the original had it commented out, so main()
# (and its 50-element random sort) ran as a side effect of merely importing
# this module.
if __name__ == "__main__":
    main()
|
986,648 | 54d57f8a2e92cd9569d78f12c28ec971911207d1 | import os
import sys
import json
import subprocess
import csv
import time
import math
from optparse import OptionParser
from shutil import copyfile
def CliCommand(cmd, cb=None):
    """Run `cmd` through /bin/bash and return its stdout decoded as UTF-8.

    Each stdout line is passed to `cb` (if given) as raw bytes while the
    command runs.  Terminates the interpreter via sys.exit(1) if the
    process cannot be spawned or returns a non-zero exit status.
    """
    print(cmd)
    try:
        # stdout=subprocess.PIPE lets us capture the command's output.
        res = subprocess.Popen(['/bin/bash', '-c', cmd], stdout=subprocess.PIPE)
    except OSError:
        print("error: popen")
        # Fix: use sys.exit instead of the site-module builtin exit(),
        # which is not guaranteed to exist outside interactive sessions.
        sys.exit(1)
    ret = bytearray()
    while 1:
        line = res.stdout.readline()
        if not line:
            break
        if cb:
            cb(line)
        ret += line
    res.wait()  # wait for process to finish; also sets res.returncode
    if res.returncode != 0:
        print(" os.wait:exit status != 0\n")
        sys.exit(1)
    else:
        print("os.wait:({},{})".format(res.pid, res.returncode))
    return ret.decode('utf-8')
# Command-line interface: select events by camera serial number and an
# optional [fromtime, totime] epoch window, then prepare the output CSV
# and the positive/negative image folders.
MSG_USAGE = "migrate"
optParser = OptionParser(MSG_USAGE)
optParser.add_option("-s","--sn", action = "store", type = "string", dest = "sn")
optParser.add_option("-f","--from", action = "store", type = "int", dest = "fromtime",default=0)
optParser.add_option("-t","--to", action = "store", type = "int", dest = "totime",default=0)
optParser.add_option("-c","--comment", action = "store", type = "string", dest = "comment")
optParser.add_option("-p","--path", action = "store", type = "string", dest = "path")
optParser.add_option("-i","--state", action = "store", type = "string", dest = "state") # getface,recogface
(options, args) = optParser.parse_args()
print( options)
# NOTE(review): this initial assignment is dead -- fromtime is overwritten
# from the parsed options a few lines below.
fromtime=0
state=options.state
sn=options.sn
fromtime=options.fromtime
totime=options.totime
comment=options.comment
path=options.path
# CSV report path: the comment string is appended directly to the path.
csvpath=path+comment
# "negtive" is a spelling kept as-is: the same literal is used later when
# copying miss images, so the folder names stay consistent.
if not os.path.exists(path+"negtive"):
    os.mkdir(path+"negtive")
if not os.path.exists(path+"positive"):
    os.mkdir(path+"positive")
# Write the CSV header once, in append mode.
with open(csvpath, 'a', newline='') as csvfile:
    writer=csv.writer(csvfile,quotechar = "'")
    writer.writerow(['url','time','face_score','got_face_cost','got_face','got_id_cost','got_id'])
# Main processing loop: walk the capture directory, run face detection on
# .flv clips (default mode) or face recognition on .jpg stills
# (state == "recogface"), and append one CSV row per processed file.
# File names are expected to look like "<sn>-<epoch>[-<timestring>].<ext>".
for dirPath, dirNames, fileNames in os.walk(path):
    print ("%s"%dirPath)
    for f in sorted(fileNames):
        filename, file_extension = os.path.splitext(f)
        fsplit = filename.split("-")
        get_face_start_time=time.time()
        # Skip files belonging to other cameras.
        if fsplit[0] != sn:
            continue
        ev_time=int(fsplit[1])
        # Apply the optional epoch-time window.
        if (fromtime > 0 and ev_time<fromtime) or (totime>0 and ev_time>totime):
            continue
        # +28800 shifts the epoch by 8 hours (UTC+8) before formatting.
        named_tuple = time.localtime(ev_time+28800)
        time_string = time.strftime("%Y_%m_%d_%H:%M:%S", named_tuple)
        if state != "recogface":
            if file_extension == ".flv":
                # Step 1: extract the best face frame from the video.
                dstjpg="%s-%s-%s.jpg"%(fsplit[0],fsplit[1],time_string)
                dstjpgPath="%s/%s"%(path,dstjpg)
                srcflv="%s/%s"%(path,f)
                cli = CliCommand("python ./api_get_video_face_v4.py %s %s 6000"%(srcflv,dstjpgPath))
                get_face_end_time=time.time()
                print(cli)
                # Google-Sheets IMAGE() formula pointing at the image server.
                facejpgUrl="=IMAGE(\"http://35.201.225.82:8080/image/%s\")"%(dstjpg)
                got_face=0
                getface = json.loads(cli)
                if getface['face_detected'] is True:
                    print("%s got face"%(f))
                else:
                    # No face: record the miss and move on to the next file.
                    with open(csvpath, 'a', newline='') as csvfile:
                        writer=csv.writer(csvfile,quotechar = "'")
                        writer.writerow([facejpgUrl,ev_time,getface['face_score'],round(get_face_end_time-get_face_start_time,1),got_face,0,'0'])
                    continue
                # Step 2: run recognition on the extracted frame.
                # NOTE(review): hitjpgPath is the same path as dstjpgPath
                # (identical name components) -- confirm the recognizer is
                # meant to overwrite its input with the annotated image.
                hitjpgPath="%s/%s-%s-%s.jpg"%(path,fsplit[0],fsplit[1],time_string)
                cli= CliCommand("python /home/tommy/vca-facerecog/src/cvpy/api_face_recogn_v2.py /home/tommy/vca-facerecog/src/cvpy/face_cache/home.json %s %s"%(dstjpgPath,hitjpgPath))
                print(cli)
                get_id_end_time=time.time()
                getid = json.loads(cli)
                print(getid)
                if getface['face_detected'] is True:
                    got_face=1
                if len(getid['detected'])>0:
                    # Recognized someone: archive under the identity name.
                    idjpg="%s-%s-%s-%s.jpg"%(fsplit[0],fsplit[1],time_string,getid['detected'][0])
                    idjpgPath="%s/%s"%(path,idjpg)
                    copyfile(hitjpgPath,idjpgPath)
                    hitfacejpgUrl="=IMAGE(\"http://35.201.225.82:8080/image/%s\")"%(idjpg)
                    print("%s got %s"%(f,getid['detected'][0]))
                    with open(csvpath, 'a', newline='') as csvfile:
                        writer=csv.writer(csvfile,quotechar = "'")
                        writer.writerow([hitfacejpgUrl,ev_time,getface['face_score'],round(get_face_end_time-get_face_start_time,1),got_face,round(get_id_end_time-get_face_end_time,1),getid['detected'][0]])
                    folder1="%s/positive/%s"%(path,idjpg)
                    copyfile(hitjpgPath,folder1)
                else:
                    # Face found but not recognized: file it under negtive/.
                    hitfacejpg="=IMAGE(\"http://35.201.225.82:8080/image/%s-%s-%s-?.jpg\")"%(fsplit[0],fsplit[1],time_string)
                    print("got anybody")
                    with open(csvpath, 'a', newline='') as csvfile:
                        writer=csv.writer(csvfile,quotechar = "'")
                        writer.writerow([hitfacejpg,ev_time,getface['face_score'],round(get_face_end_time-get_face_start_time,1),got_face,round(get_id_end_time-get_face_end_time,1),'0'])
                    folder2="%s/negtive/%s-%s-%s-?.jpg"%(path,fsplit[0],fsplit[1],time_string)
                    copyfile(hitjpgPath,folder2)
        else:
            # recogface mode: recognition only, on pre-extracted stills.
            if file_extension == ".jpg":
                if len(fsplit) != 3: #eg:SWB000mgLbUm-1568293362-2019_09_12_21:02:42.jpg
                    continue
                hitjpg="%s/%s-%s-%s-hit.jpg"%(path,fsplit[0],fsplit[1],fsplit[2])
                srcjpg="%s/%s"%(path,f)
                cli= CliCommand("python /home/tommy/vca-facerecog/src/cvpy/api_face_recogn_v2.py /home/tommy/vca-facerecog/src/cvpy/face_cache/home.json %s %s"%(srcjpg,hitjpg))
                print(cli)
                get_id_end_time=time.time()
                getid = json.loads(cli)
                print(getid)
                if len(getid['detected'])>0:
                    hitfacejpg="%s-%s-%s-%s.jpg"%(fsplit[0],fsplit[1],fsplit[2],getid['detected'][0])
                    # NOTE(review): hitfacejpgPath appends a second ".jpg"
                    # and is never used; os.rename below moves the file to a
                    # bare name relative to the CWD, not into `path` -- this
                    # looks unintended, verify against the writer's intent.
                    hitfacejpgPath="%s/%s.jpg"%(path,hitfacejpg)
                    os.rename(hitjpg,hitfacejpg)
                    hitfacejpgURL="=IMAGE(\"http://35.201.225.82:8080/image/%s\")"%(hitfacejpg)
                    print("%s got %s"%(f,getid['detected'][0]))
                    with open(csvpath, 'a', newline='') as csvfile:
                        writer=csv.writer(csvfile,quotechar = "'")
                        # NOTE(review): getface, get_face_end_time and
                        # get_face_start_time are never assigned in recogface
                        # mode -- this row raises NameError at runtime.
                        writer.writerow([hitfacejpgURL,ev_time,getface['face_score'],round(get_face_end_time-get_face_start_time,1),1,round(get_id_end_time-get_face_end_time,1),getid['detected'][0]])
                    positive_folder="%s/positive/%s"%(path,hitfacejpg)
                    # NOTE(review): facejpg is undefined here (NameError);
                    # probably srcjpg or hitfacejpg was intended.
                    copyfile(facejpg,positive_folder)
|
986,649 | b101687c1a57b2bc02ee0ddcef6d5aa7c479a6da | class Solution:
def minSwap(self, A: List[int], B: List[int]) -> int:
n = len(A)
swap = [n for _ in range(n)]
not_swap = [n for _ in range(n)]
swap[0], not_swap[0] = 1, 0
for i in range(1, n):
if A[i] > A[i-1] and B[i] > B[i-1]:
not_swap[i] = not_swap[i-1]
swap[i] = swap[i-1] + 1
if A[i] > B[i-1] and B[i] > A[i-1]:
not_swap[i] = min(swap[i-1], not_swap[i])
swap[i] = min(not_swap[i-1]+1, swap[i])
return min(swap[-1], not_swap[-1])
|
986,650 | 9edbd4ea9dfedde07662df093f981f4fdfd8013e | import sys
# Print basic metadata about a file, exiting quietly if it does not exist.
file_name = "binary_num.py"
try:
    f = open(file_name, "r")
except FileNotFoundError:
    print(f"File {file_name} does not exist")
    sys.exit(0)
print ("Name of the file: ", f.name)
print ("Closed or not : ", f.closed)
print ("Opening mode : ", f.mode)
# Fix: the original leaked the handle; release it once we are done.
f.close()
|
986,651 | 25ac7027a85567175e0440042fd5706851a061fc | ###configuration.properties
# Interactive collector for totem configuration values plus the raw
# configuration.properties / prod.properties / vs-auto-pag templates.
# NOTE(review): serial, cnpj and printer are read and echoed but never
# substituted into the template strings below -- confirm whether the
# interpolation step was still to be implemented.
serial=input('Digite o serial..: ')
print (serial)
cnpj=input ('Digite o CNPJ (Somente Números)..: ')
print (cnpj)
printer = input ('Digite a Porta da Impressora...: ')
print (printer)
# Raw contents of configuration.properties, kept verbatim as one literal.
config='''configuration
# *************** Geral ******************
# Password para acessar o administrativo
adminPassword=33638205
# Timeout da aplicação em milisegundos
timeout=40000
# Habilita mouse
mouseEnabled=false
# Modo fullscreen
fullscreenModeEnabled=false
# Modelo Impressora (térmica) [diebold | engworks | engworks4i | elgin_bkt6112 | star_tup900 | default]
modelPrinter=elgin_bkt6112
# Porta da impressora para modelos de impressora térmica
portPrint=COM7
# Mensagem padrão para o display do PinPad
displayPinpad=Videosoft
# Captura cliques na Screen Logger API
screenLoggerEnabled=false
# Prefixo para o cupom fiscal que é enviado para as APIs de pagamento, no máximo 7 dígitos, lojapdv
prefixCupomFiscal=1150101
# Habilita pagamento
paymentEnabled=true
# Habilita pagamento mínimo
paymentMinimumEnabled=true
# Habilita botão de desbloqueio de cartão(caso esteja desabilitado, não irá considerar o valor vindo dos CSVs)
cardReleaseEnabled=false
# ************* Database ******************
# URL de conexão do banco de dados
jdbcUrl=jdbc:mysql://localhost:3306/totem_calcard?createDatabaseIfNotExist=true&useSSL=false
# Usuário do banco de dados
jdbcUser=root
# Senha do banco de dados
jdbcPassword=root
# ************* VS AutoPag ****************
# Path do Client de Pagamento com Cartão
vsAutoPagSeCommand=C:\\Program Files\\videosoft\\vs-auto-pag-se\\vs-auto-pag-se-daemon.bat
# ************* VS Conductor ****************
# Path do Client de Pagamento do Módulo Conductor
vsConductorCommand=C:\\Program Files\\videosoft\\vs-conductor\\vs-conductor-daemon.bat
# ***************** Webservice *****************
webserviceContaCartaoService=http://10.19.253.48:30008/ContaCartaoService.asmx?wsdl
webserviceWsCdtConductor=http://10.19.253.48:20008/WS_CDTConductor.asmx?wsdl
apiCalsystemCard=https://totem.calcard.com.br/api/card-engine/v2
apiCalsystemAuth=https://totem.calcard.com.br/oauth
apiCalsystemAccount=https://totem.calcard.com.br/api/account-engine/v4
apiCalsystemRenegotiation=https://totem.calcard.com.br/api/renegotiation-engine/v2
apiCalsystemTotemEngine=https://totem.calcard.com.br/api/totem-engine/v2
apiCalsystemPaymentPCH=https://pch-totem.calcard.com.br/api/totem-app/v2
apiCalsystemCardUser=totem
apiCalsystemCardPassword=d8007891-4207-4cc5-b942-3c24620e33d6
# ***************** Proxy *****************
# Caso a rede não tenha proxy, deixar os campos em branco
# Host
proxyHost=
# Porta
proxyPort=
# Usuário
proxyUser=
# Senha
proxyPassword=
'''
#### prod.properties
# NOTE(review): the assignments in this section sit OUTSIDE any string
# literal, so they execute as ordinary Python statements (rebinding
# adminPassword and prefixCupomFiscal as ints).  They were presumably
# meant to be part of the `prod` template string below -- verify.
# *************** General ******************
# Password to access the administrative area
adminPassword=33638205
# Prefix for the fiscal receipt sent to the payment APIs, at most 7 digits, lojapdv
prefixCupomFiscal=1150101
# ***************** Webservice *****************
prod="""
apiCalsystemCardUser=TOTEM_8205
apiCalsystemCardPassword=8205
apiCalsystemCard=https://pch-totem.calcard.com.br/api/totem-app/v1
apiCalsystemAuth=https://pch-totem.calcard.com.br/api/totem-app/v1
apiCalsystemAccount=https://pch-totem.calcard.com.br/api/totem-app/v1
apiCalsystemRenegotiation=https://pch-totem.calcard.com.br/api/totem-app/v2
apiCalsystemTotemEngine=https://pch-totem.calcard.com.br/api/totem-app/v1
apiCalsystemPaymentPCH=https://pch-totem.calcard.com.br/api/totem-app/v2
"""
# Raw contents of the vs-auto-pag configuration template.
config_autopag= """
# ***************** Geral *****************
# Timeout da aplica\u00c3\u00a7\u00c3\u00a3o em segundos
timeout=59
# Modelo Impressora (térmica) [diebold | engworks | engworks4i | elgin_bkt680 | elgin_bkt6112 | bematech_mp_4200_th | star_tup900 | default]
modelPrinter=elgin_bkt6112
# Porta da impressora para modelos de impressora térmica
portPrint=COM7
# Corta o papel ap\u00c3\u00b3s a impress\u00c3\u00a3o do comprovante
cutPaper=true
# Modo fullscreen, caso seja false, ser\u00c3\u00a1 maximizado a janela
fullscreenMode=true
# Habilita/desabilita ponteiro do mouse
mouseEnable=false
# Tempo em segundos para apresenta\u00c3\u00a7\u00c3\u00a3o das mensagens pertinentes ao cliente
messageTime=5
# Tempo em segundos para apresenta\u00e7\u00e3o da \u00faltima mensagem
endMessageTime=5
# Bloquear utiliza\u00c3\u00a7\u00c3\u00a3o do terminal em caso de problemas na impressora
blockTerminal=true
# Mensagem padr\u00c3\u00a3o para o display do PinPad
displayPinpad=Videosoft
# Mostra imagem estatica na tela de inserir o cart\u00c3\u00a3o no pinpad
showImageInsertCard=false
# Mostra imagem estatica na tela de inserir a senha
showImagePassword=false
# ************* Database ******************
# URL de conex\u00c3\u00a3o do banco de dados
jdbcUrl=jdbc:mysql://localhost:3306/vs_auto_pag_se?createDatabaseIfNotExist=true&useSSL=false
# Usu\u00c3\u00a1rio do banco de dados
jdbcUser=root
# Senha do banco de dados
jdbcPassword=root
# ***************** Sitef *****************
# Host
sitefHost=192.168.42.18
# StoreID
sitefStoreId=54009064
# Terminal ID
sitefTerminalId=AA090101
#Par\u00c3\u00a2metros adicionais usados na ConfiguraIntSiTefInterativeEx, exigido para ativar o Cielo Premia
#[VersaoAutomacaoCielo=AAAAAAAACR], onde:
#AAAAAAAA = Nome da Software House da automa\u00c3\u00a7\u00c3\u00a3o (8 bytes)
#C = Deve ser 1 se a automa\u00c3\u00a7\u00c3\u00a3o est\u00c3\u00a1 preparada para tratar o desconto e as transa\u00c3\u00a7\u00c3\u00b5es da Plataforma Promocional e 0 caso contr\u00c3\u00a1rio
#R = Campo reservado, deve ser enviado 0.
#sitefParamAdditional=[VersaoAutomacaoCielo=VIDEOSOF00]
sitefParamAdditional=
# Par\u00c3\u00a2metros adicionais para pagamentos do Sitef
#sitefPaymentParamAdditional={TipoTratamento=4}
sitefPaymentParamAdditional=
# ***************** Proxy *****************
# Caso a rede n\u00c3\u00a3o tenha proxy, deixar os campos em branco
# Host
proxyHost=
# Porta
proxyPort=
# Usu\u00c3\u00a1rio
proxyUser=
# Senha
proxyPassword=
"""
|
986,652 | 290552a4e97b45409b25a2e29f4cb7635a2fcaef | '''
Cracking the Coding Interview
Question 2-4
p.94
(For a *singly linked* list)
'''
# Define the Linked List Node
class Node:
    """Node of a singly linked list."""

    def __init__(self, data=None, next=None):
        self.data = data
        self.next = next

    def __repr__(self):
        # Render the whole chain from this node as "a->b->c".
        parts = []
        node = self
        while node:
            parts.append(str(node.data))
            if node.next:
                parts.append('->')
            node = node.next
        return ''.join(parts)
# Actual algorithm
def partition(head, x):
    """Stable-partition a singly linked list around the pivot `x`.

    Builds two fresh chains (data < x, and data >= x) preserving the
    original relative order, then splices the "less" chain in front of
    the "greater-or-equal" chain.  Returns the new head; the input list
    is left untouched (nodes are copied).
    """
    lo_head = lo_tail = None   # chain of values < x
    hi_head = hi_tail = None   # chain of values >= x
    node = head
    while node:
        copy = Node(node.data)
        if node.data >= x:
            if hi_tail:
                hi_tail.next = copy
            else:
                hi_head = copy
            hi_tail = copy
        else:
            if lo_tail:
                lo_tail.next = copy
            else:
                lo_head = copy
            lo_tail = copy
        node = node.next
    # If nothing was < x, the result is simply the >= chain.
    if lo_head is None:
        return hi_head
    lo_tail.next = hi_head
    return lo_head
# Testing
# Testing: build 3->5->8->5->10->2->2 and partition it around 5.
# Fix: print(...) calls replace the Python-2-only "print head" statement
# form; with a single argument this is valid on both Python 2 and 3.
a = Node(2)
b = Node(2, a)
c = Node(10, b)
d = Node(5, c)
e = Node(8, d)
f = Node(5, e)
head = Node(3, f)
print(head)
head = partition(head, 5)
print(head)
|
986,653 | 1697338e9ada38ee0fb90da15d7933b1f3cf171c | #! /usr/bin/env python
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
import sys
import time
import datetime
import functions
def main(logPath,hostsPath,hoursPath,resourcesPath,blockedPath):
    """Parse a web-server access log and emit four analytics files.

    Feature 1 (hostsPath): most active hosts with their hit counts.
    Feature 2 (resourcesPath): resources that transferred the most bytes.
    Feature 3 (hoursPath): busiest 60-minute windows.
    Feature 4 (blockedPath): log lines that would have been blocked under a
    "3 failed logins within 20 seconds => 5-minute block" policy.

    NOTE(review): each "first 10" loop below uses range(0,9) and therefore
    prints only 9 entries -- likely an off-by-one; left unchanged here.
    """
    start = time.time()
    print "Parsing the log file ..."
    # functions.parseLogFile is a project module; each parsed row is a list
    # of fields as laid out in the commented block below.
    parsedData = functions.parseLogFile(logPath)
    end = time.time()
    print 'Done with parsing the log file in %i seconds.' % (end-start)
    # This is how the data looks like after parsing:
    # hosts = [row[0] for row in parsedData]
    # timeStamps = [row[3] for row in parsedData]
    # timeZones = [row[4] for row in parsedData]
    # requests1 = [row[5] for row in parsedData]
    # requests2 = [row[6] for row in parsedData]
    # requests3 = [row[7] for row in parsedData]
    # replies = [row[8] for row in parsedData]
    # bytes = [int(row[-1]) for row in parsedData]
    #Hosts:
    print 'Working on Feature 1 ...'
    start = time.time()
    oFile = open(hostsPath,'w')
    #Sort based on host name
    sortedData = sorted(parsedData, key=lambda x: x[0])
    hosts = [row[0] for row in sortedData]
    iBeg = []
    rowPre = []
    sumBytes = []
    hits = []
    #Log the indexes where host name changes
    for i, row in enumerate(sortedData):
        if (row[0] != rowPre):
            iBeg.append(i)
            rowPre = row[0]
    #Subtract consequtive list elements to find
    #the number of hits by each host.
    hits = [j-i for i,j in zip(iBeg[:-1], iBeg[1:])]
    #Sort the number of hits in relation with host name indices
    hitsSorted, iBegSorted = (list(i) for i in
        zip(*sorted(zip(hits, iBeg), reverse=True)))
    #Print the first 10
    #NOTE(review): range(0,9) covers indices 0..8 -- 9 rows, not 10.
    for i in range(0,9):
        print >> oFile, "%s, %s" % (hosts[iBegSorted[i]],
            hitsSorted[i])
    oFile.close()
    end = time.time()
    print 'Done with Feature 1 in an additional %i seconds.' % (
        end-start)
    #Resources:
    print 'Working on Feature 2 ...'
    start = time.time()
    oFile = open(resourcesPath,'w')
    print resourcesPath
    #Sort based on resource path:
    sortedData = sorted(parsedData, key=lambda x: x[6])
    requests2 = [row[6] for row in sortedData]
    bytes = []
    for row in sortedData:
        try:
            bytes.append(int(row[-1]))
        #Byte field may be '-'
        except ValueError:
            bytes.append(0)
    iBeg = []
    rowPre = []
    sumBytes = []
    #Log the indexes where resource path changes
    for i, row in enumerate(sortedData):
        if (row[6] != rowPre):
            iBeg.append(i)
            rowPre = row[6]
    #Sum the number of bytes sent from each resource path
    for i in range(0,len(iBeg)-1):
        sss = sum(bytes[iBeg[i]:iBeg[i+1]])
        sumBytes.append(sss)
    #Sort the number of bytes in relation with resource paths
    sumBytesSorted, iBegSorted = (list(i) for i in
        zip(*sorted(zip(sumBytes, iBeg), reverse=True)))
    #Print the first 10
    #NOTE(review): again only 9 rows -- see docstring.
    for i in range(0,9):
        print >> oFile, "%s, %d" % (requests2[iBegSorted[i]],
            sumBytesSorted[i])
    oFile.close()
    end = time.time()
    print 'Done with Feature 2 in an additional %i seconds.' % (end-start)
    #Hours
    print 'Working on Feature 3 ...'
    start = time.time()
    oFile = open(hoursPath,'w')
    # row[3] is "[dd/Mon/yyyy:HH:MM:SS"; [1:] drops the leading bracket.
    timeStamps = [row[3][1:] for row in parsedData]
    times = []
    #Convert timeStamps to datetime objects
    for t in timeStamps:
        times.append(datetime.datetime.strptime(t,'%d/%b/%Y:%H:%M:%S'))
    hits = []
    iCuts = []
    iBase = 0
    tBase = times[0]
    tCut = tBase
    tEnd = times[-1]
    while tCut <= tEnd: #Don't exceed the latest time in file
        #Set cut-off time 60 mins ahead
        tCut = tBase+datetime.timedelta(minutes=60)
        #Scan from the base time (times[iBase]) onwards
        #(No need to scan the past)
        for i, t in enumerate(times[iBase:]):
            #Log when you reach or first exceed the cut-off time
            if t >= tCut:
                iCuts.append(iBase+1)
                hits.append(i) #Num. of hits from tBase to tCut
                iBase = iBase+i #Update the cut-off index
                tBase = t #Update the base time
                break
    #Sort the number of hits in relation with cut-off indices
    hitsSorted, iCutsSorted = (list(i) for i in
        zip(*sorted(zip(hits, iCuts), reverse=True)))
    #Print the first 10
    #NOTE(review): 9 rows again, see docstring.
    for i in range(0,9):
        print >> oFile, "%s, %d" % (times[iCutsSorted[i]], hitsSorted[i])
    oFile.close()
    end = time.time()
    print 'Done with Feature 3 in an additional %i seconds.' % (end-start)
    #Blocked
    print 'Working on Feature 4 ...'
    start = time.time()
    hosts = [row[0] for row in parsedData]
    timeStamps = [row[3][1:] for row in parsedData]
    requests = [row[6][0:6] for row in parsedData]
    replies = [row[8] for row in parsedData]
    times = []
    oFile = open(blockedPath,'w')
    #Convert timeStamps to datetime objects
    for t in timeStamps:
        times.append(datetime.datetime.strptime
            (t, '%d/%b/%Y:%H:%M:%S'))
    #Start a watchlist with its first row being empty entries
    #host, tFirstAttempt, NumberOfAttempts, isBlocked, 'tBlock'
    watchlist = ([[' ' , ' ' , ' ', ' ', ' ']])
    for i, t in enumerate(times):
        isLogin = (requests[i] == '/login')
        goodLogin = (isLogin and replies[i] == '200')
        failLogin = (isLogin and not(replies[i] == '200'))
        r = 0
        # Linear scan of the watchlist for this hit's host.
        while True:
            if hosts[i] == watchlist[r][0]: #If in the watchlist
                isBlocked = watchlist[r][3] # = 1 if blocked
                tBlock = watchlist[r][4] # time of block
                if isBlocked:
                    if isinstance(tBlock,str):
                        tBlockStr = tBlock
                    else:
                        tBlockStr = tBlock.strftime(
                            '%d/%b/%Y:%H:%M:%S')
                    lt5min = ( (t-tBlock) <=
                        datetime.timedelta(minutes=5))
                    if lt5min:
                        #Log all these hits that would have been
                        #blocked
                        #Recover the unparsed format
                        print >> oFile, "%s" % (
                            parsedData[i][0]+' ' +
                            parsedData[i][1]+' ' +
                            parsedData[i][2]+' ' +
                            parsedData[i][3]+' ' +
                            parsedData[i][4]+' "'+
                            parsedData[i][5]+' ' +
                            parsedData[i][6]+' ' +
                            parsedData[i][7]+'" '+
                            parsedData[i][8]+' ' +
                            parsedData[i][9])
                    else: #5 min. past since the block
                        #Remove it from the watchlist
                        #(no history, no block)
                        del watchlist[r]
                    break #to check the next hit
                elif failLogin:
                    #Not yet blocked but getting close!
                    #Increase failed attemp count by one
                    watchlist[r][2] += 1
                    #Was its first fail less than 20 sec. ago?
                    tFirstAttempt = watchlist[r][1]
                    lt20sec = ( (t - tFirstAttempt) <=
                        datetime.timedelta(seconds=20) )
                    #Is this its 3rd attempt yet?
                    is3attempts = (watchlist[r][2] == 3)
                    if lt20sec:
                        if is3attempts:
                            #Block the host and ...
                            watchlist[r][3] = 1
                            #... log the blocking time.
                            watchlist[r][4] = t
                    else:
                        #Infrequent login failure
                        #not getting watched.
                        #Remove it from the wathchlist
                        #(no history, no block)
                        del watchlist[r]
                    #Watchlist is updated according to the rules.
                    break #to check the next hit
                elif goodLogin:
                    #Successful login while in watchlist
                    #but not blocked.
                    #Remove it from the wathchlist
                    #(no history, no block)
                    del watchlist[r]
                    break #to check the next hit
            else: #Not yet seen in the watchlist
                if failLogin:
                    #Keep checking the rest of the watchlist
                    r += 1
                    if r == len(watchlist):
                        #Not found in watchlist
                        #Create the watchlist entry and break
                        #for checking the next hit
                        watchlist.append([hosts[i],t,1,0,'-'])
                        break
                elif goodLogin or not(isLogin):
                    #Nothing wrong with this hit
                    #No need to check the rest of the watchlist
                    break #to check the next hit
    oFile.close()
    end = time.time()
    print 'Done with Feature 4 in an additional %i seconds.' % (end-start)
    return
#-----------------------------------------------------------
if __name__ == '__main__':
    # argv: log file, then the four output paths
    # (hosts, hours, resources, blocked).
    main(sys.argv[1],
         sys.argv[2],
         sys.argv[3],
         sys.argv[4],
         sys.argv[5])
|
986,654 | 03cd2e66f108367d0f8629e7b2a3df468d8265ef | from typing import Iterator
from utils.vector import Vector
def bresenham(v1: Vector, v2: Vector) -> Iterator[Vector]:
    """Yield the 3-D Bresenham line from v1 to v2, both endpoints included.

    Classic integer error-accumulator form: the axis with the largest
    absolute delta drives the loop, the other two step whenever their
    accumulated error crosses zero.
    """
    yield v1
    x, y, z = v1
    ex, ey, ez = v2
    dx, dy, dz = abs(ex - x), abs(ey - y), abs(ez - z)
    sx = 1 if ex > x else -1
    sy = 1 if ey > y else -1
    sz = 1 if ez > z else -1
    if dx >= dy and dx >= dz:
        # x is the driving axis
        err_y = 2 * dy - dx
        err_z = 2 * dz - dx
        while x != ex:
            x += sx
            if err_y >= 0:
                y += sy
                err_y -= 2 * dx
            if err_z >= 0:
                z += sz
                err_z -= 2 * dx
            err_y += 2 * dy
            err_z += 2 * dz
            yield Vector(x, y, z)
    elif dy >= dx and dy >= dz:
        # y is the driving axis
        err_x = 2 * dx - dy
        err_z = 2 * dz - dy
        while y != ey:
            y += sy
            if err_x >= 0:
                x += sx
                err_x -= 2 * dy
            if err_z >= 0:
                z += sz
                err_z -= 2 * dy
            err_x += 2 * dx
            err_z += 2 * dz
            yield Vector(x, y, z)
    else:
        # z is the driving axis
        err_y = 2 * dy - dz
        err_x = 2 * dx - dz
        while z != ez:
            z += sz
            if err_y >= 0:
                y += sy
                err_y -= 2 * dz
            if err_x >= 0:
                x += sx
                err_x -= 2 * dz
            err_y += 2 * dy
            err_x += 2 * dx
            yield Vector(x, y, z)
def server_bresenham(v1: Vector, v2: Vector, map_size: int = 30) -> Iterator[Vector]:
    """Yield the server-style 3-D Bresenham line from v1 towards v2.

    Stops when the line leaves the [0, map_size) cube (the bounds check is
    disabled when map_size == 1), when v2 is reached, or -- when
    map_size == -1 -- after dm steps, dm being the dominant axis delta.
    """
    x0, y0, z0 = v1
    tx, ty, tz = v2
    dx, sx = abs(tx - x0), 1 if x0 < tx else -1
    dy, sy = abs(ty - y0), 1 if y0 < ty else -1
    dz, sz = abs(tz - z0), 1 if z0 < tz else -1
    dm = max(1, max(dx, dy, dz))
    i = dm
    # Per-axis error accumulators, seeded at half the dominant delta.
    # (Renamed from x1/y1/z1, which shadowed the unpacked target coords.)
    ex = ey = ez = dm / 2
    while True:
        if map_size != 1:
            if x0 < 0 or x0 >= map_size:
                break
            # BUG FIX: the original tested the error accumulator (`y1 < 0`)
            # here instead of the y coordinate, so points with negative y
            # were never rejected.  Check y0 like the x and z axes.
            if y0 < 0 or y0 >= map_size:
                break
            if z0 < 0 or z0 >= map_size:
                break
        yield Vector(x0, y0, z0)
        if Vector(x0, y0, z0) == v2:
            break
        if i == 0 and map_size == -1:
            break
        i -= 1
        ex -= dx
        if ex < 0:
            ex += dm
            x0 += sx
        ey -= dy
        if ey < 0:
            ey += dm
            y0 += sy
        ez -= dz
        if ez < 0:
            ez += dm
            z0 += sz
|
986,655 | dd4964d8d139db09627d6a10fa72b1eebff71a76 | class Solution:
def averageWaitingTime(self, arr: List[List[int]]) -> float:
waitingTime = 0
finishTime = 0
for i in range(len(arr)):
x, y = arr[i]
if i ==0:
finishTime = x + y
waitingTime += y
elif x >= finishTime:
waitingTime += y
finishTime = x + y
else:
waitingTime += finishTime - x + y
finishTime += y
return waitingTime/len(arr)
|
986,656 | dfd5264a6c47ad672948d93334574ee43f49219e | import os, shutil, numpy, time, math
from shapefile import Reader, Writer
from .rasterutils import get_raster, get_raster_on_poly, get_raster_in_poly
from .merge_shapes import format_shape, combine_shapes
def get_distance(p1, p2):
    """Approximate the distance in kilometers between two points on the
    Earth's surface given in decimal degrees, using the ellipsoidal
    projection of 47 CFR 73.208 (valid up to roughly 475 km).

    p1 and p2 are (longitude, latitude) pairs.
    """
    deg_rad = math.pi / 180
    lat_diff = p1[1] - p2[1]
    lat_mean = 0.5 * (p1[1] + p2[1])
    lon_diff = p1[0] - p2[0]
    # Kilometers per degree of latitude / longitude at the mean latitude.
    k1 = (111.13209 - 0.56605 * math.cos(2 * lat_mean * deg_rad) +
          0.00120 * math.cos(4 * lat_mean * deg_rad))
    k2 = (111.41513 * math.cos(lat_mean * deg_rad) -
          0.09455 * math.cos(3 * lat_mean * deg_rad) +
          0.0012 * math.cos(5 * lat_mean * deg_rad))
    return numpy.sqrt(k1**2 * lat_diff**2 + k2**2 * lon_diff**2)
def get_distance_vector(catchpoints, closest):
    """Vectorized get_distance: row-wise distances (km) between two arrays
    of (longitude, latitude, ...) points (only the first two columns are
    used)."""
    deg_rad = math.pi / 180
    lat_diffs = catchpoints[:, 1] - closest[:, 1]
    lat_means = 0.5 * (catchpoints[:, 1] + closest[:, 1])
    lon_diffs = catchpoints[:, 0] - closest[:, 0]
    # Same km-per-degree coefficients as the scalar version, elementwise.
    k1s = (111.13209 - 0.56605 * numpy.cos(2 * lat_means * deg_rad) +
           0.00120 * numpy.cos(4 * lat_means * deg_rad))
    k2s = (111.41513 * numpy.cos(lat_means * deg_rad) -
           0.09455 * numpy.cos(3 * lat_means * deg_rad) +
           0.0012 * numpy.cos(5 * lat_means * deg_rad))
    return numpy.sqrt(k1s**2 * lat_diffs**2 + k2s**2 * lon_diffs**2)
def get_overland(p1, p2, tolerance = 0.1, min_slope = 0.00001):
    """Return (half-length km, slope) of the overland flow plane between
    p1 and p2, each a (longitude, latitude, elevation-in-cm) point.

    Half the distance is returned because the average flow length across
    the plane is half its total length.  Points closer than `tolerance`
    km degrade to the floor values (tolerance, min_slope).
    """
    separation = get_distance(p1, p2)
    if separation > tolerance:
        # Elevations are in cm and distances in km: 100000 cm per km.
        return separation / 2., (p1[2] - p2[2]) / separation / 100000
    return tolerance, min_slope
def get_overland_vector(catchpoints, closest, tol = 0.1, min_slope = 0.00001):
    """Vectorized get_overland: returns (half-length km, slope) arrays for
    row-wise point pairs; rows closer than `tol` km are clamped to
    (tol, min_slope), mirroring the scalar version.
    """
    length = get_distance_vector(catchpoints, closest)
    slope = (catchpoints[:, 2] - closest[:, 2]) / length / 100000
    # BUG FIX: the original iterated "for l, s in zip(length, slope)" and
    # rebound the loop variables, which never wrote the clamped values
    # back into the arrays.  Apply the clamp with a boolean mask instead.
    too_close = length < tol
    length = numpy.where(too_close, tol, length)
    slope = numpy.where(too_close, min_slope, slope)
    return length / 2., slope
def get_centroid(points):
    """Centroid (Cx, Cy) of a closed polygon given as an (n, 2)+ array of
    x-y vertex pairs (first and last vertex coincide)."""
    xs, ys = points[:, 0], points[:, 1]
    # Shoelace cross terms for each edge, then the signed area.
    cross = xs[:-1] * ys[1:] - ys[:-1] * xs[1:]
    area = cross.sum() / 2.
    # Area-weighted edge midpoints give the centroid coordinates.
    Cx = ((xs[:-1] + xs[1:]) * cross).sum() / (6. * area)
    Cy = ((ys[:-1] + ys[1:]) * cross).sum() / (6. * area)
    return Cx, Cy
def combine_catchments(catchmentfile, flowfile, elevationfile, comid,
                       output = None, overwrite = False, verbose = True):
    """Combines together all the catchments in a basin catchment shapefile.

    Creates a new shapefile called "combined" in the same directory as the
    original file. Uses the elevation data from the raster file and the flow
    data file to estimate the length and average slope of the overland flow
    plane.

    Parameters:
        catchmentfile  -- catchment polygon shapefile (needs FEATUREID,
                          AreaSqKM fields)
        flowfile       -- flowline polyline shapefile (needs COMID field)
        elevationfile  -- elevation raster queried via the rasterutils
                          helpers
        comid          -- id written into the output record
        output         -- output shapefile base path (default CWD\\combined)
        overwrite      -- overwrite an existing output shapefile
        verbose        -- print progress messages
    """
    t0 = time.time()
    numpy.seterr(all = 'raise')
    if output is None: output = os.getcwd() + r'\combined'
    if os.path.isfile(output + '.shp') and not overwrite:
        if verbose: print('combined catchment shapefile %s exists' % output)
        return
    if verbose: print('combining catchments from %s\n' % catchmentfile)
    # start by copying the projection files
    shutil.copy(catchmentfile + '.prj', output + '.prj')
    # load the catchment and flowline shapefiles
    c = Reader(catchmentfile, shapeType = 5)
    f = Reader(flowfile, shapeType = 3)
    # make lists of the comids and featureids
    featureid_index = c.fields.index(['FEATUREID', 'N', 9, 0]) - 1
    comid_index = f.fields.index(['COMID', 'N', 9, 0]) - 1
    featureids = [r[featureid_index] for r in c.records()]
    comids = [r[comid_index] for r in f.records()]
    # check that shapes are traceable--don't have multiple points and start
    # and end at the same place--then make an appropriate list of shapes
    # and records--note it's more memory efficient to read one at a time
    n = len(c.records())
    shapes = []
    records = []
    bboxes = []
    # NOTE(review): the bare excepts below hide the actual failure mode of
    # format_shape/combine_shapes; the second attempt retries with
    # omit=True and then skip=True as progressively lossier fallbacks.
    try:
        for i in range(n):
            catchment = c.shape(i)
            record = c.record(i)
            shape_list = format_shape(catchment.points)
            for s in shape_list:
                shapes.append(s)
                records.append(record)
                bboxes.append(catchment.bbox)
        try: combined = combine_shapes(shapes, bboxes, verbose = verbose)
        except: combined = combine_shapes(shapes, bboxes, skip = True,
                                          verbose = verbose)
    except:
        shapes = []
        records = []
        bboxes = []
        for i in range(n):
            catchment = c.shape(i)
            record = c.record(i)
            shape_list = format_shape(catchment.points, omit = True)
            for s in shape_list:
                shapes.append(s)
                records.append(record)
                bboxes.append(catchment.bbox)
        try: combined = combine_shapes(shapes, bboxes, verbose = verbose)
        except: combined = combine_shapes(shapes, bboxes, skip = True,
                                          verbose = verbose)
    # iterate through the catchments and get the elevation data from NED
    # then estimate the value of the overland flow plane length and slope
    lengths = numpy.empty((n), dtype = 'float')
    slopes = numpy.empty((n), dtype = 'float')
    for i in range(n):
        catchment = c.shape(i)
        flowline = f.shape(comids.index(featureids[i]))
        catchpoints = get_raster_on_poly(elevationfile, catchment.points,
                                         verbose = verbose)
        catchpoints = numpy.array([p for p in catchpoints])
        zs = get_raster(elevationfile, flowline.points)
        flowpoints = numpy.array([[p[0], p[1], z]
                                  for p, z in zip(flowline.points, zs)])
        # iterate through the raster values and find the closest flow point
        # NOTE(review): minimizing the dot product of coordinates is not a
        # nearest-point criterion (that would need the squared difference);
        # verify this "closest" heuristic against the author's intent.
        closest = numpy.empty((len(catchpoints), 3), dtype = 'float')
        for point, j in zip(catchpoints, range(len(catchpoints))):
            closest[j] = flowpoints[numpy.dot(flowpoints[:, :2],
                                              point[:2]).argmin()]
        # estimate the slope and overland flow plane length
        length, slope = get_overland_vector(catchpoints, closest)
        if verbose: print('avg slope and length =', slope.mean(), length.mean())
        lengths[i], slopes[i] = length.mean(), slope.mean()
    if verbose: print('\nfinished overland flow plane calculations\n')
    # get area of the subbasin from the catchment metadata
    areasq_index = c.fields.index(['AreaSqKM', 'N', 19, 6]) - 1
    areas = numpy.array([r[areasq_index] for r in c.records()])
    # take the area weighted average of the slopes and flow lengths
    tot_area = round(areas.sum(), 2)
    avg_length = round(1000 * numpy.sum(areas * lengths) / tot_area, 1)
    avg_slope = round(numpy.sum(areas * slopes) / tot_area, 4)
    # get the centroid and the average elevation
    combined = [[float(x), float(y)] for x, y in combined]
    centroid = get_centroid(numpy.array(combined))
    Cx, Cy = round(centroid[0], 4), round(centroid[1], 4)
    elev_matrix, origin = get_raster_in_poly(elevationfile, combined,
                                             verbose = verbose)
    elev_matrix = elev_matrix.flatten()
    elev_matrix = elev_matrix[elev_matrix.nonzero()]
    # raster elevations are in cm; report meters.
    avg_elev = round(elev_matrix.mean() / 100., 2)
    # write the data to the shapefile
    w = Writer(shapeType = 5)
    fields = [['ComID', 'N', 9, 0],
              ['PlaneLenM', 'N', 8, 2],
              ['PlaneSlope', 'N', 9, 6],
              ['AreaSqKm', 'N', 10, 2],
              ['CenX', 'N', 12, 6],
              ['CenY', 'N', 12, 6],
              ['AvgElevM', 'N', 8, 2]]
    record = [comid, avg_length, avg_slope, tot_area, Cx, Cy, avg_elev]
    for field in fields: w.field(*field)
    w.record(*record)
    w.poly(shapeType = 5, parts = [combined])
    w.save(output)
    if verbose: print('\ncompleted catchment combination in %.1f seconds\n' %
                      (time.time() - t0))
|
986,657 | 274c71acf3c21e1df5316b6f9e01ca7f8977caa7 | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.11
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
# Locate and import the compiled _htmc extension module.  On Python >= 2.6
# the module is looked up next to this wrapper via imp; older interpreters
# fall back to a plain import.  (SWIG-generated boilerplate -- do not
# hand-edit the logic; regenerate from the interface file instead.)
if version_info >= (2,6,0):
    def swig_import_helper():
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_htmc', [dirname(__file__)])
        except ImportError:
            # Not found beside the wrapper: fall back to sys.path.
            import _htmc
            return _htmc
        if fp is not None:
            try:
                _mod = imp.load_module('_htmc', fp, pathname, description)
            finally:
                # find_module opened the file; always close it.
                fp.close()
            return _mod
    _htmc = swig_import_helper()
    del swig_import_helper
else:
    import _htmc
del version_info
# SWIG runtime helpers: attribute dispatch through the generated
# __swig_setmethods__/__swig_getmethods__ tables, plus compatibility
# shims for very old interpreters.  (Generated boilerplate.)
try:
    _swig_property = property
except NameError:
    pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    # Setting attributes routes through the SWIG setter table; with
    # static=1, unknown attribute names are rejected.
    if (name == "thisown"): return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
    # Dynamic variant: unknown names fall back to the instance __dict__.
    return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
    # Attribute reads route through the SWIG getter table.
    if (name == "thisown"): return self.this.own()
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    raise AttributeError(name)
def _swig_repr(self):
    # Best-effort repr that survives a missing underlying C object.
    try: strthis = "proxy of " + self.this.__repr__()
    except: strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# Old-/new-style class shim: _object is the proxy base class.
try:
    _object = object
    _newclass = 1
except AttributeError:
    class _object : pass
    _newclass = 0
class HTMC(_object):
    """SWIG proxy for the C++ HTMC (Hierarchical Triangular Mesh) class.

    Generated by SWIG 2.0.11; attribute access is routed through the
    __swig_getmethods__/__swig_setmethods__ tables above.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, HTMC, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, HTMC, name)
    __repr__ = _swig_repr
    def __init__(self, depth=10):
        # Wrap a newly constructed C++ HTMC; 'this' is appended when a
        # SwigPyObject container already exists, otherwise assigned directly.
        this = _htmc.new_HTMC(depth)
        try: self.this.append(this)
        except: self.this = this
    def init(self, depth=10): return _htmc.HTMC_init(self, depth)
    __swig_destroy__ = _htmc.delete_HTMC
    __del__ = lambda self : None;
    def lookup_id(self, *args):
        """
        Class:
            HTM
        Method Name:
            lookup_id
        Purpose:
            Return the index of the input ra/dec at the current htm depth. ra/dec may
            be arrays.
        Calling Sequence:
            import esutil
            h=esutil.htm.HTM(depth)
            htmid = h.lookup_id(ra, dec)
        Inputs:
            ra,dec: Scalars or arrays of equal length.
        Outputs:
            htmid: An array with the htm id.
        Example:
            >>> import esutil
            >>> h=esutil.htm.HTM(depth)
            >>> htmid = h.lookup_id(ra, dec)
        Revision History:
            2010-03-03: SWIG wrapper completed. Erin Sheldon, BNL.
        """
        return _htmc.HTMC_lookup_id(self, *args)
    def intersect(self, *args):
        """
        Wrapper for _htmc.HTMC_intersect.

        NOTE(review): the generated docstring here was copy-pasted from
        lookup_id and described the wrong method. Presumably this returns
        the HTM triangles intersecting a region — confirm the actual
        signature and semantics against the C++ source.
        """
        return _htmc.HTMC_intersect(self, *args)
    def cmatch(self, *args):
        """
        Wrapper for _htmc.HTMC_cmatch.

        NOTE(review): the generated docstring here was copy-pasted from
        lookup_id and described the wrong method. By name this is the
        low-level catalog match routine — confirm arguments against the
        C++ source before use.
        """
        return _htmc.HTMC_cmatch(self, *args)
    def cbincount(self, *args):
        """
        Wrapper for _htmc.HTMC_cbincount.

        NOTE(review): the generated docstring here was copy-pasted from
        lookup_id and described the wrong method. By name this bins pair
        counts — confirm arguments against the C++ source before use.
        """
        return _htmc.HTMC_cbincount(self, *args)
    def depth(self):
        """
        Return the HTM depth of this instance (wrapper for
        _htmc.HTMC_depth). NOTE(review): the original generated docstring
        was copy-pasted from lookup_id; this description is inferred from
        the method name — confirm against the C++ source.
        """
        return _htmc.HTMC_depth(self)
# Register the proxy class with the SWIG runtime.
HTMC_swigregister = _htmc.HTMC_swigregister
HTMC_swigregister(HTMC)
class Matcher(_object):
    """SWIG proxy for the C++ Matcher class (HTM-based positional matching)."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Matcher, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Matcher, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        # Wrap the C++ object; same 'this' bookkeeping as HTMC.__init__.
        this = _htmc.new_Matcher(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _htmc.delete_Matcher
    __del__ = lambda self : None;
    # Thin pass-throughs to the C extension; see the C++ source for semantics.
    def get_depth(self): return _htmc.Matcher_get_depth(self)
    def match(self, *args): return _htmc.Matcher_match(self, *args)
# Register the proxy class with the SWIG runtime.
Matcher_swigregister = _htmc.Matcher_swigregister
Matcher_swigregister(Matcher)
# This file is compatible with both classic and new-style classes.
|
986,658 | baf331bef4dd0df0f9f93ec687565f5517d3260a | import discord
import Session
import UserManager
class Command:
    """A chat command: a trigger string, a permission gate, and a coroutine to run."""

    def __init__(self, command: str, restricted: bool, assigned_function):
        # command: message prefix that triggers this command (e.g. "!ban").
        # restricted: when True, only users the UserManager allows may run it.
        # assigned_function: coroutine taking (session, message), awaited on match.
        # (Removed the redundant class-level attribute declarations; they only
        # shadowed these instance attributes.)
        self.command = command
        self.restricted = restricted
        self.assigned_function = assigned_function

    async def process_command(self, session: Session, message: discord.Message):
        """Run this command if *message* triggers it and the author is permitted.

        Messages that do not start with the trigger, or that were sent by
        the bot itself, are ignored silently. Unauthorized attempts are
        logged and answered with a refusal message.
        """
        user_manager = session.user_manager
        if not message.content.startswith(self.command) or message.author == session.client.user:
            return
        user_allow = await user_manager.is_user_allowed(channel=message.channel, member=message.author)
        # Simplified: `(not R) or (R and allow)` is equivalent to `not R or allow`.
        if not self.restricted or user_allow:
            session.logger.info("User {user} issued {command}.".format(user=message.author.name, command=self.command))
            await self.assigned_function(session, message)
        else:
            session.logger.info("User {user} issued {command} and was not allowed.".format(user=message.author.name, command=self.command))
            # Grammar fix in the user-facing refusal ("allowed to this" -> "allowed to do this").
            await session.client.send_message(message.channel, "**You are not allowed to do this!**")
|
986,659 | 6afd9c1e2f44a6f6b25e2c02a5a0f8b7d44037a6 | s00 = "kitten"
S01 = "sitting"
def get_dist(s00, s01):
    """Return the Levenshtein (edit) distance between two strings.

    The original body was a stub that only printed "Nidea" and returned
    None; the module's sample constants ("kitten"/"sitting") are the
    classic edit-distance example, whose distance is 3.

    Uses the two-row Wagner-Fischer dynamic program: O(len*len) time,
    O(min(len, len)) extra space.
    """
    # Keep the shorter string as the inner dimension to minimize memory.
    if len(s00) < len(s01):
        s00, s01 = s01, s00
    previous = list(range(len(s01) + 1))
    for i, ch0 in enumerate(s00, 1):
        current = [i]
        for j, ch1 in enumerate(s01, 1):
            cost = 0 if ch0 == ch1 else 1
            current.append(min(previous[j] + 1,          # deletion
                               current[j - 1] + 1,       # insertion
                               previous[j - 1] + cost))  # substitution
        previous = current
    return previous[-1]
986,660 | 02eea9466dc420f9097fef795fce3e9b335ef9ec | """
Name:Kenzin Igor
Date:20/05/2021
Brief Project Description:Reading Tracker
GitHub URL:https://github.com/JCUS-CP1404/assignment-2-reading-tracker-igor5514
"""
# Create your main program in this file, using the ReadingTrackerApp class
from kivy.app import App
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.uix.spinner import Spinner
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.recycleview import RecycleView
from kivy.core.window import Window
from kivy.metrics import dp
from kivy.graphics import Color, Rectangle
from kivy.properties import ObjectProperty
from bookcollection import BookCollection
from book import Book
# Constants for work with files
FILENAME = 'books.csv'
class MainScreen(Screen):
    """The application's single screen; builds its layout lazily on entry."""

    def __init__(self, books=None, **kwargs):
        super().__init__(**kwargs)
        # Collection displayed by this screen; the layout itself is created
        # only once the screen is actually entered.
        self.books = books
        self.main_box = None

    def on_enter(self):
        """Kivy hook: construct and attach the main layout."""
        box = MainBox(self.books)
        self.main_box = box
        self.add_widget(box)
class BookButton(Button):
    """A button bound to one Book; pressing it toggles the book's completion."""

    def __init__(self, book, top_label, warn_label, **kwargs):
        super().__init__(**kwargs)
        # The book this button represents plus the two labels it refreshes.
        self.book = book
        self.top_label = top_label
        self.warn_label = warn_label
        self.set_color()
        self.text = str(book)

    def build(self):
        # Builder hook; the button is its own root widget.
        return self

    def set_color(self):
        """Color the button by state: white when completed, aqua when required."""
        if self.book.is_completed:
            self.background_color = 'white'
        else:
            self.background_color = 'aqua'

    def on_press(self):
        """Toggle the book's state, then refresh this button and both labels."""
        book = self.book
        if book.is_completed:
            book.mark_required()
        else:
            book.mark_completed()
        self.set_color()
        self.text = str(book)
        self.top_label.set_label_text()
        status = 'completed' if book.is_completed else 'need to read'
        if book.is_long():
            suffix = ' Great job!' if book.is_completed else ' Get started!'
        else:
            suffix = ''
        self.warn_label.set_label_text('You {} \'{}\'.{}'.format(status, book.title, suffix))
class BookLabel(Label):
    """Common base type shared by the app's labels."""

    def __init__(self, **kwargs):
        # No extra state; exists so subclasses share one base type.
        super().__init__(**kwargs)

    def build(self):
        # Builder hook; the label is its own root widget.
        return self
class HeadLabel(BookLabel):
    """Header label showing the total pages still to be read."""

    def __init__(self, collection=None, **kwargs):
        super().__init__(**kwargs)
        # The BookCollection queried for the required-page count.
        self.collection = collection

    def set_label_text(self):
        """Refresh the caption from the collection's required-page total."""
        pages = self.collection.get_required_pages()
        self.text = 'Pages to read: {}'.format(pages)
class WarningLabel(BookLabel):
    """Bottom information label: status messages, optionally in warning colors."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # NOTE: set_label_text runs before self.warn is first assigned here;
        # on_size() below therefore guards access with hasattr().
        self.set_label_text('Welcome to the Reading Tracker 2.0!')
        self.warn = False
    def set_label_text(self, text, warn=False):
        """Sets the text and color of the label.

        warn=True renders the message in the warning palette
        (darksalmon text on a reddish background tint).
        """
        self.text = text
        self.warn = warn
        self.opacity = .8
        if self.warn:
            self.color = 'darksalmon'
        else:
            self.color = 'lightgreen'
        # Redraw the background to match the new warn state.
        self.on_size()
    def on_size(self, *args):
        """Sets the background color of the label"""
        self.canvas.before.clear()
        # Kivy may trigger on_size before __init__ finishes, so 'warn' can be absent.
        if hasattr(self, 'warn'):
            color = (.8, 0, 0) if self.warn else (0, .5, 0)
        else:
            color = (0, .5, 0)
        with self.canvas.before:
            Color(*color, 0.25)
            Rectangle(pos=self.pos, size=self.size)
class MainBox(BoxLayout):
    """Base page layout class; root layout whose children are wired in the kv file."""
    # Links to parts of the page layout that are set in the kv file
    spinner = ObjectProperty(None)
    maingrid = ObjectProperty(None)
    recycle = ObjectProperty(None)
    headlabel = ObjectProperty(None)
    warnlabel = ObjectProperty(None)
    def __init__(self, books=None, **kwargs):
        super().__init__(**kwargs)
        # books: the BookCollection displayed and mutated by this layout.
        self.books = books
        self.init_grid()
        #Markers for defining the internal name of an object
        self.ids_obj = dict(zip(self.ids.values(), self.ids.keys()))
        # Navigation markers on Text Input objects when tab is pressed
        # (assumes the kv file defines add_title/add_author/add_pages ids).
        self.markers = [self.ids['add_title'], self.ids['add_author'], self.ids['add_pages']]
    def init_grid(self):
        """Preliminary steps when initializing a page"""
        self.headlabel.collection = self.books
        self.headlabel.set_label_text()
        self.warnlabel.set_label_text('Welcome to the Reading Tracker 2.0!')
        # Initial sort order matches the spinner's default ('Author').
        self.building_grid(None, 'Author')
    def building_grid(self, instance, value):
        """Building a list of books.

        instance: the spinner widget firing the event (unused);
        value: sort key name forwarded to BookCollection.sort.
        """
        self.books.sort(value)
        # Building a scroll window
        self.recycle.width = Window.width
        self.recycle.height = Window.height - self.headlabel.height - self.warnlabel.height
        self.maingrid.bind(
            minimum_height=self.maingrid.setter('height')
        )
        # Redrawing the list of books
        self.maingrid.clear_widgets()
        for book in self.books:
            self.maingrid.add_widget(
                BookButton(
                    book=book,
                    top_label=self.headlabel,
                    warn_label=self.warnlabel,
                    text=str(book),
                    size_hint_y=None
                )
            )
    def add_book(self, title_obj, author_obj, pages_obj):
        """Book Adding Handling: validate the three text inputs, then add the book."""
        title, author, pages = map(str.strip, (title_obj.text, author_obj.text, pages_obj.text))
        # Checking the correctness of input fields
        if not title or not author or not pages:
            self.warnlabel.set_label_text('All fields must be completed', True)
            return
        try:
            pages = int(pages)
        except ValueError:
            self.warnlabel.set_label_text('Please enter a valid number', True)
            return
        if pages < 1:
            self.warnlabel.set_label_text('Pages must be > 0', True)
            return
        self.warnlabel.set_label_text('You added a new book')
        self.books.add_book(Book(title, author, pages))
        self.headlabel.set_label_text()
        # Rebuild the grid using the spinner's current sort order.
        self.building_grid(None, self.spinner.text)
        self.clear_addfields(title_obj, author_obj, pages_obj)
    def clear_addfields(self, title, author, pages):
        """Clearing book add fields"""
        title.text = ''
        author.text = ''
        pages.text = ''
    def text_control(self, field):
        """Controlling movement through tab input fields.

        A literal tab typed into *field* is stripped and focus moves to
        the next input in self.markers, wrapping back to the first.
        """
        if field.text.endswith('\t'):
            field.text = field.text[:-1]
            idx = self.markers.index(field)
            field.focus = False
            if idx == len(self.markers)-1:
                self.markers[0].focus = True
            else:
                self.markers[idx+1].focus = True
class ReadingTrackerApp(App):
    """Basic application: loads the collection on start, saves it on stop."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.books = BookCollection()
        try:
            # backup=True — presumably makes a backup copy before/while
            # loading; confirm against BookCollection.load_books.
            self.books.load_books(FILENAME, backup=True)
        except (FileNotFoundError, LookupError):
            # Missing or malformed CSV: start with an empty collection.
            pass
    def build(self):
        """Create the screen manager holding the single main screen."""
        sm = ScreenManager()
        sm.add_widget(MainScreen(self.books))
        return sm
    def on_stop(self):
        """Saving an updated book list file"""
        self.books.save_books(FILENAME)
        return super().on_stop()
if __name__ == '__main__':
    ReadingTrackerApp().run()
|
986,661 | 585c6856b3dc34408d71270302bab915b0030934 | import math
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
class CustomLogisticRegression:
    """Logistic regression trained by per-sample gradient descent.

    Two fit variants are provided: a squared-error gradient (`fit_mse`)
    and a log-loss gradient (`fit_log_loss`).  Both store the learned
    weights in ``self.coef_`` (intercept first when ``fit_intercept`` is
    True) and return the per-sample errors observed during the first and
    last epochs.
    """

    def __init__(self, fit_intercept=True, l_rate=0.01, n_epoch=100):
        self.fit_intercept = fit_intercept  # learn a bias term?
        self.l_rate = l_rate                # gradient-descent step size
        self.n_epoch = n_epoch              # full passes over the data

    def sigmoid(self, t):
        """Logistic function 1 / (1 + e^-t)."""
        return 1 / (1 + math.exp(-t))

    def predict_proba(self, row, coef_):
        """Probability of class 1 for one feature row under weights *coef_*."""
        if self.fit_intercept:
            t = coef_[0] + np.dot(row, coef_[1:])
        else:
            t = np.dot(row, coef_)
        return self.sigmoid(t)

    def fit_mse(self, X_train, y_train):
        """Fit by minimising squared error; return first/last-epoch per-sample errors."""
        if self.fit_intercept:
            self.coef_ = [0] * (len(X_train[0]) + 1)  # initialized weights, bias first
        else:
            self.coef_ = [0] * len(X_train[0])
        n = len(X_train)
        mse_first = list()
        mse_final = list()
        for epoch in range(self.n_epoch):
            for i, row in enumerate(X_train):
                y_hat = self.predict_proba(row, self.coef_)
                # Shared gradient factor, hoisted out of the per-weight loop.
                grad = (y_hat - y_train[i]) * y_hat * (1 - y_hat)
                if self.fit_intercept:
                    self.coef_[0] -= self.l_rate * grad
                    for j in range(len(row)):
                        self.coef_[j + 1] -= self.l_rate * grad * row[j]
                else:
                    for j in range(len(row)):
                        self.coef_[j] -= self.l_rate * grad * row[j]
                # Record each sample's (pre-update) squared error in the
                # first and last epochs.
                if epoch == 0:
                    mse_first.append(1 / n * pow(y_hat - y_train[i], 2))
                if epoch == self.n_epoch - 1:
                    mse_final.append(1 / n * pow(y_hat - y_train[i], 2))
        return {"first_mse": mse_first, "final_mse": mse_final}

    def fit_log_loss(self, X_train, y_train):
        """Fit by minimising log-loss; return first/last-epoch per-sample losses."""
        if self.fit_intercept:
            self.coef_ = [0] * (len(X_train[0]) + 1)  # initialized weights, bias first
        else:
            self.coef_ = [0] * len(X_train[0])
        n = len(X_train)
        log_loss_first = list()
        log_loss_final = list()
        for epoch in range(self.n_epoch):
            for i, row in enumerate(X_train):
                y_hat = self.predict_proba(row, self.coef_)
                # Log-loss gradient factor (the 1/n matches the original code).
                grad = (y_hat - y_train[i]) / n
                if self.fit_intercept:
                    self.coef_[0] -= self.l_rate * grad
                    for j in range(len(row)):
                        self.coef_[j + 1] -= self.l_rate * grad * row[j]
                else:
                    for j in range(len(row)):
                        self.coef_[j] -= self.l_rate * grad * row[j]
                # Record each sample's (pre-update) log-loss in the first
                # and last epochs.
                if epoch == 0 or epoch == self.n_epoch - 1:
                    loss = -1 / n * (y_train[i] * math.log(y_hat) + (1 - y_train[i]) * math.log(1 - y_hat))
                    if epoch == 0:
                        log_loss_first.append(loss)
                    if epoch == self.n_epoch - 1:
                        log_loss_final.append(loss)
        return {"log_loss_first": log_loss_first, "log_loss_final": log_loss_final}

    def predict(self, X_test, cut_off=0.5):
        """Return one 0/1 label per row of X_test.

        Bug fix: the original used strict '>' / '<' tests, so a
        probability exactly equal to *cut_off* appended NOTHING and the
        returned list silently lost elements.  Ties now classify as 1.
        """
        predictions = list()
        for row in X_test:
            y_hat = self.predict_proba(row, self.coef_)
            predictions.append(1 if y_hat >= cut_off else 0)
        return predictions  # predictions are binary values - 0 or 1
# --- Script driver: compare the two custom fits against scikit-learn ---
# model = CustomLogisticRegression(fit_intercept=True, l_rate=0.01, n_epoch=1000) # for the first three stages
mse_model = CustomLogisticRegression(fit_intercept=True, l_rate=0.01, n_epoch=1000)
log_loss_model = CustomLogisticRegression(fit_intercept=True, l_rate=0.01, n_epoch=1000)
sklearn_model = LogisticRegression(fit_intercept=True, max_iter=1000)
# Three features of the sklearn breast-cancer dataset, standardised below.
data = load_breast_cancer(as_frame=True).frame
data_columns = ['worst concave points', 'worst perimeter', 'worst radius']
target_column = ['target']
scaler = StandardScaler()
x = data[data_columns]
y = data[target_column]
x = scaler.fit_transform(x)
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.8, random_state=43)
# Reset indices so positional access (y_train[col][i]) lines up with rows.
for dataset in [y_train, y_test]:
    dataset.reset_index(drop=True, inplace=True)
mse_error = mse_model.fit_mse(x_train, y_train[target_column[0]])
log_loss_error = log_loss_model.fit_log_loss(x_train, y_train[target_column[0]])
sklearn_model.fit(x_train, y_train[target_column[0]])
mse_predictions = mse_model.predict(x_test)
log_loss_predictions = log_loss_model.predict(x_test)
sklearn_predictions = sklearn_model.predict(x_test)
mse_score = accuracy_score(y_test, mse_predictions)
log_loss_score = accuracy_score(y_test, log_loss_predictions)
sklearn_score = accuracy_score(y_test, sklearn_predictions)
# Collected metrics, shaped to match the project's expected-output check.
values = {'mse_accuracy': mse_score, 'logloss_accuracy': log_loss_score, 'sklearn_accuracy': sklearn_score, 'mse_error_first': mse_error['first_mse'], 'mse_error_last': mse_error['final_mse'], 'logloss_error_first': log_loss_error['log_loss_first'], 'logloss_error_last': log_loss_error['log_loss_final']}
#answer_dict = {'mse_accuracy': 0.9649122807017544, 'logloss_accuracy': 0.9649122807017544, 'sklearn_accuracy': 0.9649122807017544, 'mse_error_first': [0.0005494505494505495, 0.0005479411901199512, 0.0005462938918075033, 0.0005483649821435793, 0.0005449777602668188, 0.0005465389968782148, 0.0005461152268424567, 0.0005437657975692605, 0.0005449503977559713, 0.0005332354502691827, 0.0005467009152216291, 0.0005348759703757981, 0.0005418900326760828, 0.0005452116082325752, 0.0005255601479164673, 0.0005410931383673695, 0.0005389752869800741, 0.0005382706459865135, 0.000536001719878404, 0.0005391530380032358, 0.00052861440687876, 0.0005243686477474502, 0.0005194644572302617, 0.0005219008910719626, 0.0005275639876855294, 0.0005294625937561191, 0.0005440733711013254, 0.0005275881241668178, 0.0005305869752658705, 0.0005151368622697822, 0.0004898910124624294, 0.0004935688401495719, 0.0005907817481125807, 0.0005415811438306481, 0.0005292364531546022, 0.000519354029765896, 0.000530048787620448, 0.0005026166871066447, 0.0005040574898543715, 0.00047347902939871617, 0.0005297067576448801, 0.000543443978744388, 0.0005145831618823939, 0.0004905213364624332, 0.0005212440135708726, 0.0005131561900031159, 0.0004765201321226917, 0.0005111320227419458, 0.0005163192825300981, 0.0004610524517004608, 0.00047834855910253756, 0.0004952728333993177, 0.0004650573842722183, 0.0004925849659638627, 0.00047928390386125304, 0.0004914957064813486, 0.0005401043061795428, 0.00045344988904217644, 0.0004580495130451206, 0.0005024606037277136, 0.0005639942542426755, 0.0004879060019912843, 0.00048692489615500265, 0.00048184052150317743, 0.0004843324539732614, 0.0004953191547746046, 0.00035433192263757576, 0.000485173425456471, 0.0004887302889066022, 0.00045841559987281407, 0.00047833432345815004, 0.0004920517548157017, 0.0005401379522774756, 0.00044496148493266234, 0.0005292101267784971, 0.00047249580281645884, 0.00046803301365109075, 0.00040905896075669015, 0.00045240078183608583, 0.000501825599571657, 
0.0005182166586603607, 0.0003930559875549771, 0.0005080011773601419, 0.00043714777786576755, 0.0004764651771342405, 0.00048796350839978354, 0.0004914422957405987, 0.0005073628582695488, 0.0004628327545938161, 0.00047661339415538484, 0.0004704301736712522, 0.00034274781128216863, 0.0004640934664821125, 0.00040369471931905155, 0.00033758540589701506, 0.0005483538824952028, 0.0004919354576309312, 0.00043974794160051884, 0.00043524100322905646, 0.00040661779977321105, 0.0005138061670362106, 0.0003874576793676253, 0.00040920505661737956, 0.0005175153612005218, 0.0004932123813134447, 0.0005419860384293299, 0.00043456842906439684, 0.0005369460173982624, 0.0003664763737532122, 0.00022607369035675579, 0.0003845567549842926, 0.00041641068626195837, 0.00046817111259436317, 0.0004306347683323436, 0.0004005891181501844, 0.0005427778181644802, 0.0004260997428870622, 0.0006191595949171043, 0.0005039784759282975, 0.00038689991280111655, 0.0004847693550949102, 0.0003589721846317479, 0.000409342633992966, 0.00038716807807547964, 0.000420398437909964, 0.0004445232745155467, 0.0003701607632677591, 0.0002672632969105238, 0.0004140059318003212, 0.00045976850544396754, 0.0005438739184497396, 0.0005294190467014072, 0.0005035578208551643, 0.0003970814250427969, 0.00036715521678453713, 0.00043876885780903287, 0.00048007413172196736, 0.0005541605229583185, 0.0004128647786906124, 0.0003473245515419351, 0.0005333474561263617, 0.00037689913950861433, 0.0005530264103153319, 0.000386913648471038, 0.00045342287537645566, 0.0005614869787121577, 0.0003554892933169279, 0.00038156610538147835, 0.0003714947746249783, 0.0004802974208458598, 0.00034211548423882123, 0.00040357616088400897, 0.0004621585340045536, 0.0004138907035623331, 0.00043760338721928095, 0.00026988827525152196, 0.00041762995652114005, 0.00045246231460968443, 0.0005244481462528051, 0.0004530171560581885, 0.00040242453214095513, 0.0002419289120492027, 0.0004815823486935371, 0.0004009357764422529, 0.0004171891300034429, 
0.00034783346129895344, 0.0003384672205206901, 0.0004569892650260599, 0.0002664153057033883, 0.00040710673012498143, 0.0003275459589159061, 0.00035150130818314545, 0.00032743432804793136, 0.0003551330940591086, 0.0005610319809155461, 0.00041062145764711195, 0.0004516364483726682, 0.00024554003448021494, 0.00025646231798467055, 0.0003571935945758426, 0.0003719465962073665, 0.00038033617191280897, 0.00041763154501231437, 0.0004907196144842212, 0.0002828547287694165, 0.00032470855580993524, 0.0005863745928244529, 0.0003472973654533829, 0.0005367093218942025, 0.0002659396871922151, 0.0003739499652148518, 0.0004218498242967838, 0.0004251687046625409, 0.0003549590024612001, 0.0003260385597854337, 0.0003392498931537481, 0.0004744212856327029, 0.0005632772010340513, 0.0004106205797869412, 0.00043125271386491485, 0.000397301412848671, 0.00020149830172230108, 0.0001933831934891455, 0.00034389009469821787, 0.0002443254833977653, 0.00029181413451811377, 0.0003771552863222578, 0.00048794749334267793, 0.00014576075530085256, 0.00037906214207577746, 5.968153133303602e-05, 0.00031650611105789875, 0.0003660949380008519, 0.0002344045277011704, 0.00041869364019833855, 0.0004650584005241584, 0.0004443483305933685, 0.0002853573649972399, 0.0003799817319314774, 0.00032024383961330814, 0.000421835342161436, 0.00030415652888467307, 0.0004414842023769592, 0.000465485780899899, 0.0005498510281383849, 0.0003547294092533419, 0.00038302982334836904, 0.0002969483981695131, 0.00018433436082131074, 0.00048377043399116496, 0.000409870070217235, 0.0004688729871240738, 0.000412001051180278, 0.0002695470992641485, 0.00040224729204097246, 0.0003660024414908535, 0.000287458346144709, 0.00031474830726236914, 0.00029945400440142567, 0.00045642633351063134, 0.0005661658614930694, 0.0004422297416020022, 0.00023733283754232766, 0.00035528871628211895, 0.0005240269405916067, 0.000344753124738, 0.0004910776017947793, 0.0005270789418923383, 0.00033264606107805497, 0.00034313130213992326, 0.0005732874548718284, 
5.511770106966185e-05, 0.0002043009467377535, 0.0005593076136690572, 0.00032236999173073034, 0.0003215702412480134, 0.00022574225764479433, 0.0003117113387775454, 0.00046732368386860255, 0.0004242603307619002, 0.0003455160671534463, 0.00023367533263978215, 0.00034353870765116163, 0.00035518172026570823, 0.00037544164618787015, 0.00043192926962321753, 0.00047848717949889035, 0.00026385317090495033, 0.0003371099097013716, 0.0002801277219493312, 0.000263118419036581, 0.00029922430798425605, 0.0002694948664315611, 0.0003451450956527472, 0.0003084359518069169, 0.0003221681451163221, 0.0004927426943693315, 0.0004023208135575836, 0.00031426426458912684, 0.0004056562640842282, 0.0001924201479313467, 0.0003281019270064641, 0.00031102898157731994, 0.0005868184291999855, 0.00027600147985488126, 0.00041434588207008656, 0.00032679458424931753, 0.0003838821202055928, 0.000254698589848675, 0.0005553788567074868, 0.0002536504458720441, 0.00022687087272336878, 0.00014582009374798999, 0.0005287147890040429, 0.0001900177425091066, 0.0002247465786449439, 0.00046546725135643155, 0.00010077277297404927, 0.00023394760905071202, 0.0003332603284001963, 0.0005044453516551002, 0.0002519366373680491, 0.00022202073935511548, 0.0003110160538230961, 0.0005648005489345642, 0.0003404925330417336, 0.00015746455332261064, 0.0003529879194489918, 0.0002628978664450803, 0.0004161721248224806, 0.00014904702547036255, 0.0002844760051473288, 0.0002414617139268484, 0.0002856259663530048, 0.0003487476974357744, 0.00041001729843102044, 0.00011869982969064866, 0.0001900656987409056, 0.00026149939678756813, 9.859116411118056e-05, 0.00031113316400930473, 0.00014616014388544688, 0.00017202334743734426, 0.00019393980341928806, 0.0004580708571104337, 0.00013638588085578064, 0.0002591957964442887, 0.00015103243400691797, 0.0005848868519705076, 0.00028956940261395524, 0.0003584921678550362, 0.00019346881792227567, 0.0002727469049485535, 0.00034973260492874485, 0.000347261958408459, 0.0004162286342354487, 
0.0002645013009599788, 0.0003284012915691061, 0.0005561753070619193, 0.00033540046544619056, 0.000385136962155825, 0.000439187184278462, 0.00010721588305580638, 0.0002855605138523193, 0.00036750788591800003, 0.000422260143834295, 0.0005890587612655336, 0.00025632742821475206, 0.00035487282474804563, 0.00024198679024498956, 0.0004852083252017277, 0.00011261018855196309, 0.0003310267123897082, 4.7622105517488124e-05, 0.00030340547230564417, 0.00038573677932953767, 0.0005034987537162957, 0.00047613666461257883, 0.0001901365421671457, 0.0004006173816527008, 0.00020795277534093164, 0.0005953182329338522, 0.0003963035842122334, 0.0005344862253046106, 7.525130023812591e-05, 0.00016987067813464924, 0.00034398739376393696, 0.00017816182276368802, 0.00021846162605621485, 0.00011071194154457004, 0.00029471579041434457, 0.0001689416482072831, 0.0005235106590386146, 6.91361429852211e-05, 0.0003264865879627291, 0.00033787019630380686, 0.000144458372835925, 0.0002328631976192741, 0.0003293597830469982, 0.00034562481623761536, 0.00045867270769285975, 0.00011123546551707527, 0.00022743012504587086, 0.000243947732848329, 0.0005161740801358892, 0.0001544597401008926, 0.0002811449491496434, 0.0005205706331082447, 0.00025771047145531546, 0.0001457014340401317, 0.0001333350182161567, 0.00015490453253846936, 3.745016965786112e-05, 0.000153523350710173, 0.0001654522021763927, 3.0101599188907003e-05, 7.597455438037712e-05, 0.00018150555051756328, 0.0001714792438280893, 0.0005167626177503202, 0.00014896530063585734, 0.0006547085472530755, 0.00013064607755645737, 0.0002112807160794762, 0.0002532595569329841, 0.0003420605030016487, 0.00013641904710503077, 0.00022503738438268208, 0.0003079864683134793, 7.454296780839784e-05, 0.000308432365935107, 0.0005809686414807144, 0.0002443137509055542, 0.00034547852671000415, 0.00027003660867644223, 0.0004992134417440238, 0.0004755136566908245, 0.00017756427453527896, 0.00028203288643674853, 0.00024856833711122664, 0.00044937454740969987, 
0.0005555419086714472, 0.0006059255423401778, 0.0004089780454961678, 0.00029624420380520517, 4.4909604753981315e-05, 0.00018103362516059586, 0.00028165243492492465, 0.00080693163001135, 0.00019976171785563923, 0.00022124036090737158, 0.0003073006457830564, 0.00021735768789013456, 0.0005324085424981139, 0.00017214199045062433, 0.00024545327637382457, 0.0003531159374859849, 0.00038956141954357904, 0.0005800131835296025, 0.00013674925208202791, 0.0003306923546950355, 8.321355522132788e-05, 0.0005596199009707334, 0.00014710979556347095, 0.00043872252698973126, 0.00039030170175471767, 8.311969100423628e-05, 0.0007046924706110442, 0.000309662625287033, 9.586749746053568e-05, 0.00036065409688159586, 0.00019653555727707398, 0.0003813721359676146, 0.0005486090296429864, 0.0002189680967151873], 'mse_error_last': [3.287806811757015e-08, 3.945627382344156e-13, 8.475770985120507e-09, 3.565979905527658e-05, 1.443312704800495e-07, 2.9478452136510755e-07, 4.084174488633467e-06, 2.0872149006030794e-07, 2.716402129705731e-06, 2.7811646099227164e-13, 7.036606028147768e-05, 1.5746979199776993e-10, 1.0252875269280438e-06, 2.9850397916747472e-05, 3.499228475069048e-12, 2.692480492950975e-06, 9.846877956300406e-07, 1.1846962423166893e-06, 4.6503700700944075e-07, 6.799606770912706e-06, 1.2425410922418629e-08, 2.9626046896192174e-09, 4.3492328837978976e-10, 5.062768246529268e-09, 2.0001955297368465e-07, 9.636699328547477e-07, 0.000391586221961692, 7.904253215151276e-07, 1.9289707351658356e-08, 3.966434262457259e-09, 1.1158360933163301e-13, 3.2857300033535768e-12, 0.002191006085164583, 0.00029877277322985855, 7.547075648163248e-06, 2.717086950066536e-07, 9.381210806626776e-08, 8.546558299592732e-10, 4.347735523117067e-09, 5.447241135839191e-16, 9.101089801810448e-07, 0.000569137550853956, 3.0157160633850155e-07, 6.592047760688601e-12, 5.47957882841122e-06, 5.919107377016455e-09, 9.139362974196732e-13, 4.699484185116314e-08, 2.514428110470586e-06, 1.7288036176702647e-13, 
1.254916538198217e-10, 1.2505321300353752e-08, 2.63902060131015e-11, 1.96705917553311e-08, 1.2965184716199462e-09, 8.588427669557448e-09, 5.429519721574718e-05, 7.458566233419748e-12, 1.8468663717121892e-11, 2.9937692485564837e-07, 0.0010357403576488754, 4.881195546863931e-08, 4.256577032812513e-08, 1.9228092617501453e-08, 5.606837095070431e-09, 1.1930458281426655e-07, 6.04849840774588e-20, 7.944272557076728e-08, 8.352903198977993e-08, 5.894036788924832e-11, 2.1579875993313653e-08, 5.20109656435424e-07, 0.00025717278888797243, 1.1595108456397995e-10, 1.8479807790531623e-05, 3.195166121347455e-08, 1.0202867982802931e-08, 1.2435566974684999e-12, 4.509295108240448e-09, 3.5694714952262814e-06, 4.3263713365983305e-05, 1.2951863006515352e-13, 2.7624572082119065e-07, 1.3469950664923914e-10, 2.286365472570852e-08, 1.5048221363217297e-07, 1.7702788753862821e-06, 1.4424986226047993e-05, 5.60308936279901e-08, 1.6718746304706675e-07, 1.4521474642796737e-07, 1.9125397265337487e-16, 8.403370223238914e-08, 1.653001488167725e-11, 2.3598058619894017e-16, 0.0003477496947267959, 2.9757550814574583e-06, 2.6574158468331477e-09, 4.31801133512164e-09, 5.255072767283223e-11, 7.257276466264566e-05, 1.2948460780106293e-12, 6.42154352641882e-11, 4.38910265267654e-05, 4.370804813283753e-06, 0.00016508828692340474, 3.2756345506582116e-09, 0.00021896130779609026, 1.2696940544804728e-12, 5.07704531278458e-22, 2.458831017439387e-11, 1.3015516148562643e-09, 6.143546853954314e-07, 1.2290582717225785e-08, 3.305686003689673e-10, 0.00041523171207204386, 4.0033674534574785e-09, 0.0019376475854209312, 6.412624897622702e-06, 6.186669915733371e-11, 1.642026412483847e-06, 5.044379573313505e-12, 2.0461458628074243e-09, 1.929004661380844e-10, 1.1533936195175604e-08, 4.274116074887852e-08, 5.0148129716366536e-11, 1.721672838593025e-16, 3.1532548484262024e-09, 6.352355240503192e-07, 0.0003799147712227519, 0.00021823327468118426, 2.724918609891164e-05, 1.2076654469097298e-09, 6.313284551399956e-11, 
1.0583409687519379e-07, 5.295475247021648e-06, 0.0006403100718169729, 6.472324195075504e-09, 1.6023663116473846e-11, 0.00029216381084928703, 2.503205548761734e-10, 0.0005985214192786218, 1.080633816491386e-09, 9.672595332667232e-07, 0.0006928734565254648, 6.376796714161828e-11, 3.7154358803126936e-10, 4.4944315395301366e-10, 5.996659218584282e-06, 2.3338820785492992e-11, 1.1658332317886958e-08, 1.8244767464354497e-06, 5.2043043104415454e-08, 2.4955266168909643e-07, 2.8193037924093992e-14, 4.864525060916816e-08, 8.155300559847273e-07, 0.00024788787447232454, 8.912723509456514e-07, 1.940813749944441e-08, 2.067048226189943e-15, 1.4446789050714843e-05, 1.4737160366369144e-08, 7.01756679166746e-08, 2.1656657021965689e-10, 8.985631803363753e-11, 1.7249344125190613e-06, 2.612684569451447e-13, 3.700422153611761e-08, 2.0581730601517316e-11, 7.180637445196949e-10, 4.956563839526298e-11, 1.4987764489225294e-09, 0.0009593760648315208, 4.9739336113967516e-08, 3.192305165925103e-06, 6.357868815961176e-14, 6.03848994581072e-14, 1.19976011699373e-09, 3.623995595906822e-09, 1.2281916960251858e-08, 1.1727805142876437e-07, 2.685301499027496e-05, 2.1873907042220745e-12, 1.478230804402302e-10, 0.0011962727860526906, 7.599236989255468e-10, 0.0003259618508721395, 1.453994739713749e-12, 9.862361365051029e-09, 4.318795738831577e-07, 3.338367899482604e-07, 3.21517634197168e-09, 3.4324865784094363e-10, 7.486678068388084e-10, 1.112217296281584e-05, 0.0007342790786951575, 1.7498453351206776e-07, 1.1867864508022607e-06, 8.745396316077148e-08, 5.6607284537636935e-15, 1.2163061684049553e-15, 1.889998625036093e-09, 4.228988663330003e-13, 3.3970330591284725e-11, 3.2364872246481775e-08, 2.463192259158208e-05, 3.53077533802743e-17, 5.175181801442896e-08, 1.6036011074654453e-24, 3.4666503166646783e-10, 2.0753003640737648e-08, 4.034106850977476e-13, 1.5538221038802899e-06, 1.5153225280360603e-05, 4.525006458669469e-06, 5.546147724043559e-11, 9.922127289629481e-08, 2.3147828854834346e-09, 
9.162173705500747e-07, 4.016234035567447e-10, 4.9849229856178734e-06, 8.127875794117119e-06, 0.0005800589422556135, 1.2198187797694596e-08, 6.643514079963884e-08, 1.6157503537842245e-10, 7.23609747432269e-15, 3.8887572722073626e-05, 9.027437682756584e-07, 1.8891067341842764e-05, 1.1091464047707272e-06, 5.158164310182231e-11, 4.967615000225773e-07, 4.647110179958793e-08, 3.795565102642113e-10, 1.2897546868072545e-09, 8.86076682196531e-10, 2.0339791742161595e-05, 0.0008159153268438031, 2.7486111760898043e-06, 3.655297731788898e-12, 2.3473074977922872e-08, 0.00019152366434257474, 3.839534531103524e-08, 7.774322566533484e-05, 0.00020252641430490927, 5.793019218043422e-09, 2.153424306202393e-08, 0.0009193019613873915, 1.1318626838642848e-22, 5.109773412108666e-13, 0.0005874650107451249, 5.248096157196056e-09, 6.1418849379104115e-09, 7.400863961555774e-12, 2.052808401452867e-09, 7.343946418219191e-06, 1.1606600801370971e-06, 2.654869219305436e-08, 9.092932542029658e-12, 3.292028639934992e-08, 6.655632840572176e-08, 1.4102956262413203e-07, 4.851909465370888e-06, 5.144206544674169e-05, 1.0792890704294546e-10, 2.1049771647690714e-08, 7.718479285224654e-10, 2.8662811888863376e-10, 1.3978222548279194e-09, 2.986753958894677e-10, 5.31606397019499e-08, 4.38368269331526e-09, 1.702794719062836e-08, 7.80096644788455e-05, 1.3411212792907032e-06, 8.682826148544467e-09, 3.007426819911207e-06, 9.121520894656969e-13, 1.813937323305659e-08, 1.0447769147622175e-08, 0.0009275891131246187, 8.689672687074214e-10, 3.027047214618285e-06, 2.846157285903235e-08, 5.413098340367323e-07, 1.4958542458325608e-10, 0.0006364391169869381, 3.124544076266902e-10, 2.2489884136177832e-11, 9.21070875717484e-15, 0.00032252430776483405, 8.972620278254234e-13, 3.044555741014991e-11, 5.8435858610317365e-05, 2.3433873861414687e-16, 8.200354092115982e-11, 3.727318455258318e-08, 0.00021235280938965024, 2.5873940888927644e-10, 3.018411011416271e-11, 1.2459631962327426e-08, 0.0007983912330439719, 
9.96660500971056e-08, 1.739687330943335e-13, 1.1372306483949806e-07, 6.395271071849692e-10, 5.1116222731220135e-06, 8.86908815523134e-14, 4.5803218797955506e-09, 2.779019862173935e-10, 3.812276122744424e-09, 1.801274971982778e-07, 2.995689439611615e-06, 4.585266181865767e-15, 5.091198871187805e-12, 1.397222645996513e-09, 7.376425542690248e-16, 2.322059036659243e-08, 2.0938663457627997e-13, 1.1709097129213665e-12, 6.609072907748888e-12, 3.4827980297187755e-05, 6.352479557297379e-14, 1.8506693051849351e-09, 4.892122270724634e-13, 0.0010963127989251369, 1.0188939675464824e-08, 2.5129766399793766e-07, 2.667093876706298e-11, 1.8519086728336602e-09, 1.4256929197502797e-07, 3.3510988718592125e-07, 5.680454931788119e-06, 1.3101988493375635e-09, 7.372365894655826e-08, 0.0009256418566043562, 1.780681792637468e-07, 1.4965487989261619e-06, 1.9798689636260654e-05, 7.094246543812741e-15, 1.1548041569223672e-08, 8.140689988394089e-07, 8.205841276821068e-06, 0.0009839887895648476, 1.4953029273470841e-09, 7.492618462080048e-07, 1.867244726433662e-09, 0.00018455918371737705, 1.2224295271546084e-14, 2.2202307361292331e-07, 9.98219843616936e-19, 3.147209181054101e-08, 3.324336540816189e-06, 0.00017409385387488554, 0.00010219450226311733, 3.31054162517989e-11, 5.204930377488526e-06, 1.8062127134397314e-10, 0.0011950037713024332, 6.127424499582621e-06, 0.00028872786500399607, 1.6660385445740052e-16, 1.707780250015572e-11, 3.8056031733248346e-07, 2.241183648268349e-11, 5.267868197496036e-10, 1.1917414255525315e-13, 4.332888465510437e-08, 7.73114492091818e-12, 0.0003517945463538282, 1.1862780405296587e-16, 1.557617406370498e-07, 2.169871655450448e-07, 7.182860257124418e-13, 1.89373009815254e-09, 2.2748386723387206e-07, 3.1078896875751497e-07, 7.350623369207316e-05, 4.0190367310238875e-14, 6.057334857743309e-10, 4.084154322240328e-09, 0.00014322319663622815, 4.5614671599966735e-12, 3.2893306462284325e-08, 0.00025113911898729766, 5.08395929969555e-09, 2.024226663204474e-12, 
7.016137131300849e-13, 5.156916447690803e-12, 6.502639452049721e-19, 7.285823823321273e-12, 2.1674705891872725e-11, 5.333690611294211e-20, 5.040026205228493e-15, 4.585851607005293e-11, 8.246886723127956e-11, 0.0003723462669760496, 3.990247507792199e-12, 0.00156705001117692, 1.3758010634223755e-12, 1.0299628400810168e-09, 8.933225377705857e-09, 8.901580469544122e-07, 2.217327159404894e-12, 1.5508967158087033e-09, 2.236883507333756e-07, 2.9624387190639277e-15, 6.748344960228655e-08, 0.0004943009856952753, 4.70617692375856e-09, 1.4891720593185687e-06, 9.915030651935955e-09, 0.0001510106212081118, 7.762453865979827e-05, 5.311837414232963e-11, 4.954958518433628e-08, 5.009504582947807e-09, 6.943218116842035e-05, 0.0006041056391221985, 0.0008095591362145966, 8.189362198498396e-06, 6.25167440199726e-08, 8.279906516510467e-18, 1.586859493231418e-10, 4.7678515159203866e-08, 0.0021097718537092863, 6.983635371791894e-10, 2.0009595437204914e-09, 4.7632839490376444e-07, 1.2532618741358848e-09, 0.00037695309555736814, 4.2639987385369605e-11, 7.580770605793608e-09, 1.4877672495958344e-06, 9.95942482387738e-06, 0.0008082842918102994, 4.006912671033555e-12, 5.380535232480569e-07, 2.1782787478020208e-14, 0.000686720219645657, 1.6030716759180468e-11, 5.602760870599262e-06, 5.146229557973095e-06, 1.6504145067743444e-14, 0.0019324516597354524, 3.813252266580312e-07, 9.676425333730414e-14, 1.918018516455246e-06, 2.4026949754555214e-10, 7.434335512721491e-06, 0.0005008379074111018, 4.638714920652088e-09], 'logloss_error_first': [0.0015234003968350445, 0.0015233738405423319, 0.0015233447762089492, 0.0015233812714497469, 0.0015233214874565216, 0.0015233490585480228, 0.001523341549761037, 0.0015233000037057794, 0.0015233209082482952, 0.0015231124940479628, 0.0015233515107079233, 0.001523141223832112, 0.0015232662917092437, 0.0015233253246701155, 0.001522973133018776, 0.001523251649928762, 0.0015232138855637229, 0.001523201194825255, 0.001523160432892891, 0.0015232168578249371, 
0.001523027453862033, 0.0015229500978430302, 0.0015228602699984917, 0.001522904084789044, 0.0015230067470248631, 0.0015230410071083882, 0.0015233040161669337, 0.0015230069644800585, 0.0015230623065452455, 0.0015227791360777766, 0.0015223074002725526, 0.0015223735689530468, 0.001524128437545067, 0.0015232580361666602, 0.001523033402016316, 0.0015228520545722973, 0.0015230496461757803, 0.001522540100731408, 0.0015225668225507265, 0.0015219819234424923, 0.001523039429470259, 0.0015232927815016764, 0.0015227613097107968, 0.0015223038965072133, 0.0015228853054912144, 0.0015227298639085402, 0.0015220295739843317, 0.0015226892851485528, 0.0015227932962377907, 0.0015217194515481924, 0.001522064444988134, 0.0015223914265868103, 0.001521799546624758, 0.0015223372643226808, 0.0015220776530113329, 0.0015223068234881787, 0.0015232247106755272, 0.0015215605171710334, 0.0015216503048639581, 0.0015225222323004188, 0.0015236623280605898, 0.0015222393912132585, 0.0015222197687266798, 0.0015221195958456755, 0.001522163953699453, 0.0015223825915319667, 0.0015193724659199653, 0.0015221758558402513, 0.0015222475145829735, 0.00152162322496333, 0.0015220396845244469, 0.0015223107267497277, 0.001523236150563094, 0.0015213582370443958, 0.0015230119193372547, 0.0015219193981981273, 0.0015218296439157544, 0.0015205836445600842, 0.001521500769917492, 0.0015224982438375777, 0.0015228156396699657, 0.001520220069117461, 0.0015226054376794391, 0.0015211581809429142, 0.0015219738588987996, 0.0015222053764901303, 0.0015222878360682008, 0.0015226031129102512, 0.001521709272410936, 0.0015219943858522397, 0.0015218661915817576, 0.0015189931738554391, 0.0015217302571296045, 0.0015204307179893263, 0.0015188254927842257, 0.0015233613543587496, 0.0015222924415062631, 0.0015212109386966816, 0.0015211074158933975, 0.0015204780935180527, 0.0015227241810493938, 0.0015199992747022921, 0.001520485711109827, 0.001522756138635128, 0.0015223162641678993, 0.0015232381874250701, 0.001521044479050845, 
0.0015231830050895466, 0.0015195254690907108, 0.0015154772472154073, 0.0015199085894539973, 0.0015206492606068185, 0.0015217826734002405, 0.001520964304822024, 0.0015202818386006141, 0.0015232298299459976, 0.0015207984948132358, 0.0015247005206987308, 0.0015224746223485362, 0.0015198888321175043, 0.0015220768955216118, 0.0015192585147013574, 0.001520464613088663, 0.001519942574494359, 0.0015207091068633516, 0.0015212099200119815, 0.001519464148432256, 0.0015166357448680905, 0.0015204966449974113, 0.0015215038395764813, 0.001523328039462772, 0.0015229604034443088, 0.0015224410790971319, 0.0015201555721294003, 0.0015194232452076094, 0.0015211070223988903, 0.0015220095532314604, 0.001523525864746502, 0.0015205315668997484, 0.0015188361056467016, 0.001523043657786991, 0.001519661540893046, 0.0015235079456669036, 0.0015198986532767294, 0.001521422345157828, 0.0015236081042384329, 0.0015190348790410006, 0.0015197090523456811, 0.0015194306825304295, 0.0015219413653719189, 0.0015187492596638187, 0.001520274800840339, 0.0015216124703961486, 0.001520438071201591, 0.0015210693608307955, 0.0015165985501535374, 0.0015206037991409422, 0.001521403072183964, 0.0015228366360304758, 0.0015213312626370626, 0.0015201611213159572, 0.0015156475110406269, 0.0015220219400978647, 0.0015202000072947856, 0.0015205840918388522, 0.0015187621423338822, 0.0015185019344413754, 0.0015214956649431098, 0.0015163630369386674, 0.0015202344161462035, 0.0015183079236251536, 0.0015189124714452605, 0.001518275032652551, 0.001518891125301858, 0.001523571787327787, 0.0015204129518964088, 0.0015212435634253236, 0.0015156260101870508, 0.0015161009275643878, 0.0015190371658846138, 0.0015194249661503818, 0.0015196249680979853, 0.0015205587040520965, 0.00152221195066158, 0.0015169130456938104, 0.0015181292418789305, 0.0015242066423888556, 0.001518761163511701, 0.0015230862634513821, 0.001516219234885954, 0.0015194417552265587, 0.0015206264816159888, 0.0015207223545743692, 0.0015189370718014657, 
0.0015181395599357431, 0.0015185153132665592, 0.0015218547528826046, 0.001523748076949043, 0.0015203694024763302, 0.001520722904585724, 0.0015200318212441244, 0.0015138399487839337, 0.0015135997364931388, 0.0015186018832215819, 0.0015155037534368466, 0.0015169418321816061, 0.0015194697838462551, 0.0015220266863934843, 0.0015110570029253066, 0.001519335642374135, 0.0015043987168535011, 0.0015176887917478146, 0.0015190876672258684, 0.0015147630614707132, 0.0015202209842846192, 0.0015215709603005063, 0.0015210820947092508, 0.0015166829603849264, 0.0015194293905007427, 0.001517516073568095, 0.001520544624409049, 0.0015170651659999416, 0.0015210047006533327, 0.0015214570752815932, 0.00152349896250768, 0.001518782504468521, 0.0015195716398392284, 0.0015170838290532083, 0.0015128368420977549, 0.0015220429842240217, 0.0015202040073889439, 0.001521466954440443, 0.001520045706017605, 0.0015160988696156427, 0.0015200240409951134, 0.0015190548215916445, 0.0015164483509751948, 0.0015175833221782783, 0.0015168386085356715, 0.001521314756731103, 0.0015236440314815478, 0.001520893909218387, 0.001514957944068496, 0.001518763139660834, 0.0015227713520102478, 0.001518174453368019, 0.001522168944807809, 0.0015230252927574803, 0.0015181024731964724, 0.0015183649839279416, 0.0015240134577922493, 0.0015031923010619735, 0.0015131912713093275, 0.0015237833627599826, 0.0015177030978126812, 0.001517663003531718, 0.001514009841586273, 0.0015173964764301374, 0.001521462115545991, 0.0015203825587736103, 0.0015184047630171344, 0.0015146495833091853, 0.0015183035328481423, 0.0015186580482666604, 0.001519278555112284, 0.0015207476801285859, 0.0015216348033474183, 0.0015157764568272916, 0.0015181441280942704, 0.0015160482197362837, 0.0015153843885420653, 0.0015169869804710166, 0.0015159292282907275, 0.001518339646500471, 0.001517234764685407, 0.001517621780858415, 0.0015219679350248403, 0.0015199429711060199, 0.0015173883643208907, 0.0015199756385123418, 0.0015128396100572053, 
0.0015178193142898738, 0.001517256860285915, 0.0015241236492659026, 0.0015161114788606641, 0.0015202516420560633, 0.0015177524459265151, 0.0015194623480274628, 0.0015153966577003395, 0.001523379468080435, 0.001514973304729164, 0.0015142931058930364, 0.001510165763119825, 0.0015227712225540859, 0.0015126858140143327, 0.0015141157756705416, 0.001521492211322157, 0.0015068993848439361, 0.0015144145856671887, 0.0015176089067353614, 0.0015224446252386017, 0.0015151403623998782, 0.0015136346547357398, 0.001517195215616799, 0.0015235613003028619, 0.0015180847188984755, 0.001510517548604127, 0.0015185222694708209, 0.0015155231678054294, 0.00152023467318555, 0.0015103291417393329, 0.001516203024100065, 0.0015146127394263626, 0.0015159676060946625, 0.0015183114088051463, 0.0015197985369034427, 0.0015080141828034336, 0.0015123879299268346, 0.0015149926323790527, 0.0015062994496767654, 0.0015170868117454685, 0.0015095340241115516, 0.0015110022628980126, 0.0015125058336424371, 0.0015213342319463483, 0.0015088654147857945, 0.0015146923585651066, 0.001510074196603196, 0.0015243734014149433, 0.00151626098267167, 0.001518579443439853, 0.001511833286231037, 0.0015152975847062238, 0.0015179166991154398, 0.0015176944766360852, 0.0015202347059658052, 0.0015154318994670395, 0.0015176085315177358, 0.0015236783418925623, 0.0015177738584638346, 0.001519350563144189, 0.0015208462751378413, 0.0015066907710553656, 0.0015160963625739348, 0.0015188012757429274, 0.0015204263126237536, 0.0015240961003314445, 0.0015145414568423759, 0.0015183572126730531, 0.0015143288133244465, 0.0015214554435802735, 0.001507504412059364, 0.0015175824612075704, 0.0014998855469817755, 0.0015166999915069688, 0.0015192651515107268, 0.001522542194390611, 0.0015217940167789581, 0.0015120154568639983, 0.0015197334414352498, 0.0015128195271229415, 0.001524729330765104, 0.0015189682826642615, 0.0015228136690107902, 0.0015033429160301842, 0.00151078405551328, 0.0015173915008355405, 0.0015107026880512662, 
0.0015131798157469818, 0.0015062381872341913, 0.0015162567960575633, 0.001510121214661497, 0.001522409502243177, 0.0015024012754307483, 0.001517399004740556, 0.0015178218194878987, 0.0015093298491950131, 0.0015130775714984016, 0.001517456162331717, 0.0015180696591715198, 0.0015212961557630167, 0.0015069224332611424, 0.0015129031862623381, 0.0015141581865846135, 0.0015223645748004347, 0.00150914089856786, 0.0015149674498632902, 0.001523042877866342, 0.001514818211463972, 0.00150923880040689, 0.0015083734214874714, 0.0015090500456102645, 0.0014968717849569758, 0.0015094872093321678, 0.0015101715419956216, 0.0014949805293100974, 0.0015021200441933935, 0.0015110252399337682, 0.001509522682236837, 0.0015220827819676485, 0.001509073524604779, 0.001525634202262724, 0.0015069876709012343, 0.001511568091918832, 0.0015135685761351867, 0.0015176337672639113, 0.0015073879338892994, 0.0015130436567784345, 0.0015156310258756908, 0.001501882419587172, 0.0015166059906152418, 0.0015240381125561826, 0.001513911188305067, 0.0015176679985601362, 0.0015144400380551722, 0.001521741886577195, 0.001521073833378377, 0.0015106709663091065, 0.0015154069539186745, 0.0015133695107780365, 0.0015209232751339184, 0.0015239370598726066, 0.0015254279433934528, 0.0015199160040492321, 0.0015153097466138283, 0.0014975132881629286, 0.001510683750549295, 0.0015154133302617886, 0.0015292043298076273, 0.001511606505916653, 0.001512738494676759, 0.0015161282892638653, 0.0015125991222108203, 0.001522571410872539, 0.0015102923732418488, 0.0015130476867369345, 0.0015179957203935244, 0.0015183034780378018, 0.0015245720000270696, 0.0015079636609056349, 0.0015172064927667032, 0.001502441937670564, 0.0015240464105194251, 0.0015085337701091347, 0.001520275467421626, 0.0015192933351014278, 0.0015032674713348253, 0.0015267168562242174, 0.0015162913458764342, 0.0015044632472619676, 0.0015173825585052443, 0.0015115537723872341, 0.0015179731210517133, 0.0015230149119716673, 0.0015123718291627987], 'logloss_error_last': 
[0.00020978530635112937, 2.0194054334952113e-05, 0.0001676773394993022, 0.0008384021169670378, 0.00028497327201385935, 0.0003654203953882387, 0.0005675641742967892, 0.0002811744163391883, 0.0005071064619957113, 2.458227587919548e-05, 0.001061264562148203, 8.250951628362739e-05, 0.00040361616996438803, 0.000785127450946274, 3.756992232759174e-05, 0.0005555726119569959, 0.00040448551759899557, 0.000433554201814113, 0.0003825499847921096, 0.0006127273999459611, 0.00016414042382830905, 0.00013317067877506228, 9.462477462145791e-05, 0.00015276862266716342, 0.0003157957266485935, 0.0004368364862662239, 0.0013193441561650775, 0.00039961193748981025, 0.00019206393611288005, 0.00014976209799393554, 1.8576813375271446e-05, 3.5338803349024875e-05, 0.00575615258348561, 0.0013110196050876795, 0.0006311947423785505, 0.00032088539241973324, 0.0002649774353002637, 0.000103304925951682, 0.00012777937962502742, 6.6132158904818045e-06, 0.00040605496234636756, 0.001464567149877352, 0.00033115849771411197, 4.128747094178543e-05, 0.0005286307878211744, 0.000177057738233533, 2.8309552887520488e-05, 0.00023660445263798985, 0.00044344075283511014, 2.00605258085123e-05, 7.33112735844042e-05, 0.00018285719086864752, 4.938154358294744e-05, 0.00019047257049079603, 0.00011245353207135723, 0.0001597926961198958, 0.0009741594918361163, 4.058270112651556e-05, 5.3042280052160266e-05, 0.00034789668752146743, 0.00198141179321134, 0.000216301123647949, 0.00021702132603099056, 0.00018695718884936761, 0.00015798110682379194, 0.0002987493578294071, 9.196138057862581e-07, 0.0002474328285143101, 0.0002721588692930473, 7.585187608600969e-05, 0.0002088696508061831, 0.00034833160238418405, 0.0012903937898556736, 7.470721229448368e-05, 0.0008138689270133023, 0.0002044067007779345, 0.00018064055315998391, 2.7083774194392505e-05, 0.00012907080939045888, 0.0005419501002092794, 0.0008565447115919178, 1.9072302219458775e-05, 0.00044940924354804795, 6.792590894248325e-05, 0.000228135113761492, 
0.00032756366197709885, 0.00044598507359366575, 0.0006703639975733745, 0.00022054425338402858, 0.0003172052128391093, 0.0002823433969676773, 4.718046912805218e-06, 0.0002533695883772441, 4.928031772639033e-05, 5.32522374207481e-06, 0.0013804105974993218, 0.0005219617265359577, 0.0001474156993571108, 0.00014022495142865428, 6.705391098393657e-05, 0.0008869751248837809, 3.287706087933044e-05, 6.534781596217704e-05, 0.0008218216915016385, 0.0005758836869322322, 0.0012532558498802892, 0.00014065243897361831, 0.0012788354497875933, 2.9253908720383615e-05, 3.3591396680530126e-07, 5.36257156322053e-05, 0.00012023461575357136, 0.00038310363494806747, 0.0001765376547901449, 9.119812040110741e-05, 0.0013080911446974898, 0.00014740835804096994, 0.0032997976221735377, 0.0007127918511859875, 6.568295852692339e-05, 0.0005345812656715765, 3.674036503787554e-05, 0.00012238239952235507, 7.752215315582802e-05, 0.00016404058156525367, 0.0002652564382558322, 5.756843385829204e-05, 4.66076901971279e-06, 0.0001655525922540206, 0.0004038159863215226, 0.001357968123600897, 0.0012135117326165185, 0.000849969617860503, 0.00011280004095847076, 6.230898907731879e-05, 0.0002635345794022696, 0.0005441347111411183, 0.0015602301679232067, 0.0001707247604923116, 4.7912509113224585e-05, 0.0012885342332171917, 8.755230359999908e-05, 0.0015376742094171982, 0.00011042155303328512, 0.00037432916906215014, 0.001774166336639371, 6.466465092694875e-05, 0.00011618469746470224, 9.822214193922818e-05, 0.0006624378646536931, 4.953808272637335e-05, 0.0001650314623555043, 0.00044760957939588166, 0.00023155526210866752, 0.0003091758022919503, 1.3435552346540031e-05, 0.00022838121516539304, 0.0004052092899666831, 0.0011979203842474298, 0.0004697604413897281, 0.00021475930613397756, 8.43269628140813e-06, 0.0006132477125665207, 0.00018386569460072458, 0.00024374557895918497, 8.898963745177214e-05, 7.766553487680582e-05, 0.0004525943307605522, 1.8920839437337856e-05, 0.00026731666119980967, 5.640160720484268e-05, 
8.915074005651792e-05, 5.9577550370210536e-05, 0.00012310186422481313, 0.0018430638344559597, 0.0002433330730356347, 0.0005489420794335669, 1.6029889315521627e-05, 1.6231890318131913e-05, 0.00011098629295784543, 0.00014320163840076744, 0.00016705954366932907, 0.0002917704960851704, 0.0007573851122035061, 3.249026445891807e-05, 7.206034007987177e-05, 0.0019607798233428723, 0.00010705344068816248, 0.0014315853150065156, 3.059266095264561e-05, 0.00016789120287620716, 0.00033732238939267845, 0.00035345877683680605, 0.00013139810564358892, 8.414588135129293e-05, 0.00010569496161706208, 0.0006729031826565304, 0.0016406178538885148, 0.00030977674052542293, 0.00044949654242824665, 0.00026234475354899926, 9.193565967225198e-06, 7.062075162231443e-06, 0.00012583636228814395, 2.289397440844582e-05, 6.199571486413534e-05, 0.00021325530540205296, 0.0008915695855941796, 2.7330539478432324e-06, 0.0002545790324438946, 8.76813265658286e-08, 9.079465686920623e-05, 0.0001917438402464512, 2.5996344977852745e-05, 0.00044607328258606914, 0.0006494373948283936, 0.0005221332420918708, 5.990998861767615e-05, 0.00024591088198214304, 0.00012425436709846113, 0.00041222811594448426, 9.808863474909645e-05, 0.000525062799620314, 0.0007376476627587244, 0.0014510219973835037, 0.00018148618314796586, 0.00026357308657703766, 8.027705816124675e-05, 1.0334171019259885e-05, 0.0008247080519885206, 0.0003852713423057766, 0.000781551328993032, 0.00043721270517966664, 5.668930086410804e-05, 0.0003553430299886061, 0.0002310507280198169, 8.793604652549137e-05, 0.00011732990515866375, 0.00010811378001750636, 0.0006684132401536267, 0.0017882473811885891, 0.0006001273048978551, 3.60029440037474e-05, 0.00020874763423835897, 0.0012971625552443897, 0.00021675279360845257, 0.0009035225399489323, 0.0012123484715579362, 0.0001603318926135052, 0.00018927874965173115, 0.0017376511633156663, 2.3327238613994662e-07, 2.50083497796592e-05, 0.0015556786607080384, 0.00014835251495730648, 0.00014945350275227137, 
3.90514912233856e-05, 0.0001306186385075629, 0.000779692221774478, 0.0005292883539213282, 0.00020613461397345033, 4.2174026184180434e-05, 0.00020713630473674543, 0.00023982406305847828, 0.00030199530345530445, 0.0005545143549391205, 0.0009054591939198414, 7.265522968901327e-05, 0.0001987367025536172, 0.00010254430624669855, 8.287854662699949e-05, 0.00012430044651427235, 8.428570880697371e-05, 0.00022884261915558275, 0.00014647276701088875, 0.00017776117847012504, 0.0010332131755923218, 0.00043455175778981693, 0.00016329179740656095, 0.00046157698990838676, 2.656320801329761e-05, 0.000198320121772539, 0.00016402527770785692, 0.001937906251591113, 0.0001035233940667467, 0.0005076839013234518, 0.00020239758059319605, 0.0003771556448708057, 7.884595831001799e-05, 0.00160444927678889, 8.440957893302067e-05, 5.3045435445297026e-05, 1.245534593220382e-05, 0.0013603714295935983, 2.901496754698459e-05, 5.3799492530674636e-05, 0.0008243004095239532, 4.4252251328977835e-06, 6.435487345075979e-05, 0.00024409272281183218, 0.0011134393258808515, 8.451994576337843e-05, 5.812237356734257e-05, 0.00018327102018821169, 0.0017201742961408767, 0.00025967007216001883, 1.976829724297985e-05, 0.0002920060433279794, 0.00010318471381507331, 0.0005568439573955918, 1.6387678814127668e-05, 0.00014340955567363652, 8.203694397801321e-05, 0.0001510332300705574, 0.00029809739014666794, 0.0005488724709687879, 9.498132369233822e-06, 3.834436644158794e-05, 0.0001193195545310481, 6.070807656636289e-06, 0.00020339559437999776, 1.964248871588454e-05, 3.200293651054357e-05, 4.286088972773316e-05, 0.0008017803572698704, 1.659814007883916e-05, 0.00012649281038223657, 2.175682043464655e-05, 0.0018314614486170384, 0.0001689695938226541, 0.0003432197698029531, 5.134504139577244e-05, 0.00014600953120330086, 0.00033670826368726296, 0.00034914297576403835, 0.0005803015876754035, 0.00012606311326019467, 0.0002635345221358478, 0.001573633122008891, 0.0002916905487691673, 0.00045729500395119384, 
0.0007188606802768063, 9.750322458262608e-06, 0.00017461708269052122, 0.00040069472874947753, 0.0006308752654970078, 0.0018999559176801366, 0.00013046724574798267, 0.00036584121656081593, 0.00011018220260113393, 0.0010841113960150634, 1.1531252458466221e-05, 0.0002957465947815158, 1.4449864086810782e-06, 0.0002189075198160359, 0.0004908386687705219, 0.0011384879030508256, 0.0009722225181790856, 5.458018662777076e-05, 0.0005589847820854501, 7.339881466652204e-05, 0.001942628540335957, 0.0005642847400322173, 0.0013812325864024173, 4.7603149932854734e-06, 4.2839293219074165e-05, 0.00035835492634366564, 5.037581646988456e-05, 8.957419647959877e-05, 1.5062955447668229e-05, 0.00021879693352760656, 4.5025617190154246e-05, 0.0013583486166290537, 4.364179880051225e-06, 0.0002949649619977162, 0.00032446907848134986, 2.7626118539027616e-05, 0.00011939877890215337, 0.0003118716551662776, 0.0003532108892049776, 0.0008926015108459074, 1.4816666956248275e-05, 0.00010893331286066212, 0.00013494110383433435, 0.0012336045958051762, 3.870324050781085e-05, 0.00021712779469541438, 0.0012544566180841294, 0.0001539755251917328, 3.220570951551899e-05, 2.6069182340506813e-05, 4.120377772087294e-05, 1.4081185934896985e-06, 3.938085695707573e-05, 4.851076312171508e-05, 8.696986617018658e-07, 7.849295056182853e-06, 6.117705642987659e-05, 5.9299529854519326e-05, 0.0013366501495594588, 3.7011011074764703e-05, 0.0024802811900902686, 2.9441285084680375e-05, 0.00010760308247971825, 0.00017208099628097747, 0.00038205387384365703, 3.367668183351691e-05, 0.00011694646975665316, 0.00031173235697726253, 8.200768744140627e-06, 0.00026869378529712553, 0.0017230069588047437, 0.0001467888321540745, 0.00040595108424857637, 0.0001986501737636623, 0.0011948689254727555, 0.0010535856237095986, 6.294381455152524e-05, 0.00022503673594750229, 0.00017374449704603788, 0.0008558527940292877, 0.0015086837813402128, 0.0018267967765365446, 0.0006331412267257104, 0.00027596081559079174, 2.9523463231317684e-06, 
7.161856484288053e-05, 0.00022857941739811853, 0.003980252034871689, 9.38586140961911e-05, 0.0001209678710093661, 0.0003119336852593936, 0.00011544701665661156, 0.0014358139134391713, 6.227060610633682e-05, 0.00017709281223565735, 0.0004345008232166506, 0.0006368062914278314, 0.001700005931608288, 3.6991274660716024e-05, 0.0003664253288046321, 1.3371735702402839e-05, 0.0015612269464980268, 4.6260120356759445e-05, 0.000775122008981778, 0.000572438615783536, 1.225534422262342e-05, 0.002952963705648301, 0.00031860542788857545, 1.7166940418418426e-05, 0.0005095969117210861, 9.167345383705244e-05, 0.0006118561483731561, 0.0015599776201566623, 0.00013150139158566042]}
print(values)
answers_to_questions = '''Answers to the questions:
1) 0.00002
2) 0
3) 0.00152
4) 0.0058
5) expanded
6) expanded
'''
print(answers_to_questions)
'''
Stage 1
probs = list()
for row in x_test[:10]:
probs.append(model.predict_proba(row, [0, 0, 0, 0]))
print(probs)'''
'''
Stage 2
model.fit_mse(x_train, y_train[target_column[0]])
predictions = model.predict(x_test)
score = accuracy_score(y_test, predictions)
parameters = {'coef_': model.coef_, 'accuracy': score}
print(parameters)'''
'''
Stage 3
model.fit_log_loss(x_train, y_train[target_column[0]])
predictions = model.predict(x_test)
score = accuracy_score(y_test, predictions)
parameters = {'coef_': model.coef_, 'accuracy': score}
print(parameters)'''
|
986,662 | f300a66a04c60a0a69b0606bd5a5bb909b1f9b6c | ID = "bmpr"
PASSWORD = "jm0976"
|
986,663 | b788af5b097a5067126018dccb2e5d4394e92534 | print 10/3
print 10./3
print int(10./3)
|
986,664 | 5785082b4cd5d588c1c5a998ea625311f7d9d89e | from mmRemote import *
import os
import mm
import sys
import subprocess32
from subprocess32 import Popen, PIPE, STDOUT
import time
import psutil
import sys
from pathlib import Path
class Mesh(object):
    """Automate conversion of a 3-D scan: PLY -> cleaned STL -> USDZ.

    The print number is read from sys.argv[1]; e.g. running
    ``python mesh.py 7`` processes 3D_MODELS/Print_7/Print_7.ply.
    Pipeline: MeshLabServer produces a surface STL, Meshmixer (driven
    via mmRemote) makes it solid and separates shells, then a helper
    shell script converts the exported STL to USDZ.
    NOTE(review): Python 2 code (print statement in USDZ_Convert).
    """

    def __init__(self):  # Define the location of dependencies
        # self.PLY_File = PLY_File
        self.remote = mmRemote()  # remote-control handle to a running Meshmixer
        self.cmd = mmapi.StoredCommands()  # mmapi presumably provided by `from mmRemote import *` -- TODO confirm
        self.PLY_File = 'Print_' + sys.argv[
            1] + '.ply'  # filename of PLY file - inputted as argument when calling function
        self.USDZ_File = 'Print_' + sys.argv[1] + '.usdz'
        self.STL_File = 'Print_' + sys.argv[1] + '.stl'
        self.folder = 'Print_' + sys.argv[1] + '/'
        print (self.PLY_File)
        # All artefacts live under <repo-parent>/3D_MODELS/Print_<n>/
        self.File_Dir = str(Path(os.path.dirname(os.path.realpath(
            __file__))).parent) + "/3D_MODELS/" + self.folder + self.PLY_File  # set's PLY file location
        self.Input_Dir = str(Path(
            os.path.dirname(os.path.realpath(__file__)))) + '/meshlab.app/Contents/MacOS/'  # location of MeshLabServer
        self.Output_Dir = str(Path(os.path.dirname(
            os.path.realpath(__file__))).parent) + "/3D_MODELS/" + self.folder  # location to export file to
        self.USDZ_OutDir = str(Path(os.path.dirname(
            os.path.realpath(__file__))).parent) + "/3D_MODELS/" + self.folder  # location to export file to
        self.autoML()  # constructor immediately kicks off the whole pipeline

    def autoML(self):  # call's MeshLabServer and run's automated script to produce surface STL from PLY
        # Run meshlabserver with the automl.mlx filter script in one shell invocation.
        cmd1 = 'cd ' + self.Input_Dir
        cmd2 = './meshlabserver -i ' + self.File_Dir + ' -o ' + self.Output_Dir + self.STL_File + ' -s automl.mlx'
        final = Popen("{}; {}".format(cmd1, cmd2), shell=True, stdin=PIPE,
                      stdout=PIPE, stderr=STDOUT, close_fds=True)
        stdout, nothing = final.communicate()
        log = open('log', 'w')
        log.write(stdout)
        log.close()
        # Poll (up to ~10 s) for the STL to appear before continuing.
        time_to_wait = 10
        time_counter = 0
        while not os.path.exists(self.Output_Dir + self.STL_File):
            time.sleep(1)
            time_counter += 1
            if time_counter > time_to_wait: break
        if os.path.exists(self.Output_Dir + self.STL_File):
            print("STL file exists")
            self.openFile()

    def checkIfProcessRunning(self, processName):
        # Checks if there is any running process that contains the given name processName.
        # Iterate over the all the running process
        for proc in psutil.process_iter():
            try:
                # Check if process name contains the given name string.
                if processName.lower() in proc.name().lower():
                    return True
            except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
                # Process may have exited or be inaccessible; ignore it.
                pass
        return False

    def openFile(self):
        # open Meshmixer
        subprocess32.run('open -a Meshmixer ' + self.Output_Dir + self.STL_File, shell=True)
        processName = 'meshmixer'
        # Wait (up to ~10 s) for the Meshmixer process to come up.
        time_to_wait = 10
        time_counter = 0
        while not self.checkIfProcessRunning(processName):
            time.sleep(1)
            time_counter += 1
            if time_counter > time_to_wait: break
        if self.checkIfProcessRunning(processName):
            print('Yes a ' + processName + ' process was running')
            self.Apply_Profile()
        else:
            print('No ' + processName + ' process was running')

    def Apply_Profile(self):
        # Drive Meshmixer remotely: make the surface solid, then separate shells.
        # initialize connection
        time.sleep(2)
        self.remote.connect()
        time.sleep(1)
        # apply commands
        self.cmd.AppendBeginToolCommand("makeSolid")
        self.cmd.AppendToolParameterCommand('solidType', 1)
        self.cmd.AppendToolParameterCommand('solidResolution', 250)
        self.cmd.AppendCompleteToolCommand("accept")
        self.cmd.AppendBeginToolCommand('separateShells')
        self.cmd.AppendCompleteToolCommand("accept")
        self.remote.runCommand(self.cmd)
        self.Save_File()

    def Save_File(self):
        time.sleep(1)
        save_path = self.Output_Dir + self.STL_File
        # request list of objects in scene
        objects = mm.list_objects(self.remote)
        # select main object in list
        main_obj = objects[1]  # NOTE(review): assumes the main mesh is always at index 1 -- confirm
        select_list = [main_obj]
        mm.select_objects(self.remote, select_list)
        # export to output directory
        mm.export_mesh(self.remote, save_path)
        self.Close_Program()

    def Close_Program(self):
        time.sleep(1)
        # terminate connection
        self.remote.shutdown()
        # close Meshmixer
        subprocess32.run('pkill -x meshmixer', shell=True)
        # Poll (up to ~10 s) for the exported STL before converting to USDZ.
        time_to_wait = 10
        time_counter = 0
        while not os.path.exists(self.Output_Dir + self.STL_File):
            time.sleep(1)
            time_counter += 1
            if time_counter > time_to_wait: break
        if os.path.exists(self.Output_Dir + self.STL_File):
            print("PLY file exists")
            self.USDZ_Convert()

    def USDZ_Convert(self):
        time.sleep(1)
        # Run the bundled stl_to_usdz.sh helper from the usdpython directory.
        cmd1 = 'cd ' + str(Path(os.path.dirname(os.path.realpath(__file__)))) + "/usdpython/"
        print cmd1
        cmd2 = './stl_to_usdz.sh ' + self.Output_Dir + self.STL_File + ' ' + self.USDZ_OutDir + self.USDZ_File
        final = Popen("{}; {}".format(cmd1, cmd2), shell=True, stdin=PIPE,
                      stdout=PIPE, stderr=STDOUT, close_fds=True)
        stdout, nothing = final.communicate()
        log = open('log', 'w')
        log.write(stdout)
        log.close()
# Open Terminal at 'ARSCAN' folder and enter command 'python mesh.py <filename>.ply'
# Make sure the STL file you wish to export is in the defined input directory
# Must include 'ARSCAN' folder in the defined output directory
# define path for file
Mesh()
|
986,665 | e827732ac80f81f49e5bd708b5d7f31488944902 | import tkinter
from tkinter import ttk
from tkinter import *
def sel():
    """Radio-button callback: record which parameter file was chosen.

    Stores the selected filename in the module-level ``type_str`` so that
    ``getTemp`` can find it later.
    """
    # Bug fix: type_str was a plain local, so it vanished when sel()
    # returned and getTemp() raised NameError.  Declare it global.
    global type_str
    selection = "You selected the option " + var.get()
    #label.config(text = selection)
    type_str = var.get()
    print(type_str)
def getTemp():
    """Write the Temperature entry into line 2 of the selected parameter file."""
    # Read the target filename straight from the radio-button variable
    # instead of relying on sel() having stored it in a global (the
    # original referenced an undefined name and raised NameError).
    target = var.get()
    with open(target, 'r') as f:   # context managers guarantee the handles close
        data = f.readlines()
    data[1] = TempEntry.get() + '\n'
    with open(target, 'w') as f:
        f.writelines(data)
# ----- main window -----
root = Tk()
root.title('Pulsar GUI')
root.geometry('500x500')
# Give the first 50 rows/columns weight so the notebook stretches with the window.
rows = 0
while rows < 50:
    root.rowconfigure(rows, weight=1)
    root.columnconfigure(rows, weight=1)
    rows += 1
# ----- tabbed notebook: input tab (f1) and output tab (f2) -----
n = ttk.Notebook(root)
n.grid(row=1, column=1, columnspan=50, rowspan=49, sticky='NESW')
f1 = ttk.Frame(n) #first page
f2 = ttk.Frame(n) #second page
n.add(f1, text = 'Input Tab')
n.add(f2, text = 'Output Tab')
n.select(f1)
n.enable_traversal()
# ----- star-type radio buttons; the value is the parameter file to edit -----
var = StringVar()
var.set("L")
R1 = Radiobutton(f1, text="Cepheid", variable=var, value="CepheidFile.txt", command=sel)
R1.pack( anchor = W )
R2 = Radiobutton(f1, text="RR-Lyrae", variable=var, value="RRLyraeFile.txt", command=sel)
R2.pack( anchor = W )
R3 = Radiobutton(f1, text="Common", variable=var, value="myfile.txt", command=sel)
R3.pack( anchor = W)
# ----- labelled entry fields for the stellar parameters -----
MassLabel = Label(f1, text = "Mass")
MassLabel.pack(anchor = W)
MassEntry = Entry(f1, bd = 15)
MassEntry.pack(anchor = W)
TempLabel = Label(f1, text = "Temperature")
TempLabel.pack(anchor = W)
TempEntry = Entry(f1, bd = 15)
TempEntry.pack(anchor = W)
LumLabel = Label(f1, text = "Luminosity")
LumLabel.pack(anchor = W)
LumEntry = Entry(f1, bd = 15)
LumEntry.pack(anchor = W)
XLabel = Label(f1, text = "Hydrogen Composition")
XLabel.pack(anchor = W)
XEntry = Entry(f1, bd = 15)
XEntry.pack(anchor = W)
ZLabel = Label(f1, text = "Metal Composition")
ZLabel.pack(anchor = W)
ZEntry = Entry(f1, bd = 15)
ZEntry.pack(anchor = W)
MaxPeriodLabel = Label(f1, text = "Max Period")
MaxPeriodLabel.pack(anchor = W)
MaxPeriodEntry = Entry(f1, bd = 15)
MaxPeriodEntry.pack(anchor = W)
MaxAmpLabel = Label(f1, text = "Max Amp")
MaxAmpLabel.pack(anchor = W)
MaxAmpEntry = Entry(f1, bd = 15)
MaxAmpEntry.pack(anchor = W)
# Submit currently persists only the Temperature entry (see getTemp).
submit = Button(f1, text = "Submit", command = getTemp)
submit.pack(side = RIGHT)
root.mainloop()
|
986,666 | 139dfca425343334817b743f732a2bfaf8b62e54 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# our departure point is http://commons.wikimedia.org/wiki/Category:Illustrations_by_subject
import sys
import os
import json
import urllib2
from urllib import quote
from flask import Flask, request, render_template, send_file, make_response
from retrieve import retrieve_uris
from swarm_bot import swarm_bot
from convert_images import convert_images
PATH = os.path.join('/', 'tmp','panik')
app = Flask(__name__)
def urlencode(s):
    """Percent-encode the text string *s* for safe inclusion in a URL path."""
    encoded_bytes = s.encode('utf-8')
    return quote(encoded_bytes)
app.jinja_env.filters['urlencode'] = urlencode
def make_api_query(category, q_continue=""):
    """Query the Wikimedia Commons API for members of *category*.

    Returns the decoded JSON response with the request URL stashed under
    the 'url' key, or the falsy decoded value unchanged.
    NOTE(review): q_continue is accepted but unused -- pagination is not
    implemented; only the first 500 members are fetched.
    """
    url = 'http://commons.wikimedia.org/w/api.php?action=query&generator=categorymembers&gcmtitle=' + quote(category) + '&gcmlimit=500&prop=imageinfo&iiprop=url&iiurlwidth=120&format=json'
    response = json.loads(urllib2.urlopen(url).read())
    if response:
        response['url'] = url
    return response
@app.route("/<category>")
def display(category):
    """Render the category page: subcategory links plus JPEG file entries."""
    response = make_api_query(category)
    members = []
    files = []
    if not response:
        return make_response("Page not found, 404", 404)
    try:
        for i in response['query']['pages'].values():
            if 'Category' in i['title']:
                members.append(i)
            elif 'File' in i['title'] and 'jpg' in i['imageinfo'][0]['url']:
                files.append(i)
    except KeyError:
        # Narrowed from a bare `except:` (which also swallowed
        # SystemExit/KeyboardInterrupt).  Malformed API responses lack
        # 'query'/'pages'/'imageinfo'; render whatever was collected.
        pass
    return render_template('view_basic.html', members=members, files=files, category=category)
def get_uris(category):
    """Collect thumbnail URLs of the JPEG files in *category*.

    Pages without image info (e.g. subcategories) are skipped silently.
    """
    response = make_api_query(category)
    uris = []
    for page in response['query']['pages'].values():
        try:
            thumb = page['imageinfo'][0]['thumburl']
        except KeyError:
            continue
        if 'jpg' in thumb:
            uris.append(thumb)
    return uris
def setsize(size):
    """Placeholder for a future output-size option; currently a no-op."""
    pass
@app.route("/<category>.svg", methods=['GET', 'POST'])
def generate(category):
    """Build and serve a swarm image for *category*.

    GET uses defaults; POST accepts 'colour', 'text' (uppercased) and
    'mediatype' ("cd" forces a square layout).  NOTE(review): despite the
    .svg route, the response body is the PNG written by swarm_bot.
    """
    category_path = os.path.join(PATH, quote(category))
    colour = "#000000"
    text_data = None
    square = False
    if request.method == 'POST':
        colour = request.form['colour']
        # For the typographic style, everything becomes
        # UPPERCASE:
        text_data = request.form['text'].upper()
        if request.form['mediatype'] == "cd":
            square = True
    uris = get_uris(category)
    # Cap the swarm at 32 source images.
    if len(uris) > 32:
        uris = uris[:32]
    output_file = category_path + '.png'
    files = retrieve_uris(category_path, uris)
    swarm_bot(
        output_file, convert_images(
            files, colour=colour), text_data, square)
    # Disable caching so a regenerated image is always served fresh.
    response = make_response(send_file(output_file))
    response.headers['Cache-Control'] = "no-cache"
    return response
@app.route("/")
def hello():
    """Root page: show the top-level illustrations category."""
    return display("Category:Illustrations_by_subject")
if __name__ == "__main__":
app.run(debug=True)
|
986,667 | dce4cf77cc2d494be18c7a7278abb415617fbf15 | # Add all even numbers in the Fibonacci sequence that are < 4,000,000
import time
def fibsumevens(n):
    """Sum the even Fibonacci numbers that are <= n.

    Prints the running total and elapsed time like the original script,
    and additionally returns the total so the result is usable
    programmatically (callers that ignored the old None return are
    unaffected).
    """
    # time.clock() was deprecated and removed in Python 3.8; time.time()
    # exists on both Python 2 and 3.
    t0 = time.time()
    total = 0
    a, b = 1, 1
    while True:
        nxt = a + b
        if nxt > n:
            break  # next Fibonacci term would exceed the limit
        a, b = b, nxt
        if nxt % 2 == 0:
            total += nxt
            print("running total = %i" % total)
    print("it took %f seconds " % (time.time() - t0))
    return total
|
986,668 | 23ef799f2a9dd23309c5e72532d43ec4ad7ef794 | from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
urlpatterns = patterns("",
url(r"^$", TemplateView.as_view(template_name="homepage.html"), name="home"),
url(r"^escala-de-notas/$", TemplateView.as_view(template_name="escala.html"), name="escala"),
url(r"^transition/$", TemplateView.as_view(template_name="info_sheet.html"), name="info"),
) |
986,669 | a85d3d5212a4da770f1f1e183321c1839df9d1aa | #!/usr/bin/env python
import sys
import bencode
import hashlib
if __name__ == '__main__':
    # Usage: script.py <torrent> -- prints the decoded metadata and the
    # info_hash (SHA-1 of the re-bencoded 'info' dictionary).
    if len(sys.argv) < 2:
        print('usage: %s <torrent>' % sys.argv[0])
        sys.exit(1)
    filename = sys.argv[1]
    # Torrent files are binary (bencoded); open in 'rb' so the read does
    # not attempt text decoding, which corrupts raw piece hashes on
    # Python 3 (and mangles line endings on Windows).
    with open(filename, 'rb') as torrent:
        content = torrent.read()
    content_debencoded = bencode.bdecode(content)
    m = hashlib.sha1(bencode.bencode(content_debencoded['info']))
    print(content_debencoded)
    print('info_hash: %s' % m.hexdigest())
|
986,670 | 5561e023b2c5fb43d0af9061f1403e3e1436d913 |
def find_even_number():
    """Return the even numbers from 1 to 50 inclusive, in ascending order."""
    # Idiom fix: a range with an explicit step replaces the manual
    # filter-and-append loop over range(1, 51).
    return list(range(2, 51, 2))
|
986,671 | a4ff539cb800487c5f201509278d93390f98c8ff | import flask
import threading
import subprocess
import RPi.GPIO as GPIO
import pafy
import vlc
import time
import datetime
import host_ip
import os
import validators
import sys
import alsaaudio
app = flask.Flask(__name__)
host = host_ip.ip
url_file = "/home/pi/programming/python/doorbell_getter/url.txt"
skip_file = "/home/pi/programming/python/doorbell_getter/skip.txt"
duration_file = "/home/pi/programming/python/doorbell_getter/duration.txt"
#################################### logging
def write_to_log(msg):
    """Append *msg* with a timestamp to the server log, then echo to stdout.

    Logging is best-effort: file-system errors are swallowed so a missing
    directory or full disk never takes down the doorbell handler.
    """
    try:
        with open('/home/pi/programming/python/doorbell_getter/server_log.log', 'a') as f:
            nowstr = datetime.datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
            f.write('date: {}, msg: {}\n'.format(nowstr, msg))
    except OSError:
        # Narrowed from a bare `except:` so programming errors surface
        # instead of being silently discarded.
        pass
    print(msg)
    sys.stdout.flush()
#################################### audio
def stop():
    """Stop VLC playback (also the callback target for the duration timer)."""
    global player
    player.stop()
    print("stop")
    print(player)
    sys.stdout.flush()
def play():
    """(Re)start playback from the configured skip offset.

    Rebuilds the VLC media if it was last reset more than 10 minutes ago
    (presumably because the resolved stream URL expires -- TODO confirm),
    then schedules a stop after `duration` seconds when a positive
    duration is configured.
    """
    global player
    global skip
    global duration
    global duration_timer
    global url
    player.stop()
    if time.time() - last_reset > 600:
        resetPlayer()
        print("reset player")
    player.play()
    print("play")
    player.set_time(int(skip*1000))  # VLC set_time expects milliseconds
    duration_timer.cancel()  # drop any previously scheduled stop
    if duration > 0:
        duration_timer = threading.Timer(duration, stop)
        duration_timer.start()
    print(player.get_time())
    sys.stdout.flush()
def resetPlayer():
    """Re-resolve the current YouTube URL's best audio stream into the player."""
    global url
    global Instance
    global player
    global last_reset
    video = pafy.new(url)
    best = video.getbestaudio()
    playurl = best.url
    Media = Instance.media_new(playurl)
    Media.get_mrl()
    player.set_media(Media)
    # Bug fix: without the `global last_reset` declaration above, this
    # assignment created a dead local and the module-level last_reset
    # stayed 0, so play() re-resolved the stream on every single call
    # once 600 seconds had passed since startup.
    last_reset = time.time()
def setURL(val):
    """Validate *val*; if it is a YouTube URL, adopt it and persist it.

    Returns False for a value that is not a URL at all, True when the URL
    was accepted, and None for a valid but non-YouTube URL.
    """
    global url
    global url_file
    global Instance
    global player
    if not validators.url(val):
        return False
    if "youtu" not in val:
        # Bug fix: previously the url file was rewritten even when the
        # new value was rejected, needlessly re-persisting the old URL.
        return None
    url = val
    resetPlayer()
    print("url set")
    print(player)
    sys.stdout.flush()
    with open(url_file, "w") as f:  # context manager guarantees close
        f.write(url)
    return True
def setSkip(val):
    """Parse *val* as a non-negative start offset in seconds and persist it.

    Invalid (non-numeric) values are ignored and the old skip is kept.
    """
    global skip
    global skip_file
    try:
        parsed = float(val)
    except ValueError:
        return
    skip = max(0, parsed)  # negative offsets are clamped to 0
    print("skip valid: " + val)
    sys.stdout.flush()
    # Context manager replaces the manual open/close pair.
    with open(skip_file, "w") as f:
        f.write(str(skip))
def setDuration(val):
    """Parse *val* as a playback duration in seconds and persist it.

    Non-positive values are stored as -1, meaning "play to the end".
    Invalid (non-numeric) values are ignored and the old duration kept.
    """
    global duration
    global duration_file
    try:
        parsed = float(val)
    except ValueError:
        return
    duration = parsed if parsed > 0 else -1
    print("duration valid: " + val)
    sys.stdout.flush()
    # Context manager replaces the manual open/close pair.
    with open(duration_file, "w") as f:
        f.write(str(duration))
url = "null"
skip = 0.0
last_reset = 0
duration = -1
duration_timer = threading.Timer(duration, stop)
Instance = vlc.Instance("prefer-insecure")
player = Instance.media_player_new()
f = open(url_file, "r")
setURL(f.readline())
f.close()
f = open(skip_file, "r")
setSkip(f.readline())
f.close()
f = open(duration_file, "r")
setDuration(f.readline())
f.close()
#################################### volume
mixer = alsaaudio.Mixer()
volume = mixer.getvolume()[0]
def setVolume(val):
    """Set the ALSA mixer volume to *val*, clamped to the range [0, 100].

    A non-numeric value leaves the previous volume in effect.
    """
    global volume
    try:
        volume = int(val)
        print("volume valid: " + val)
    except (TypeError, ValueError):
        # Narrowed from a bare `except:`: only conversion failures are
        # expected here; anything else should propagate.
        print("volume invalid: " + val)
    volume = min(100, max(0, volume))
    mixer.setvolume(volume)
    # Read back what ALSA actually applied.
    volume = mixer.getvolume()[0]
#################################### gpio signals
def onRise(channel):
    """GPIO rising-edge callback (doorbell pressed): start playback."""
    print("rise: ", time.time())
    sys.stdout.flush()
    play()
GPIO.setmode(GPIO.BOARD)
GPIO.setup(11, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.add_event_detect(11, GPIO.RISING, callback=onRise, bouncetime=200)
#GPIO.add_event_detect(11, GPIO.FALLING, callback=onFall, bouncetime=200)
#################################### text to speech
def speak(text):
    """Speak *text* aloud via espeak at speed 80 (fire-and-forget subprocess)."""
    subprocess.Popen(['espeak', '-s', '80', text])
################################## http server
def monitor():
    """Debug helper: log VLC player state every 10 minutes, forever.

    Intended to run in a daemon thread (see the commented-out
    monitor_thread lines below this definition).
    """
    while True:
        global player
        print("{}, {}, {}, {}".format(player, player.will_play(), player.get_state(), player.get_length()))
        time.sleep(600)
#monitor_thread = threading.Thread(target=monitor, args=())
#monitor_thread.start()
####################################
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>', methods = ['POST', 'GET'])
def catch_all(path):
    """Single handler for the whole control UI.

    POST endpoints: /play, /stop, /url (also updates skip and duration),
    /speech, /volume.  Every request -- GET or POST, known path or not --
    re-renders the index page with the current settings.
    """
    global url
    global host
    if flask.request.method == 'POST':
        if path == 'play':
            play()
        elif path == 'stop':
            stop()
        elif path == 'url' :
            setURL(flask.request.form['url'])
            setSkip(flask.request.form['skip'])
            setDuration(flask.request.form['duration'])
        elif path == 'speech':
            speak(flask.request.form['speech'])
        elif path == 'volume':
            setVolume(flask.request.form['volume'])
    return flask.render_template('index.html', url=url, skip=skip, duration=duration, volume=volume)
if __name__ == '__main__':
app.run(host=host, debug=False)
|
986,672 | e53fde9fb11d7d4497c994de43f75222f43064a4 | import multiprocessing as mp
def job(a,b):
    # Worker body: echo both arguments (Python 2 print statement).
    print a,b
if __name__=='__main__':
    # create start And join
    # Spawn one child process running job('Hello', 'MultiProcessing')
    # and block until it finishes.
    p1 = mp.Process(target=job,args=('Hello','MultiProcessing'))
    p1.start()
    p1.join()
|
986,673 | 76ae225f13a2de1dcf8cdedea5e12c6b7af93fc4 | class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
    def rotateRight(self, head: "ListNode", k: int) -> "ListNode":
        """Rotate the list to the right by k places and return the new head.

        k may exceed the list length, so it is first reduced modulo the
        length; the node k-from-the-end then becomes the new head.
        """
        if k == 0 or not head or not head.next:
            return head
        # Measure the list so k can be reduced modulo its length.
        length = 0
        cur = head
        while cur:
            length += 1
            cur = cur.next
        k = k % length
        if k == 0:
            # Bug fix: when k is a non-zero multiple of the length the
            # original fell through, ended up with p == cur == tail, and
            # returned tail.next (None).  A full rotation is the identity.
            return head
        # Two-pointer walk: advance p k nodes ahead, then move both until
        # p reaches the tail; cur then sits just before the new head.
        cur = p = head
        for _ in range(k):
            p = p.next
        while p.next:
            p = p.next
            cur = cur.next
        new_head = cur.next
        p.next = head    # splice the old tail onto the old head
        cur.next = None  # cut the list at the new tail
        return new_head
if __name__ == '__main__':
    # Ad-hoc smoke test: rotate the two-node list 1 -> 2 right by 2 and
    # print the resulting values, one per line.
    solu = Solution()
    head = ListNode(1)
    head.next = ListNode(2)
    # head.next.next = ListNode(3)
    # head.next.next.next = ListNode(4)
    # head.next.next.next.next = ListNode(5)
    ans = solu.rotateRight(head, 2)
    while ans:
        print(ans.val)
        ans = ans.next
986,674 | 517f9666499a8a12a18d438fd751b4ed5b7e9849 | import pytest
from widgetastic.widget import Checkbox
from widgetastic.widget import View
from widgetastic_patternfly4 import ColumnNotExpandable
from widgetastic_patternfly4 import CompoundExpandableTable
from widgetastic_patternfly4 import ExpandableTable
from widgetastic_patternfly4 import PatternflyTable
from widgetastic_patternfly4 import RowNotExpandable
TESTING_PAGE_URL = "https://patternfly-react.surge.sh/components/table"
SORT = [
(
"This is a really long table header that goes on for a long time 1.",
"ascending",
["a", "one", "p"],
),
(
"This is a really long table header that goes on for a long time 1.",
"descending",
["p", "one", "a"],
),
(
"This is a really long table header that goes on for a long time 3.",
"ascending",
["a", "b", "k"],
),
(
"This is a really long table header that goes on for a long time 3.",
"descending",
["k", "b", "a"],
),
]
@pytest.mark.parametrize("sample", SORT, ids=lambda sample: "{}-{}".format(sample[0], sample[1]))
def test_sortable_table(browser, sample):
    """Sorting by a header in a given order yields the expected column values."""
    header, order, expected_result = sample
    table = PatternflyTable(
        browser,
        ".//div[@id='ws-react-composable-c-table-composable-sortable--wrapping-headers']/table",
    )
    table.sort_by(header, order)
    # Read the whole table and project out just the sorted column.
    column = [row[header] for row in table.read()]
    assert column == expected_result
@pytest.mark.parametrize(
    "sample", [("select_all", True), ("deselect_all", False)], ids=("select", "deselect")
)
def test_selectable_table(browser, sample):
    """select_all / deselect_all toggles every enabled row checkbox."""
    method, expected_result = sample
    table = PatternflyTable(
        browser,
        ".//div[@id='ws-react-composable-c-table-composable-selectable-with-checkbox']//table",
        column_widgets={0: Checkbox(locator=".//input")},
    )
    # Invoke table.select_all() or table.deselect_all() by name.
    getattr(table, method)()
    for row in table:
        if row.index != 1:  # skip row with disabled checkbox
            assert expected_result == row[0].widget.selected
def test_expandable_table(browser):
    """Exercise ExpandableTable: read, non-expandable first row, expand/collapse.

    The expected data mirrors the demo table on the Patternfly docs page.
    """
    expected_read = [
        {
            "Repositories": "one",
            "Branches": "two",
            "Pull requests": "a",
            "Workspaces": "four",
            "Last commit": "five",
        },
        {
            "Repositories": "parent 1",
            "Branches": "two",
            "Pull requests": "k",
            "Workspaces": "four",
            "Last commit": "five",
        },
        {
            "Repositories": "parent 2",
            "Branches": "two",
            "Pull requests": "b",
            "Workspaces": "four",
            "Last commit": "five",
        },
        {
            "Repositories": "parent 3",
            "Branches": "2",
            "Pull requests": "b",
            "Workspaces": "four",
            "Last commit": "five",
        },
        {
            "Repositories": "parent 4",
            "Branches": "2",
            "Pull requests": "b",
            "Workspaces": "four",
            "Last commit": "five",
        },
        {
            "Repositories": "parent 5",
            "Branches": "2",
            "Pull requests": "b",
            "Workspaces": "four",
            "Last commit": "five",
        },
        {
            "Repositories": "parent 6",
            "Branches": "2",
            "Pull requests": "b",
            "Workspaces": "four",
            "Last commit": "five",
        },
    ]
    row1_expected_content = "single cell"
    row2_expected_content = "single cell - fullWidth"
    row3_expected_content = "single cell - noPadding"
    table = ExpandableTable(
        browser, ".//div[@id='ws-react-composable-c-table-composable-expandable']/table"
    )
    assert table.read() == expected_read
    # First row is not an expandable row
    assert not table[0].is_expandable
    with pytest.raises(RowNotExpandable):
        table[0].expand()
    parent1_row = table[1]
    parent2_row = table[2]
    parent3_row = table[3]
    parent1_row.collapse()  # The row starts out expanded on the demo page
    assert not parent1_row.is_expanded
    assert not parent1_row.content.is_displayed
    # Each expand() must both report expanded and reveal the row content.
    parent1_row.expand()
    assert parent1_row.is_expanded
    assert parent1_row.content.is_displayed
    assert parent1_row.content.read() == row1_expected_content
    parent2_row.expand()
    assert parent2_row.is_expanded
    assert parent2_row.content.is_displayed
    assert parent2_row.content.read() == row2_expected_content
    parent3_row.expand()
    assert parent3_row.is_expanded
    assert parent3_row.content.is_displayed
    assert parent3_row.content.read() == row3_expected_content
@pytest.mark.parametrize(
"use_different_widgets",
[True, False],
ids=["diff-widgets-expandable-content", "same-widget-expandable-content"],
)
def test_compound_expandable_table(browser, use_different_widgets):
table_read = [
{
"Repositories": "siemur/test-space",
"Branches": "10",
"Pull requests": "4",
"Workspaces": "4",
"Last commit": "20 minutes",
5: "Open in Github",
},
{
"Repositories": "siemur/test-space",
"Branches": "3",
"Pull requests": "4",
"Workspaces": "2",
"Last commit": "10 minutes",
5: "Open in Github",
},
]
row0_branches_read = {
"table": [
{
"Repositories": "parent-0",
"Branches": "compound-1",
"Pull requests": "three",
"Workspaces": "four",
"Last Commit": "five",
},
{
"Repositories": "a",
"Branches": "two",
"Pull requests": "k",
"Workspaces": "four",
"Last Commit": "five",
},
{
"Repositories": "p",
"Branches": "two",
"Pull requests": "b",
"Workspaces": "four",
"Last Commit": "five",
},
]
}
row0_pull_requests_read = {
"table": [
{
"Repositories": "parent-0",
"Branches": "compound-2",
"Pull requests": "three",
"Workspaces": "four",
"Last Commit": "five",
},
{
"Repositories": "a",
"Branches": "two",
"Pull requests": "k",
"Workspaces": "four",
"Last Commit": "five",
},
{
"Repositories": "p",
"Branches": "two",
"Pull requests": "b",
"Workspaces": "four",
"Last Commit": "five",
},
]
}
row0_workspaces_read = {
"table": [
{
"Repositories": "parent-0",
"Branches": "compound-3",
"Pull requests": "three",
"Workspaces": "four",
"Last Commit": "five",
},
{
"Repositories": "a",
"Branches": "two",
"Pull requests": "k",
"Workspaces": "four",
"Last Commit": "five",
},
{
"Repositories": "p",
"Branches": "two",
"Pull requests": "b",
"Workspaces": "four",
"Last Commit": "five",
},
]
}
row1_branches_read = {
"table": [
{
"Repositories": "parent-1",
"Branches": "compound-1",
"Pull requests": "three",
"Workspaces": "four",
"Last Commit": "five",
},
{
"Repositories": "a",
"Branches": "two",
"Pull requests": "k",
"Workspaces": "four",
"Last Commit": "five",
},
{
"Repositories": "p",
"Branches": "two",
"Pull requests": "b",
"Workspaces": "four",
"Last Commit": "five",
},
]
}
if use_different_widgets:
# for the example table all the expanded tables are the same, besides the different id
# that we use in the locator
class _Branches(View):
table = PatternflyTable(locator=".//table[contains(@id, '_1')]")
class _PullRequests(View):
table = PatternflyTable(locator=".//table[contains(@id, '_2')]")
class _Workspaces(View):
table = PatternflyTable(locator=".//table[contains(@id, '_3')]")
content_view = {1: _Branches(), 2: _PullRequests(), 3: _Workspaces()}
else:
# use the same content_view for all the tables
class _ContentView(View):
"""View for the nested table(s) in the expandable columns."""
table = PatternflyTable(locator=".//table[@aria-label='Sortable Table']")
content_view = _ContentView()
table = CompoundExpandableTable(
browser, ".//table[@aria-label='Compound expandable table']", content_view=content_view
)
assert table.read() == table_read
# Make sure that the appropriate columns are expandable
for row in table.rows():
assert not row.repositories.is_expandable
assert not row.last_commit.is_expandable
assert row.branches.is_expandable
assert row.pull_requests.is_expandable
assert row.workspaces.is_expandable
# first column is not an expandable column
with pytest.raises(ColumnNotExpandable):
table[0][0].expand()
with pytest.raises(ColumnNotExpandable):
table[1][0].expand()
# first row
row0 = table[0]
row0.branches.expand()
assert row0.branches.is_expanded
assert row0.branches.content.read() == row0_branches_read
row0.branches.collapse()
assert not row0.branches.is_expanded
row0.pull_requests.expand()
assert row0.pull_requests.is_expanded
assert row0.pull_requests.content.read() == row0_pull_requests_read
row0.pull_requests.collapse()
assert not row0.pull_requests.is_expanded
row0.workspaces.expand()
assert row0.workspaces.is_expanded
assert row0.workspaces.content.read() == row0_workspaces_read
row0.workspaces.collapse()
assert not row0.workspaces.is_expanded
# second row, just test the first expandable column
row1 = table[1]
row1.branches.expand()
assert row1.branches.is_expanded
assert row1.branches.content.read() == row1_branches_read
row1.branches.collapse()
assert not row1.branches.is_expanded
|
986,675 | 23b902105abf4e8fdf51b46137d1a4fa4279be57 | import unittest
from xml.sax.handler import property_dom_node
from dojo import *
input = """
RRLLDDRLLDURLDUUDULDLRLDDDRLDULLRDDRDLUUDLLRRDURRLUDUDULLUUDRUURRDDDDURUULLDULRLRRRDLLLRDLRDULDLRUUDRURLULURUUDRLUUDRDURUUDDDRDLLRDLUDRLUUUUUULDURDRDDURLDDRLUUDLRURRDRLDRDLRRRLURULDLUUURDRLUULRDUDLDRRRDDLDDDRLRLLDRDUURDULUURRRRUDLLUDLDRLLRRULLLRDRDRRDRDRDUULUDLULRLLDRULURLURDLLDDRRLUDRDUULLDRULLLRLULUDDLURLUULDRUDRLDUUDDLLLRURDRLDRLUUUUUUDRUDLDLULULRRURRDDDUDRRRDDDLDDLRLLDDUULLUDRURDDDRDDLURRURULULUUDRLLUUDDDRUULRDLDLLRUUDRRLRRRULLRRURUDDUUDULDUDUUUDURUDUUDUDRULUDULRDUDUUUUDLLURDLRRUDURDDUULLDLLRDUDULRLRDURLRDRDLRDLRURUDURLULDDDLDRLULLRLURRLLDLRLLULLDUURUUDRULDDUDLDDR
LUURULURUURRRDLRDRDDDUDULRDDLUUDUUULRLRRLRUDDLDLURLRULUUDUUDLDRLLUURLUUURDUDLUULLUUDUUDRDUDUDURLLURDUDLDLDDLDUDRLLUULDDRUDDDRLRUDRDUDULLRRDLLDDLRLLLRLRURURLLDULUDDUULULDRRLUURDULRULUDULDULDULRULLLRRDRLDRLDUULLDDULRLUDLLLULDDLULLUULUURRULDLUULDRRUDDDLRDLULDULDRRDLRRRLRUDURUDDDUDDURRRLDUULRDDLRRRLRUULDDURDRDUULDLLULULDRDRUULDLULRUUDUDLUDRLRDURRRRLULURDRLLLUDRRRDRURRLRLLUURDLURLURDULURUDDULLDUUDDLRLUULRDUUDRDRUDRLUUUDURLLRDRRDRURDDDDULLDDUDLDUUDLRLURURLDRRDRDUDLRRDRUDRDLURRLLLULULULRUDDDULULULDDRRLULUUUURDDURURLDLDDDDDRULUUUULLUDDDRRLUULDUULRUUDUURRLLRDDRUURL
RRRLLLLUULLRRLLDRULULLLRDLLDLDDLURUDLULUULLRURLDULLUDULDRURDURLULLDUDDRLLRUURDLLULUURLULLDLRRDDDULUURRUDRDRDURULDLLURUDLLLDDUDLLLLRLDRDRDDRRDLUUDLLLULLLLLDDRDLULLLLRURRRUUUULLDLRDLDLRRRULRRRRLDLLRDURULDDLURURUULULDRDDDURLRDDRDULLUURUDUUUDRDRRLURULULRLUUDDRDULDRLULULUULRLDRLUDRRDDDRUDDRDDRDDRRLRDLRURDULULRRUUURDRRRDURDDRUDULUUDRDDLDRDDDULDLRDUULDUULRUDLRRDDDLLDDLLLRRDLDUULUULULURRULLRRUDUDRUDRRRLDLRLURRLUDLLLUUDDUDRURUUDDURDURULRLDUDRDLULDUDRUDDDR
DRDRRUUUUURRLUDLDLRUUULRLDLRRRDDUDLUUDUDDLRDUDLRRLLURUUDULLUDLULLDLLDDULUDDUDUULURLDLDDUUDDRDDRLUURLUUDUDUDURULLDRLDDRUDLURRRLDRLRULDDLDDLDDDULDUDDLLDULUUDUDDUULDRLDRLRURDULUDDRRLRRLRRDULDRRDUDUDRLDURRLRLRDLRLRRLRDDDRULLULULDUDDLDLULRLLURRRRULUULRUDLDLRDLLURURUUURDRRRLDDRLRLURDDURDDUURUUUDDLRUURRRDLLUULUDRLDDRDDDDUUDRLRRDULDULRDLLLRULULLRDRULLRDLRUUURRRURLRRDLDRDDLURLDLULLDUURRDULUUURRLLDDUUUULDDDDLRDDDRDLDLLUURLDDRULUDDRDDDLRDU
DRRRLURUDUDUULDLLURLUUDDRRRDUDLURLLDRRLLDDURULUDUURURULLRLDLLUURDLLDLLDLDLRUDLLLLDRLLUDLLDULDDRRURDRRLRLRRUURRUDURRLDRDLDURUULUDRLLURLUDURDULLRLLDLURLRRDLLLLUUDRDULLDLURDULDRDURRRLDRLURULULURLLLRRRUULRLRRDRDDDLULRLRRDLUDDUUUUUDRRDLDUDUURLDRRRLRUDRULDRLURUULRDLDDLRURDLURULRRLDURLDUURURULRDUDRRUDUDDLRLUURURULDDLULULULDULRRLRRURUURLRLLDRRLUDLUURDRRURRUUDULRLURRLRLRDDRURDDLRRDUDRLLUUUDULRDRULUURRLRLRDUDULDRDLLUDRDLLDRULDLUUURDDRDDUDDULLLDRRDDUDDDDLDDRLRULRRRURRDRULLDDDURDDLURRDDDUDLURRUDUDLLDDDLRUUDRLRRLRDUUUDDL
""".strip()
input_test = """ULL
RRDDD
LURDL
UUUUD"""
class DojoTest(unittest.TestCase):
    """Unit tests for the keypad-walking solvers in the ``dojo`` module
    (Advent of Code 2016, day 2 style puzzle)."""

    def test_true(self):
        self.assertTrue(main())

    def test_process_command_up(self):
        self.assertEqual(process_command("U", 5), 2)

    def test_process_command_left_1(self):
        self.assertEqual(process_command("L", 2), 1)

    def test_process_command_left_2(self):
        self.assertEqual(process_command("L", 1), 1)

    # Bug fix: both "down" tests were named test_process_command_down_2,
    # so the first definition was shadowed and never executed.
    def test_process_command_down_1(self):
        self.assertEqual(process_command("D", 1), 4)

    def test_process_command_down_2(self):
        self.assertEqual(process_command("D", 7), 7)

    def test_process_line(self):
        self.assertEqual(process_line("ULL", 5), 1)

    def test_process_line_2(self):
        self.assertEqual(process_line("RRDDD", 1), 9)

    def test_process_line_3(self):
        self.assertEqual(process_line("LURDL", 9), 8)

    def test_process_line_4(self):
        self.assertEqual(process_line("UUUUD", 8), 5)

    def test_aoc_1_test(self):
        self.assertEqual(aoc_1(input_test), 1985)

    def test_aoc_1(self):
        self.assertEqual(aoc_1(input), 82958)

    def test_process_line_2_(self):
        self.assertEqual(process_line_2("ULL", 5), 5)

    def test_process_line_2__2(self):
        self.assertEqual(process_line_2("RRDDD", 5), 'D')

    def test_process_line_2__3(self):
        self.assertEqual(process_line_2("LURDL", 'D'), 'B')

    def test_process_line_2__4(self):
        self.assertEqual(process_line_2("UUUUD", 'B'), 3)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
###
# array = [[123], [456], [789]]
# 1 - Ler a linha - read_line
# 1.1 - Ler o caracter (comando)
# 1.1.1 - Processar o comando em cima do array - process_command(char = U, last_step = 5) -> actual_step = 2
# 1.1.2 - Validar se comando da out of bounds no array
# 1.2 - Devolver o resultado do ultimo comando ao final da linha
#
# 1 2 3
# 4 5 6
# 7 8 9
#
# RRLLDDRLLDURLDUUDULDLRLDDDRLDULLRDDRDLUUDLLRRDURRLUDUDULLUUDRUURRDDDDURUULLDULRLRRRDLLLRDLRDULDLRUUDRURLULURUUDRLUUDRDURUUDDDRDLLRDLUDRLUUUUUULDURDRDDURLDDRLUUDLRURRDRLDRDLRRRLURULDLUUURDRLUULRDUDLDRRRDDLDDDRLRLLDRDUURDULUURRRRUDLLUDLDRLLRRULLLRDRDRRDRDRDUULUDLULRLLDRULURLURDLLDDRRLUDRDUULLDRULLLRLULUDDLURLUULDRUDRLDUUDDLLLRURDRLDRLUUUUUUDRUDLDLULULRRURRDDDUDRRRDDDLDDLRLLDDUULLUDRURDDDRDDLURRURULULUUDRLLUUDDDRUULRDLDLLRUUDRRLRRRULLRRURUDDUUDULDUDUUUDURUDUUDUDRULUDULRDUDUUUUDLLURDLRRUDURDDUULLDLLRDUDULRLRDURLRDRDLRDLRURUDURLULDDDLDRLULLRLURRLLDLRLLULLDUURUUDRULDDUDLDDR
# LUURULURUURRRDLRDRDDDUDULRDDLUUDUUULRLRRLRUDDLDLURLRULUUDUUDLDRLLUURLUUURDUDLUULLUUDUUDRDUDUDURLLURDUDLDLDDLDUDRLLUULDDRUDDDRLRUDRDUDULLRRDLLDDLRLLLRLRURURLLDULUDDUULULDRRLUURDULRULUDULDULDULRULLLRRDRLDRLDUULLDDULRLUDLLLULDDLULLUULUURRULDLUULDRRUDDDLRDLULDULDRRDLRRRLRUDURUDDDUDDURRRLDUULRDDLRRRLRUULDDURDRDUULDLLULULDRDRUULDLULRUUDUDLUDRLRDURRRRLULURDRLLLUDRRRDRURRLRLLUURDLURLURDULURUDDULLDUUDDLRLUULRDUUDRDRUDRLUUUDURLLRDRRDRURDDDDULLDDUDLDUUDLRLURURLDRRDRDUDLRRDRUDRDLURRLLLULULULRUDDDULULULDDRRLULUUUURDDURURLDLDDDDDRULUUUULLUDDDRRLUULDUULRUUDUURRLLRDDRUURL
# RRRLLLLUULLRRLLDRULULLLRDLLDLDDLURUDLULUULLRURLDULLUDULDRURDURLULLDUDDRLLRUURDLLULUURLULLDLRRDDDULUURRUDRDRDURULDLLURUDLLLDDUDLLLLRLDRDRDDRRDLUUDLLLULLLLLDDRDLULLLLRURRRUUUULLDLRDLDLRRRULRRRRLDLLRDURULDDLURURUULULDRDDDURLRDDRDULLUURUDUUUDRDRRLURULULRLUUDDRDULDRLULULUULRLDRLUDRRDDDRUDDRDDRDDRRLRDLRURDULULRRUUURDRRRDURDDRUDULUUDRDDLDRDDDULDLRDUULDUULRUDLRRDDDLLDDLLLRRDLDUULUULULURRULLRRUDUDRUDRRRLDLRLURRLUDLLLUUDDUDRURUUDDURDURULRLDUDRDLULDUDRUDDDR
# DRDRRUUUUURRLUDLDLRUUULRLDLRRRDDUDLUUDUDDLRDUDLRRLLURUUDULLUDLULLDLLDDULUDDUDUULURLDLDDUUDDRDDRLUURLUUDUDUDURULLDRLDDRUDLURRRLDRLRULDDLDDLDDDULDUDDLLDULUUDUDDUULDRLDRLRURDULUDDRRLRRLRRDULDRRDUDUDRLDURRLRLRDLRLRRLRDDDRULLULULDUDDLDLULRLLURRRRULUULRUDLDLRDLLURURUUURDRRRLDDRLRLURDDURDDUURUUUDDLRUURRRDLLUULUDRLDDRDDDDUUDRLRRDULDULRDLLLRULULLRDRULLRDLRUUURRRURLRRDLDRDDLURLDLULLDUURRDULUUURRLLDDUUUULDDDDLRDDDRDLDLLUURLDDRULUDDRDDDLRDU
# DRRRLURUDUDUULDLLURLUUDDRRRDUDLURLLDRRLLDDURULUDUURURULLRLDLLUURDLLDLLDLDLRUDLLLLDRLLUDLLDULDDRRURDRRLRLRRUURRUDURRLDRDLDURUULUDRLLURLUDURDULLRLLDLURLRRDLLLLUUDRDULLDLURDULDRDURRRLDRLURULULURLLLRRRUULRLRRDRDDDLULRLRRDLUDDUUUUUDRRDLDUDUURLDRRRLRUDRULDRLURUULRDLDDLRURDLURULRRLDURLDUURURULRDUDRRUDUDDLRLUURURULDDLULULULDULRRLRRURUURLRLLDRRLUDLUURDRRURRUUDULRLURRLRLRDDRURDDLRRDUDRLLUUUDULRDRULUURRLRLRDUDULDRDLLUDRDLLDRULDLUUURDDRDDUDDULLLDRRDDUDDDDLDDRLRULRRRURRDRULLDDDURDDLURRDDDUDLURRUDUDLLDDDLRUUDRLRRLRDUUUDDL
# pedro
# carreia
# christian
# raphael
###
|
986,676 | 9fbcfe6f01fad1b85c96cb7319100ccc25dc8ea6 | import re
import pytest
from grepper import Grepper
@pytest.fixture(scope="function")
def regex(request):
    """Compile the indirectly-parametrized pattern string into a regex object."""
    return re.compile(request.param)
@pytest.mark.parametrize(
    "token_capture,regex",
    [
        ("%{0}", "^(?P<token_0>.*?)$"),
        ("%{1}", "^(?P<token_1>.*?)$"),
        ("%{0S1}", r"^(?P<token_0>(?:\S*\s\S*){1}\S*)$"),
    ],
    indirect=["regex"],
)
def test_map_token_capture_to_re(token_capture, regex):
    # Each escaped %{...} capture token must map onto the expected
    # named-group regex (the `regex` fixture compiles the expected string).
    mapped = Grepper.map_pattern_to_re(re.escape(token_capture))
    assert mapped == regex
def test_raises_when_no_patterns_provided():
    # Constructing a Grepper with zero patterns is invalid.
    with pytest.raises(ValueError):
        Grepper()
# Pattern fixtures: two plain captures, a space-limited capture (%{1S0}),
# and a greedy capture (%{0G}).
SIMPLE_PATTERN = "foo %{0} is a %{1}"
SPACE_LIMITATION_PATTERN = "foo %{0} is a %{1S0}"
GREEDY_PATTERN = "bar %{0G} foo %{1}"
@pytest.mark.parametrize(
    "pattern,test_string,expected",
    [
        (SIMPLE_PATTERN, "foo blah is a bar", True),
        (SIMPLE_PATTERN, "foo blah is a very big boat", True),
        (SIMPLE_PATTERN, "foo blah is bar", False),
        (SIMPLE_PATTERN, "foo blah", False),
        (SIMPLE_PATTERN, "foo blah is", False),
        (SPACE_LIMITATION_PATTERN, "foo blah is a bar", True),
        (SPACE_LIMITATION_PATTERN, "foo blah is a very big boat", False),
        (SPACE_LIMITATION_PATTERN, "foo blah is bar", False),
        (SPACE_LIMITATION_PATTERN, "foo blah", False),
        (SPACE_LIMITATION_PATTERN, "foo blah is", False),
        (GREEDY_PATTERN, "bar foo bar foo bar foo bar foo", True),
    ],
)
def test_patterns(pattern, test_string, expected):
    # Matching is boolean per line: truthiness of match_line's result.
    grepper = Grepper(pattern)
    assert bool(grepper.match_line(test_string)) == expected
def test_adjacent_capture_sequences():
    # Two adjacent captures: %{0S1} spans exactly one extra space, %{1}
    # takes the following word; tokens are retrievable by index.
    grepper = Grepper("the %{0S1} %{1} ran away")
    match = grepper.match_line("the big brown fox ran away")
    assert bool(match)
    assert match.token(0) == "big brown"
    assert match.token(1) == "fox"
    assert match.re == re.compile(
        "^the\\ (?P<token_0>(?:\\S*\\s\\S*){1}\\S*)\\ (?P<token_1>.*?)\\ ran\\ away$"
    )
    assert match.pattern == "the %{0S1} %{1} ran away"
def test_raises_on_duplicate_tokens():
    # Reusing the same token index in one pattern is rejected.
    with pytest.raises(ValueError):
        Grepper("foo %{0} is a %{0}")
@pytest.mark.parametrize(
    "line,expected", [("foo", True), ("baz", True), ("blah", False)]
)
def test_matches_multiple_patterns(line, expected):
    # A Grepper built with several patterns matches a line if ANY pattern does.
    grepper = Grepper("foo", "baz")
    assert bool(grepper.match_line(line)) == expected
|
986,677 | 08958112f1787cfed39d247d865bf8363444da57 | # -*- coding: utf-8 -*-
#
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import gettext as _
from django.utils.translation import gettext_lazy
# Create your models here.
class Hero(models.Model):
    """A hero with a type category and asymmetric self-referencing
    relationships ("dads") through the Relationship model."""

    # Closed set of hero archetypes; labels are lazily translated.
    CATEGORY_CHOICES = (
        ("Strength", gettext_lazy("Strength")),
        ("Intelligence", gettext_lazy("Intelligence")),
        ("Agile", gettext_lazy("Agile")),
    )
    name = models.CharField(max_length=200, verbose_name=_("Name"))
    # NOTE(review): verbose_name uses non-lazy gettext (imported as `_`),
    # which is evaluated once at import time; gettext_lazy is the usual
    # choice for model verbose_name -- confirm.
    category = models.CharField(max_length=200, choices=CATEGORY_CHOICES, verbose_name=_("Hero Type"))
    # Asymmetric M2M through Relationship; reverse accessor is `sons`.
    dads = models.ManyToManyField('self', symmetrical=False, through='Relationship', through_fields=('son','dad'), related_name="sons", verbose_name=_("Dad Heros"))

    def __str__(self):
        return self.name

    def __unicode__(self):
        # Python 2 compatibility: mirror __str__.
        return self.__str__()
class Relationship(models.Model):
    """Through-model linking a 'dad' hero to a 'son' hero with a star
    rating and an optional free-text reason."""

    # Star ratings 1..5, stored as strings.
    STAR_CHOICES = tuple((str(level), str(level)) for level in range(1, 6))

    dad = models.ForeignKey(Hero, on_delete=models.CASCADE, related_name='son', verbose_name=_("Dad Heros"))
    son = models.ForeignKey(Hero, on_delete=models.CASCADE, related_name='dad', verbose_name=_("Son Heros"))
    star = models.CharField(choices=STAR_CHOICES, max_length=10, verbose_name=_("Restrain Level"))
    reason = models.TextField(blank=True, verbose_name=_('Reason'))

    class Meta:
        # Each (dad, son) pair may only be related once.
        unique_together = ('dad', 'son')

    def __str__(self):
        return "{0}-{1}".format(self.dad.name, self.son.name)

    def __unicode__(self):
        # Python 2 compatibility: mirror __str__.
        return self.__str__()
986,678 | ae9120cbb7a7f5365b4269a206bd710c4ba98116 | from parse import *
def car_start_look_ahead(start, cars, time, class_adj_list):
    """Greedy look-ahead planner: for each outgoing edge, simulate up to
    ``iter_count`` greedy steps and keep the best-scoring walk in
    ``cars_data``. Returns one route list per car (each starting at
    ``start``).
    """
    iter_count = 17
    cars_data = []
    for car in range(cars):
        cars_data.append([start])
    max_sum = 0
    time_left = time
    current_node = start
    # NOTE(review): nothing inside this loop changes time_left or
    # current_node, so the loop cannot terminate once time > 0 -- confirm
    # where time_left (or time_left_guess) should be folded back in.
    while time_left > 0:
        for edge in class_adj_list[current_node]:
            sum_of_iter_costs = 0
            guess_node = current_node
            visited = []
            time_left_guess = time_left
            # source -> possibility 1
            # after possibility 1's iter steps:
            # current_node = start
            # time_left = initial_time = time
            for _ in range(iter_count):
                while sum_of_iter_costs < time_left:
                    cost, node, edge = findGreedyandReturnCost(guess_node, time_left, class_adj_list)
                    if cost==-1:
                        break
                    else:
                        sum_of_iter_costs += cost
                        guess_node = node
                        visited.append(edge)
                        time_left_guess -= cost
            if sum_of_iter_costs > max_sum:
                max_sum = sum_of_iter_costs
                # NOTE(review): `car` is left over from the setup loop above
                # (always cars - 1), so only the last car's route is ever
                # extended -- confirm intent.
                for node in visited:
                    cars_data[car].append(node.start)
                    node.is_visited = True
                    # Bidirectional edges (direction == 2): also mark the
                    # reverse edge visited.
                    if node.direction == 2:
                        for nextEdg in class_adj_list[node.end]:
                            if nextEdg.end == node.start: # node.start (aniket) + +1
                                nextEdg.is_visited = True
                                break
                # for node in range(len(visited) -1):
                # for a in range(len(class_adj_list[node])):
                # if node.end == class_adj_list[node][a].start:
                # class_adj_list[node][a].visited = True
                # if class_adj_list[node][a].direction == 2:
                # class_adj_list[a][node].visited = False
            else:
                visited = []
        # move in that direction and mark is_visited for iter_count
        # for i in range(iter_count):
        # chain -> a -> b -> c
        #
    return cars_data
# greedy Node functions
def findGreedyandReturnCost(current_pos, time_left, class_adj_list):
    """Pick the unvisited, affordable edge out of ``current_pos`` with the
    greatest length.

    An edge qualifies when it has not been visited, its cost fits within
    ``time_left``, and its length beats the best seen so far (strictly).

    Returns:
        (cost, end_node, edge) of the best edge, or (-1, -1, -1) when no
        edge qualifies.
    """
    best_length = 0
    best_edge = None
    for candidate in class_adj_list[current_pos]:
        if (not candidate.is_visited) and candidate.cost <= time_left \
                and candidate.length > best_length:
            best_length = candidate.length
            best_edge = candidate
    if best_edge is not None:
        # Bug fix: the old code returned the *last iterated* edge (`ed`)
        # instead of the best edge found, so callers could receive an edge
        # inconsistent with the returned cost/end node.
        return best_edge.cost, best_edge.end, best_edge
    return -1, -1, -1
|
986,679 | 72c5e10ed29955e83be7e95959053bb033300e99 | from django.contrib import admin
from .models import channel_model,course,course_session
# Register your models here.
# Expose the channel, course and session models in the Django admin site.
admin.site.register(channel_model)
admin.site.register(course)
admin.site.register(course_session)
|
986,680 | 702123ecbb7ae34bc5b5e6e38fe537f2c8c651a9 | # %% Packages
import json
import tensorflow as tf

# %% Loading models and data
# Model: convert the fine-tuned Keras model to TFLite for on-device use.
keras_path = "./models/oxford_flower102_fine_tuning.h5"
keras_model = tf.keras.models.load_model(keras_path)
converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
tflite_model = converter.convert()
with open("./models/oxford_flower_102.tflite", "wb") as f:
    f.write(tflite_model)

# Labels: sort category names by their integer id so line i of the label
# file corresponds to class index i.
labels_path = "./data/cat_to_name.json"
with open(labels_path) as json_file:
    labels_dict = json.load(json_file)
sorted_labels_dict = sorted(labels_dict.items(), key=lambda x: int(x[0]))
label_values = [x[1] for x in sorted_labels_dict]
# Fix: write through a context manager so the handle is always closed,
# even if a write fails (the old code used a bare open/close pair).
with open("./models/labels_flowers.txt", "w") as textfile:
    for element in label_values:
        textfile.write(element + "\n")
|
986,681 | 37e8c84410947e6778e51850a39932362bea13cf | from django.test import TestCase, Client
from django.db import models
from products.models import Product
from .models import Order, OrderLineItem
# Create your tests here.
class CheckoutModelTests(TestCase):
    """Model tests for Order / OrderLineItem.

    NOTE(review): neither test method name starts with ``test_``, so the
    unittest runner never executes them -- presumably unintentional. They
    also reference undefined attributes (``self.id``, ``self.product``)
    and assert fields the created objects would not plausibly have
    (``orderlineitem.country``); repair the bodies before renaming them so
    they run.
    """

    def setUp(self):
        # A logged-in client; assumes an 'AutoTester' user exists -- TODO
        # confirm a fixture or factory creates it.
        self.client = Client()
        self.client.login(username='AutoTester', password='test12345')

    def what_is_in_the_order_class_test(self):
        order = Order.objects.create(order='This is a test',
                                     id=self.id,
                                     country='New Zealand')
        self.assertEqual(order.order, 'This is a test')
        self.assertEqual(order.id, 'This is a test')
        self.assertEqual(order.country, 'New Zealand')

    def what_is_in_the_orderlineitem_class_test(self):
        orderlineitem = OrderLineItem.objects.create(order='This is a test',
                                                     product=self.product,
                                                     quantity='0')
        self.assertEqual(orderlineitem.order, 'This is a test')
        self.assertEqual(orderlineitem.id, 'This is a test')
        self.assertEqual(orderlineitem.country, '0')
986,682 | edc6e6def7a541070204b5075772b79a7b968853 | import pandas as pd
from Backend.Utilities import DictionaryReader
CONST_PATH_TO_FILE = 'resources/ghcnd-stations.txt'
CONST_PATH_TO_TWITTER = 'resources/dataless.text'
colspecs = [(0, 11), (11, 21), (21, 31), (31, 38), (39, 41), (41, 72), (72, 76), (76, 80), (80, 86)]
stations = pd.read_fwf(CONST_PATH_TO_FILE, colspecs=colspecs, header=None, index_col=0,
names=['latitude', 'longitude', 'elevation', 'state', 'name', 'GSNFLAG', 'HCNFLAG', 'WMOID'])
def lats():
    """Return every station latitude as a plain list."""
    return list(stations['latitude'])
def lons():
    """Return every station longitude as a plain list."""
    return list(stations['longitude'])
def coordinates():
    """Return a (longitude, latitude) tuple for every station."""
    lon_lat = stations[['longitude', 'latitude']]
    return [tuple(pair) for pair in lon_lat.values]
def twitterData():
    """Load and return the tuple list parsed from the Twitter data file."""
    return DictionaryReader.tuplize(CONST_PATH_TO_TWITTER)
# NOTE(review): module-level call runs at import time and discards its
# result -- confirm this side effect (if any) is intended.
twitterData()
|
986,683 | 0dc03f4c3824a0e196177dfcc2ec61dfa5f2c12a | #Sam Durst
#10/4/19
import socket
import select
import sys
def main():
    """Interactive TCP chat client.

    Connects to the hard-coded server, performs a HELLO handshake and an
    AUTH loop, then alternates between polling stdin for menu input and
    polling the socket for server notifications (both via select with a
    0.5 s timeout). Runs until the user signs off (option 3).
    """
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # create tcp client socket
    port = 12000
    ip = socket.gethostbyname('www.goatgoose.com')
    client.connect((ip, port)) # form the connection
    message = "HELLO \n"
    client.send(message.encode('utf-8')) # Hardcoded handshake
    client.recv(1024)
    username = input("Enter username: ")
    psswd = input("Enter password: ")
    message2 = "AUTH:"+username+":"+psswd+"\n" # Authentication
    client.send(message2.encode('utf-8'))
    resp = (client.recv(1024))
    # Retry until the server replies AUTHYES.
    while resp != b'AUTHYES\n':
        if resp == b'UNIQNO\n':
            print("User not unique, try again")
        else:
            print("Incorrect username and/or password.") # Unlimited log in attempts
        username = input("Enter username: ")
        psswd = input("Enter password: ")
        message2 = "AUTH:" + username + ":" + psswd + "\n"
        client.send(message2.encode('utf-8'))
        resp = (client.recv(1024))
    s = [client] # for the select method
    client.recv(1024)
    print("You are now authenticated")
    while True:
        print("Choose an option:\n1. List online users\n2. Send someone a message\n3. Sign off")
        x = True
        while x:
            ready = select.select([sys.stdin], [], [], .5)[0] # Check every half a second for keyboard input
            if ready: # if we are in this section incoming notifications will be queued
                sys.stdin.flush() # housekeeping
                choice = input()
                if choice == '1':
                    message = "LIST\n"
                    client.send(message.encode('utf-8')) # gets list of users online
                    print(client.recv(1024).decode('utf-8'))
                elif choice == '2':
                    to_user = input("User you would like to send a message to: ")
                    msg = input("Message: ")
                    message3 = "To:" + to_user + ":" + msg + '\n' # Sending message
                    client.send(message3.encode('utf-8'))
                    print("Message sent.")
                else:
                    # Any other choice is treated as sign-off.
                    message = "BYE\n"
                    client.send(message.encode('utf-8')) # Quits program when signout occurs
                    print("Signing out...")
                    client.close()
                    exit(0)
                x = False
            else:
                rd, wd, ed = select.select(s, [], [], .5) # Probes socket every half a second for incoming data
                if len(rd) != 0:
                    rspnse = client.recv(1024).decode('utf-8')
                    # Dispatch on the notification prefix sent by the server.
                    if rspnse == "" or rspnse == "\n": # cases determine type of notification, strip newline character
                        pass
                    elif rspnse[0:4] == "From":
                        print("Message from " + rspnse.split(":")[1] + ": " + rspnse.split(":")[2])
                    elif rspnse[0:6] == "SIGNIN":
                        print(rspnse.split(":")[1].rstrip() + " signed in.")
                    elif rspnse[0:7] == "SIGNOFF":
                        print(rspnse.split(":")[1].rstrip() + " signed out.")
                    else:
                        print(rspnse)
# Fix: guard the entry point so importing this module does not open a
# network connection and block on stdin.
if __name__ == '__main__':
    main()
|
986,684 | 17bd1e450fcb898d0d2ede35314b2a07844be9f2 | #!/usr/bin/env python
import optparse, sys, os
from collections import namedtuple
from collections import defaultdict
import bleu
import random
import math
import numpy
from operator import itemgetter
optparser = optparse.OptionParser()
optparser.add_option("-n", "--nbest", dest="nbest", default=os.path.join("data", "train.nbest"), help="N-best file")
optparser.add_option("-r", "--ref", dest="ref", default=os.path.join("data", "train.fr"), help="reference")
optparser.add_option("-t", "--target", dest="tar", default=os.path.join("data", "train.en"), help="proper translation")
# PRO-style reranker hyper-parameters.
epoches=5
tau = 500        # candidate pairs sampled per n-best list
alpha=0.1        # minimum smoothed-BLEU gap for a sampled pair to be kept
xi=100           # pairs kept per n-best list after sorting
theta = numpy.random.rand(6)   # weight vector, one weight per feature
eta=0.1          # learning rate
(opts, _) = optparser.parse_args()
source = open(opts.ref).read().splitlines()
target = open(opts.tar).read().splitlines()
a_translation=namedtuple('a_translation','sentence, features, smoothed_bleu')
nbests = [[] for i in range(len(source))]
# Parse the n-best file ("index ||| sentence ||| feature values") and score
# each candidate against the target side with smoothed BLEU.
for line in open(opts.nbest):
    (i, sentence, features) = line.strip().split("|||")
    ind=int(i)
    #stats=bleu.bleu_stats(sentence, source[ind])
    stats=list(bleu.bleu_stats(sentence, target[ind]))
    #test1=test[0]
    bleu_smooth_score=bleu.smoothed_bleu(stats)
    feature_vec=numpy.fromstring(features, sep=' ')
    nbests[ind].append(a_translation(sentence,feature_vec,bleu_smooth_score))
def get_sample(nbest):
    """Draw up to ``tau`` ordered candidate pairs from one n-best list.

    A pair is kept only when its smoothed-BLEU gap exceeds ``alpha``;
    the higher-BLEU translation is always placed first, so each entry is
    [better, worse]. (Cleanup: dead commented-out code and the redundant
    else/continue branch were removed; behavior is unchanged.)
    """
    sample = []
    for _ in range(tau):
        s1 = random.choice(nbest)
        s2 = random.choice(nbest)
        # Skip pairs too close in quality to be informative.
        if math.fabs(s1.smoothed_bleu - s2.smoothed_bleu) <= alpha:
            continue
        if s1.smoothed_bleu > s2.smoothed_bleu:
            sample.append([s1, s2])
        else:
            sample.append([s2, s1])
    return sample
# PRO-style training: each epoch samples ranked candidate pairs per sentence
# and applies perceptron updates on misranked pairs only.
for i in range(0, epoches):
    for nbest in nbests:
        sample = get_sample(nbest)
        # NOTE(review): this ascending sort keeps the xi pairs with the
        # SMALLEST BLEU gap; classic PRO keeps the largest-gap pairs --
        # confirm the intended direction.
        sorted_sample = sorted(sample, key=lambda x: math.fabs(x[0].smoothed_bleu - x[1].smoothed_bleu))[:xi]
        mistakes = 0
        for better, worse in sorted_sample:
            # Bug fix: update only when the model misranks the pair. The
            # old else-branch also updated theta (toward the WORSE
            # hypothesis) on correctly ranked pairs, undoing learning.
            if numpy.dot(theta, better.features) <= numpy.dot(theta, worse.features):
                mistakes = mistakes + 1
                theta = theta + eta * (better.features - worse.features)
def main():
print "\n".join([str(weight) for weight in theta])
if __name__ == "__main__":
main()
#features = [float(h) for h in features.strip().split()]
#w = [1.0/len(features) for _ in xrange(len(features))]
#break
#print "\n".join([str(weight) for weight in w])
|
class kdnode:
    """Node of a k-d tree, ordered by its cached ``dist`` value.

    All rich comparisons delegate to ``dist`` so nodes can live in sorted
    containers or heaps keyed by distance.
    """

    def __init__(self):
        # Split description and covered index range (-1 until assigned).
        self.dim = -1
        self.val = -1
        self.st = -1
        self.end = -1
        # Ordering key used by every comparison operator below.
        self.dist = 0
        # Child links (0 until assigned).
        self.lessChild = 0
        self.greaterChild = 0
        # Bounding-box fields (-9999 sentinel until assigned).
        self.minx = -9999
        self.miny = -9999
        self.width = -9999
        self.height = -9999

    def __lt__(self, other):
        return self.dist < other.dist

    def __gt__(self, other):
        return self.dist > other.dist

    def __eq__(self, other):
        return self.dist == other.dist

    def __le__(self, other):
        return self.dist <= other.dist

    def __ge__(self, other):
        return self.dist >= other.dist

    def __ne__(self, other):
        return self.dist != other.dist
|
986,686 | 6f2f04bdb8ad94cedea699e692fb06c864fa2af5 | """Script to install Diary Peter."""
# Copyright 2016 Vincent Ahrend
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Fall back to distutils when setuptools is unavailable (older systems).
try:
    from setuptools import setup
except ImportError:
    print("Not using setuptools")
    from distutils.core import setup

setup(
    name="Diary Peter",
    version="0.1.0",
    author="Vincent Ahrend",
    author_email="mail@vincentahrend.com",
    url="https://github.com/ciex/diary-peter/",
    scripts=["main.py", "create_database.py"],
    packages=["diary_peter"],
    license="Apache",
    description="A Conversational Diary",
    long_description=open("README.md").read(),
    # Fix: install_requires expects a list of requirement strings, not a
    # single newline-joined blob.
    install_requires=open("requirements.txt").read().splitlines()
)
|
986,687 | 1a1251a6bee1c84ace64438c8a01ef5bb51b7401 |
/**
 * Concrete AdvancedArithmetic implementation.
 */
class Calculator implements AdvancedArithmetic
{
    /**
     * Returns the sum of all divisors of n, including n itself.
     * Only candidates up to n/2 are tried, since no proper divisor
     * exceeds n/2.
     */
    @Override
    public int divisorSum(int n)
    {
        int total = n; // n always divides itself
        for (int d = 1; d <= n / 2; d++)
        {
            if (n % d == 0)
            {
                total += d;
            }
        }
        return total;
    }
}
|
986,688 | 555fb2057ab3aa9286c3daec5c57aac97fe8e6eb | # Creates On-Demand / Site Array report
from pure_pull import *
from array_report import *
from datetime import datetime, timedelta
import pandas as pd
from math import trunc
from sharepoint import file_upload
def add_data_columns(array_name, used, charge, ondemand):
    """Write one array's daily usage and charge columns into the worksheet.

    array_name -- value of the csv's Array_Name column to filter on
    used / charge / ondemand -- worksheet column letters for the used-GiB,
    daily-charge and on-demand-charge columns.

    Relies on module-level globals: workbook, worksheet, arrayCsv,
    daily_charge, ondemand_charge, reserve.
    """
    center = workbook.add_format({'align': 'center'})
    currency = workbook.add_format({'num_format': '$#,##0.00'})
    bold_head = workbook.add_format({'align': 'center', 'bold': True, 'font_size': 11})
    data_start = 19          # first worksheet row for daily data
    sna = 'array1'
    dwf = 'array2'
    # Iter through csv
    for column in arrayCsv.iterrows():
        # Put row into a dictionary (bytes converted to whole GiB).
        x = {'Time': column[1]['Time'].strftime('%m/%d/%Y'),
             'Effective_Used (Byte)': trunc(column[1]['Effective_Used (Byte)'] / 1024 / 1024 / 1024), 'array': column[1]['Array_Name']}
        # Put array data into specific columns based on Array_Name
        if column[1]['Array_Name'] == array_name:
            worksheet.write(f'A{data_start}', x['Time'])
            # Header cells are (re)written for every matching row; harmless
            # since the values are constant.
            worksheet.write('B18', sna, bold_head)
            worksheet.write(f'{used}{data_start}', x['Effective_Used (Byte)'], center)
            worksheet.write(f'{charge}18', 'Daily Charge', bold_head)
            worksheet.write('F18', dwf, bold_head)
            worksheet.write(f'{charge}{data_start}', round(x['Effective_Used (Byte)'] * daily_charge, 2), center)
            worksheet.write(f'{ondemand}18', 'On Demand Charge', bold_head)
            # On-demand charge applies only to usage above the reserve,
            # converted from a monthly to a daily rate.
            worksheet.write(f'{ondemand}{data_start}', round((x['Effective_Used (Byte)'] - reserve) * ondemand_charge * 12 / 365, 2), center)
            data_start += 1
    total_start = data_start + 1
    # Add totals and change to currency format
    worksheet.write(f'C{total_start}', f'=SUM(C19:C{data_start})', currency)
    worksheet.write(f'D{total_start}', f'=SUM(D19:D{data_start})', currency)
    worksheet.write(f'G{total_start}', f'=SUM(G19:G{data_start})', currency)
    worksheet.write(f'H{total_start}', f'=SUM(H19:H{data_start})', currency)
today = datetime.today().strftime('%Y-%m-%d')
fileName = 'array_capacity.csv'
# Charge rates (presumably per GiB -- confirm): monthly partner rate, its
# per-day equivalent, and the on-demand rate above the reserve commit.
technologent_charge = 0.075
daily_charge = technologent_charge * 12 / 365
ondemand_charge = 0.06
reserve = 51200
# The report always covers the previous calendar month.
today_date = datetime.today()
first = today_date.replace(day=1)
lastMonth = first - timedelta(days=1)
lastMonthname = lastMonth.strftime('%B')
lastMonthyear = lastMonth.strftime('%Y')
# Put raw report data into DataFrame
arrayCsv = pd.read_csv(fileName)
# Take time off date column and change to datetime
arrayCsv['Time'] = arrayCsv['Time'].str.split('T').str[0]
arrayCsv['Time'] = pd.to_datetime(arrayCsv['Time'])
# Keep only rows from the previous month.
arrayCsv = arrayCsv[arrayCsv['Time'].dt.strftime('%Y-%m') == lastMonth.strftime('%Y-%m')]
# Initialize writer, workbook and worksheet
writer = pd.ExcelWriter(f'report_name-{lastMonthname}{lastMonthyear}_processed.xlsx', engine='xlsxwriter', options={'nan_inf_to_errors': True})
df = pd.DataFrame()
df.to_excel(writer, sheet_name='Site Report')
workbook = writer.book
worksheet = writer.sheets['Site Report']
# Create excel formats
bold = workbook.add_format({'align': 'bottom align', 'bold': True, 'font_size': 10})
title = workbook.add_format({'align': 'bottom align', 'bold': True, 'font_size': 12})
center_array = workbook.add_format({'align': 'center', 'bold': True})
# Add static items to sheet (logo, report header, customer/site metadata
# and the rate/commit summary cells).
worksheet.insert_image('A1', 'pure.png')
worksheet.merge_range('A4:C4', 'Pure-as-a-Service Usage Report', title)
worksheet.write('A5', 'Report Period', bold)
worksheet.write('B5', lastMonth.replace(day=1).strftime('%m-%d-%Y') + ' - ' + lastMonth.strftime('%m-%d-%Y'))
worksheet.write('A6', 'Customer', bold)
worksheet.write('B6', 'customer')
worksheet.write('A7', 'Partner', bold)
worksheet.write('B7', 'Technologent')
worksheet.write('A8', 'Partner Email', bold)
worksheet.write('B8', 'email')
worksheet.write('A11', 'Service Start Date', bold)
worksheet.write('B11', '2019-10-03')
worksheet.write('A12', 'Subscription Number', bold)
worksheet.write('A13', 'Site ID', bold)
worksheet.write('B12', 'ID')
worksheet.write('B13', 'Site Name', bold)
worksheet.write('C13', 'Site Address', bold)
worksheet.write('E11', 'Reserve Capacity Commit (in GiB)', bold)
worksheet.write('E12', reserve)
worksheet.write('F11', 'demand', bold)
worksheet.write('F12', ondemand_charge)
worksheet.write('G11', 'charge', bold)
worksheet.write('G12', technologent_charge)
worksheet.write('H11', 'charge', bold)
worksheet.write('H12', daily_charge)
worksheet.write('A14', 'cluster')
worksheet.write('A15', 'cluster')
worksheet.write('B14', 'Fgrp')
worksheet.write('C14', 'address1')
worksheet.write('B15', 'grp')
worksheet.write('C15', 'address2')
worksheet.write('A17', 'Date', bold)
worksheet.merge_range('B17:D17', 'cluster', center_array)
worksheet.merge_range('F17:H17', 'cluster', center_array)
# Run data column function to add computed data to sheet
add_data_columns('array1', 'B', 'C', 'D')
add_data_columns('array2', 'F', 'G', 'H')
# Increase column width of sheet
worksheet.set_column('A:H', 20)
writer.save()
# NOTE(review): `os` is not imported in this file directly; it presumably
# arrives via a wildcard import above -- confirm.
os.remove('array_capacity.csv')
file_upload(writer.path, 'folder_path')
|
baseset ="characterset"
subset ="CHARACTER"
# Case-insensitive containment: "CHARACTER".casefold() IS a substring of
# "characterset".casefold(), so the first branch runs here.
# NOTE(review): the branch messages look inverted -- "Gone" prints when the
# subset is present and "present" when it is not; confirm the intent.
if subset and subset.casefold() in baseset.casefold():
    print("Gone")
else:
    print("present")

# Print -100, -95, ..., 5 (upper bound 10 excluded).
for itr in range(-100,10,5):
    print(itr)
|
986,690 | bafca5bc932964d2f314e2de09166729e5e24abc | #!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
from writers import adml_writer
def GetWriter(config):
    '''Factory for ADMLWriter instances targeting the Chrome OS platform.

    See the constructor of TemplateWriter for description of arguments.
    '''
    platforms = ['chrome_os']
    return adml_writer.ADMLWriter(platforms, config)
|
986,691 | a7baf91e35e6720076815f1cd3ec8fd0e4ad461e | import math
class Solution:
    def reverse(self, x: int) -> int:
        """Reverse the decimal digits of x, preserving its sign.

        Returns 0 when the reversed value falls outside the signed 32-bit
        range [-2**31, 2**31 - 1], per the LeetCode contract.
        (Rewrite: string slicing replaces the manual digit-list shuffle;
        the range check now applies to the signed result, so a reversed
        value of exactly -2**31 is no longer wrongly rejected.)
        """
        sign = -1 if x < 0 else 1
        reversed_magnitude = int(str(abs(x))[::-1])
        result = sign * reversed_magnitude
        # Clamp to the signed 32-bit range as the problem requires.
        if -2**31 <= result <= 2**31 - 1:
            return result
        return 0
# Fix: guard the manual smoke test so importing this module does not print.
if __name__ == "__main__":
    s = Solution()
    print(s.reverse(-120))
986,692 | 276cae2ff2fca8b1618124aaa5466b000d607031 | """oh_app_demo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
To override specific arguments of views provided by openhumans
1. Import the specific view : eg. from openhumans.views import delete_file
2. Before importing openhumans urls, override the specific url import: eg:
urlpatterns += [
url(r'^delete/(?P<file_id>\w+)/?$', delete_file.as_view(
scuccess_url='success', not_authorized_url='login'), name='openhumans')
]
"""
from django.contrib import admin
from django.urls import include, path
from django.conf.urls import url
# URL routes for the project.  (A previous duplicate `urlpatterns`
# assignment that was immediately overwritten has been removed.)
urlpatterns = [
    path('', include('main.urls')),
    path('admin/', admin.site.urls),
]

# Append the Open Humans integration routes under the root prefix.
urlpatterns += [
    url('', include(('openhumans.urls', 'openhumans')))
]
|
986,693 | 56333389f1b9afbf7c79df2ad98ce2bed66f5164 | from django.shortcuts import render
from cia.LibCIA import XML
from cia.apps.stats.models import StatsTarget
import datetime
# Legacy imports
from cia.LibCIA.Stats.Target import StatsTarget as OldStatsTarget
from cia.LibCIA.Message import FormatterArgs
from cia.LibCIA import Formatters
from cia.LibCIA.Message import Message as OldMessage
from cia.LibCIA.Formatters.Commit import CommitFormatter
from Nouvelle.Serial import Serializer
class Message:
    """Adapter wrapping a legacy CIA (id, xml) message for the Django templates.

    Bridges the old LibCIA message/formatter machinery into the new view
    layer: selects an XHTML formatter for the message and, for commit
    messages, pre-extracts shortcut fields as instance attributes.
    """

    # Shared, stateless helpers: Nouvelle serializer renders formatter
    # output to markup; the factory picks a formatter per message/medium.
    serializer = Serializer()
    formatter_factory = Formatters.getFactory()

    def __init__(self, xxx_todo_changeme):
        # Legacy 2-tuple argument left over from Python 2 tuple-parameter
        # unpacking (2to3 artifact): (id, xml).
        (id, xml) = xxx_todo_changeme
        self.id = id
        # Hex form of the id, used as a compact message identifier.
        self.hex_id = "%x" % id
        self.oldmsg = OldMessage(xml)
        # Message timestamp is stored as a float inside the XML payload.
        self.timestamp = datetime.datetime.fromtimestamp(
            XML.digValue(self.oldmsg.xml, float, "message", "timestamp"))
        self.formatter = self.formatter_factory.findMedium('xhtml', self.oldmsg)
        self.is_commit = isinstance(self.formatter, CommitFormatter)
        if self.is_commit:
            # Promote XML path-shortcut fields to attributes on this
            # object.  NOTE(review): assumes every shortcut name is a
            # valid attribute identifier -- confirm against pathShortcuts.
            for shortcut, path in list(XML.pathShortcuts.items()):
                doc = XML.XPath(path).queryObject(self.oldmsg)
                if doc:
                    setattr(self, shortcut, XML.shallowText(doc[0]))

    def get_year_and_month(self):
        """The template needs this for use with {% ifchanged %}"""
        return (self.timestamp.year, self.timestamp.month)

    def to_html(self):
        """Format any message as XHTML, using the LibCIA formatters and
        Nouvelle serializer. This is used as a fallback for non-commit
        messages.
        """
        return self.serializer.render(self.formatter.formatMessage(self.oldmsg))

    def format_log(self):
        # Render only the commit-log component of the message.
        return self.serializer.render(self.formatter.component_log(None, FormatterArgs(self.oldmsg)))

    def format_files(self):
        # Render only the changed-files component of the message.
        return self.serializer.render(self.formatter.component_files(None, FormatterArgs(self.oldmsg)))
def stats_page(request, path):
    """Render the stats page for *path* with its 20 most recent messages."""
    legacy_target = OldStatsTarget(path)
    try:
        target = StatsTarget.objects.get(path=path)
    except StatsTarget.DoesNotExist:
        # No persisted target for this path yet: build a transient one
        # populated with default values (it is not saved).
        target = StatsTarget(path=path)
        target.enforce_defaults()

    # Wrap the latest legacy messages and show them oldest-first.
    recent = [Message(entry) for entry in legacy_target.messages.getLatest(20)]
    recent.reverse()

    context = {
        'path': path,
        'target': target,
        'recent_messages': recent,
    }
    return render(request, 'stats/stats_page.html', context)
|
986,694 | f02c414d98b3fff4c6578b9e8400ac516847a7f7 | try:
import tensorflow as tf
from latte.metrics.keras import interpolatability as K
has_tf = True
except:
has_tf = False
import numpy as np
import pytest
from latte.metrics.core import interpolatability as C
@pytest.mark.skipif(not has_tf, reason="requires tensorflow")
class TestSmoothness:
    """Checks that the core (NumPy) and Keras Smoothness metrics agree."""

    def test_smoothness(self):
        core_smth = C.Smoothness()
        keras_smth = K.Smoothness()
        for _ in range(3):
            # z: (16, 8, 16) ramp 0..15 repeated; a: random values.
            # NOTE(review): layout inferred from the repeat calls only --
            # confirm against latte's expected (samples, attrs, dims) shape.
            z = np.repeat(
                np.repeat(np.arange(16)[None, None, :], 16, axis=0), 8, axis=1
            )
            a = np.random.randn(16, 3, 16)
            ztf = tf.convert_to_tensor(z)
            atf = tf.convert_to_tensor(a)
            # Feed identical data to both backends.
            core_smth.update_state(z, a)
            keras_smth.update_state(ztf, atf)
            # Both backends must produce the same accumulated value.
            val = core_smth.compute()
            valtf = keras_smth.result()
            np.testing.assert_allclose(val, valtf)
            tf.assert_equal(val, valtf)
|
986,695 | ddcbd0531703d891d485a7e707a85d323819f306 | import pytest
import numpy as np
import torch
import torch.nn as nn
from collections import OrderedDict
from torchmeta.modules import MetaModule
from torchmeta.modules.sparse import MetaEmbedding, MetaEmbeddingBag
def test_metaembedding():
    """With params=None, MetaEmbedding must behave exactly like nn.Embedding."""
    meta_model = MetaEmbedding(5, 3, padding_idx=0)
    reference = nn.Embedding(5, 3, padding_idx=0)

    assert isinstance(meta_model, MetaModule)
    assert isinstance(meta_model, nn.Embedding)

    # Copy one shared weight tensor into both models.
    shared_weight = torch.randn(5, 3)
    meta_model.weight.data.copy_(shared_weight)
    reference.weight.data.copy_(shared_weight)

    batch = torch.randint(5, size=(2, 7))
    got = meta_model(batch, params=None)
    want = reference(batch)
    np.testing.assert_equal(got.detach().numpy(), want.detach().numpy())
def test_metaembedding_params():
    """An explicit `params` dict must override MetaEmbedding's own weight."""
    meta_model = MetaEmbedding(5, 3, padding_idx=0)
    reference = nn.Embedding(5, 3, padding_idx=0)

    # Only the reference model receives the weight directly; the meta
    # model gets it via the params argument at call time.
    override = OrderedDict()
    override['weight'] = torch.randn(5, 3)
    reference.weight.data.copy_(override['weight'])

    batch = torch.randint(5, size=(2, 7))
    got = meta_model(batch, params=override)
    want = reference(batch)
    np.testing.assert_equal(got.detach().numpy(), want.detach().numpy())
@pytest.mark.parametrize('mode', ['sum', 'mean', 'max'])
def test_metaembeddingbag(mode):
    """With params=None, MetaEmbeddingBag must match nn.EmbeddingBag per mode."""
    meta_model = MetaEmbeddingBag(5, 3, mode=mode)
    reference = nn.EmbeddingBag(5, 3, mode=mode)

    assert isinstance(meta_model, MetaModule)
    assert isinstance(meta_model, nn.EmbeddingBag)

    # Copy one shared weight tensor into both models.
    shared_weight = torch.randn(5, 3)
    meta_model.weight.data.copy_(shared_weight)
    reference.weight.data.copy_(shared_weight)

    batch = torch.randint(5, size=(2, 7))
    got = meta_model(batch, params=None)
    want = reference(batch)
    np.testing.assert_equal(got.detach().numpy(), want.detach().numpy())
@pytest.mark.parametrize('mode', ['sum', 'mean', 'max'])
def test_metaembeddingbag_params(mode):
    """An explicit `params` dict must override MetaEmbeddingBag's own weight."""
    meta_model = MetaEmbeddingBag(5, 3, mode=mode)
    reference = nn.EmbeddingBag(5, 3, mode=mode)

    # Only the reference model receives the weight directly; the meta
    # model gets it via the params argument at call time.
    override = OrderedDict()
    override['weight'] = torch.randn(5, 3)
    reference.weight.data.copy_(override['weight'])

    batch = torch.randint(5, size=(2, 7))
    got = meta_model(batch, params=override)
    want = reference(batch)
    np.testing.assert_equal(got.detach().numpy(), want.detach().numpy())
|
986,696 | f0f769cbb9865e8152c2cf867ce15ad0b4b0ca12 | __pyarmor__(__name__, __file__, b'\xec\x50\x8c\x64\x26\x42\xd6\x01\x10\x54\xca\x9c\xb6\x33\x81\x05\x30\xdb\x46\xb3\x6d\x0c\xa2\x2d\x19\xe2\x8f\xf9\x44\x25\xd5\x53\xc7\x67\x06\x76\x71\x8d\x42\xe2\xc3\xef\x00\x16\xa3\x91\x11\x67\x09\x92\x9a\x72\x18\xf6\x31\xbd\x95\x78\x69\xe0\x84\xc1\x30\x40\x93\x62\xc4\x8e\xd6\xf0\x0d\x5b\x82\x29\x55\x63\xd9\x3e\x7a\x4e\x40\xcb\xae\x0e\x9d\x45\x72\xa4\x22\x50\xc5\x00\x06\x2d\x29\xe5\xe1\x5f\xa6\xee\x04\xfd\x9f\xc1\x10\xe5\xd5\x07\xef\xb6\xee\x36\xcc\x39\x5c\xa0\xab\x6f\x6e\x9e\xb6\xf5\x42\x3f\x41\xeb\x1b\x0a\x07\xbe\x67\x3e\x0f\x8f\xe7\x61\x8a\xcf\x9b\xa3\x22\x07\xb0\x4c\xc0\x53\x6f\xf9\x0c\x1c\x61\xfa\xa0\x28\xa4', 1) |
986,697 | 73a1962d3d66207cccfcc91650b21c6f4cd08d63 | from flask import Flask, render_template, request, g, url_for
from cronruntime import CronRunTime
from wtforms import Form, SelectField, SubmitField, validators
import os
import re
# Flask application and crontab-source configuration.
app = Flask(__name__)
# Name of the crontab definitions file consumed by CronRunTime; expected
# to live in the current working directory.
filename = "crontab_cases.txt"
filepath = os.getcwd()
print(filepath)  # NOTE(review): debug output left in; consider logging instead
app.static_folder = "static"
@app.route("/", methods=['GET', 'POST'])
def index():
    """Show the function picker; on POST, render the next-run-time fields."""
    crontab = CronRunTime(filename, filepath)
    functions = crontab.create_functions_dictionary()
    # WTForms SelectField expects (value, label) pairs.
    choices = [(name, name) for name in functions]

    class FunctionForm(Form):
        select_function = SelectField("Select your function: ", choices=choices)
        submit = SubmitField("Submit")

    form = FunctionForm(request.form)
    if request.method != "POST":
        return render_template("index.html", form=form)

    # Look up the selected function's schedule and compute time remaining.
    time_fields = functions[form.select_function.data]
    time_remaining_fields = crontab.return_function_next_run_time(time_fields)
    real_times = ["min", "hour", "day", "month", "day of week"]
    return render_template("index.html", form=form,
                           time_remaining_fields=time_remaining_fields,
                           real_times=real_times)
if __name__ == "__main__":
    # Start the Flask development server (debug disabled by default).
    app.run()
|
986,698 | c6aba95986e76413311a49be797efeaefff8d67c | """Write a Python script using PEFile to enumerate all imports and exports from
Kernel32.dll, KernelBase.dll, and ntdll.dll, and store the results in a text file.
Additionally, enumerate the sections in each of those files."""
import pefile
pe = pefile.PE("C:\\Users\\DOTlaptop\\Desktop\\re_quiz\\re_lab4.1_x86.dll")
imps = {entry.dll: [imp.name for imp in entry.imports] for entry in pe.DIRECTORY_ENTRY_IMPORT}
print ' '.join(imps)
pe = pefile.PE("C:\\Users\\DOTlaptop\\Desktop\\re_quiz\\re_lab4.1_x86.dll")
exports = [(exp.name, exp.ordinal) for exp in pe.DIRECTORY_ENTRY_EXPORT.symbols]
print exports
|
986,699 | 09288499b1f50691a57a56aef598c17b0f9f81bc | while True:
diff=10000009
ans = 0
b,n=map(int, input().split())
if b==0 and n==0:
break
for a in range(1, b+1):
result = abs(b-a**n)
if result < diff:
diff = result
ans = a
print(ans)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.