max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
Speedo/plugins/ping.py | aviskumar/speedo | 0 | 6616951 | <reponame>aviskumar/speedo
import asyncio
import datetime
from . import *
@speedo.on(Speedo_cmd(pattern="ping$"))
@speedo.on(sudo_cmd(pattern="ping$", allow_sudo=True))
async def pong(speedo):
if speedo.fwd_from:
return
start = datetime.datetime.now()
event = await eor(speedo, "`·.·★ ℘ıŋɠ ★·.·´")
end = datetime.datetime.now()
ms = (end - start).microseconds / 1000
await event.edit(
f"╰•★★ ℘ơŋɠ ★★•╯\n\n ⚘ `{ms}`\n ⚘ __**Oɯɳҽɾ**__ **:** {speedo_mention}"
)
CmdHelp("ping").add_command(
"ping", None, "Checks the ping speed of your SPEEDOBOT"
).add_warning(
"✅ Harmless Module"
).add()
# Speedo
| import asyncio
import datetime
from . import *
@speedo.on(Speedo_cmd(pattern="ping$"))
@speedo.on(sudo_cmd(pattern="ping$", allow_sudo=True))
async def pong(speedo):
if speedo.fwd_from:
return
start = datetime.datetime.now()
event = await eor(speedo, "`·.·★ ℘ıŋɠ ★·.·´")
end = datetime.datetime.now()
ms = (end - start).microseconds / 1000
await event.edit(
f"╰•★★ ℘ơŋɠ ★★•╯\n\n ⚘ `{ms}`\n ⚘ __**Oɯɳҽɾ**__ **:** {speedo_mention}"
)
CmdHelp("ping").add_command(
"ping", None, "Checks the ping speed of your SPEEDOBOT"
).add_warning(
"✅ Harmless Module"
).add()
# Speedo | none | 1 | 2.65817 | 3 | |
models_with_no_copasi_or_sedml.py | sys-bio/temp-biomodels | 0 | 6616952 | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 3 08:59:04 2021
@author: Lucian
"""
from os import walk
noSed = []
copasiButNoSed = []
for root, dirs, files in walk("final/"):
sedml = False
copasi = False
for file in files:
if ".cps" in file:
copasi = True
if ".sedml" in file:
sedml = True
if not copasi and not sedml:
noSed.append(root.replace("final/", ""))
elif copasi and not sedml:
copasiButNoSed.append(root.replace("final/", ""))
out = open("noSED.txt", "w")
for biomd in noSed:
out.write(biomd + "\n")
out.close()
out = open("copasiNoSED.txt", "w")
for biomd in copasiButNoSed:
out.write(biomd + "\n")
out.close()
| # -*- coding: utf-8 -*-
"""
Created on Thu Jun 3 08:59:04 2021
@author: Lucian
"""
from os import walk
noSed = []
copasiButNoSed = []
for root, dirs, files in walk("final/"):
sedml = False
copasi = False
for file in files:
if ".cps" in file:
copasi = True
if ".sedml" in file:
sedml = True
if not copasi and not sedml:
noSed.append(root.replace("final/", ""))
elif copasi and not sedml:
copasiButNoSed.append(root.replace("final/", ""))
out = open("noSED.txt", "w")
for biomd in noSed:
out.write(biomd + "\n")
out.close()
out = open("copasiNoSED.txt", "w")
for biomd in copasiButNoSed:
out.write(biomd + "\n")
out.close()
| en | 0.819561 | # -*- coding: utf-8 -*- Created on Thu Jun 3 08:59:04 2021 @author: Lucian | 2.601645 | 3 |
advent2015/day04.py | Ginsusamurai/advent_2015 | 0 | 6616953 | #!/usr/bin/env python3
import csv
import re
import json
import hashlib
if __name__ == "__main__":
with open("inputs/day04input.csv", newline='') as f:
reader = csv.reader(f)
key = list(reader)[0][0]
print("Brute Forcing...please wait")
#input the key and a number in decimal
#add key to numbers and cycle through until hash begins with 5 zeroes
#answer is what number was input
lowestInt = 0
hash = key + json.dumps(lowestInt)
while not re.search("^00000", hashlib.md5(hash.encode()).hexdigest()):
lowestInt += 1
hash = key + json.dumps(lowestInt)
print("Day04.1 -> The Number {} matches the hash requirement of 5 leading zeroes.".format(lowestInt))
print("Next step may take longer...")
lowestInt = 0
for i in range(9900000, 10000000): #narrowed the range for faster run time
print(i)
hash = key + json.dumps(i)
if re.search("^000000", hashlib.md5(hash.encode()).hexdigest()):
lowestInt = i
break
print("Day04.2 -> The number {} matches the hash requirement of 6 leading zeroes.".format(lowestInt)) | #!/usr/bin/env python3
import csv
import re
import json
import hashlib
if __name__ == "__main__":
with open("inputs/day04input.csv", newline='') as f:
reader = csv.reader(f)
key = list(reader)[0][0]
print("Brute Forcing...please wait")
#input the key and a number in decimal
#add key to numbers and cycle through until hash begins with 5 zeroes
#answer is what number was input
lowestInt = 0
hash = key + json.dumps(lowestInt)
while not re.search("^00000", hashlib.md5(hash.encode()).hexdigest()):
lowestInt += 1
hash = key + json.dumps(lowestInt)
print("Day04.1 -> The Number {} matches the hash requirement of 5 leading zeroes.".format(lowestInt))
print("Next step may take longer...")
lowestInt = 0
for i in range(9900000, 10000000): #narrowed the range for faster run time
print(i)
hash = key + json.dumps(i)
if re.search("^000000", hashlib.md5(hash.encode()).hexdigest()):
lowestInt = i
break
print("Day04.2 -> The number {} matches the hash requirement of 6 leading zeroes.".format(lowestInt)) | en | 0.881855 | #!/usr/bin/env python3 #input the key and a number in decimal #add key to numbers and cycle through until hash begins with 5 zeroes #answer is what number was input #narrowed the range for faster run time | 3.503752 | 4 |
cpydist/bdist_rpm.py | kangni/mysql-connector-python | 1 | 6616954 | <reponame>kangni/mysql-connector-python<gh_stars>1-10
# Copyright (c) 2020, Oracle and/or its affiliates.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0, as
# published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms,
# as designated in a particular file or component or in included license
# documentation. The authors of MySQL hereby grant you an
# additional permission to link the program and your derivative works
# with the separately licensed software that they have included with
# MySQL.
#
# Without limiting anything contained in the foregoing, this file,
# which is part of MySQL Connector/Python, is also subject to the
# Universal FOSS Exception, version 1.0, a copy of which can be found at
# http://oss.oracle.com/licenses/universal-foss-exception.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License, version 2.0, for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Implements the Distutils commands for creating RPM packages."""
import os
import subprocess
from distutils.dir_util import mkpath
from distutils.errors import DistutilsError
from distutils.file_util import copy_file
from . import BaseCommand, EDITION, VERSION, VERSION_EXTRA
from .utils import linux_distribution
RPM_SPEC = os.path.join("cpydist", "data", "rpm",
"mysql-connector-python.spec")
LINUX_DIST = linux_distribution()
VERSION_TEXT_SHORT = "{0}.{1}.{2}".format(*VERSION[0:3])
class DistRPM(BaseCommand):
"""Create a RPM distribution."""
description = "create a RPM distribution"
user_options = BaseCommand.user_options + [
("build-base=", "d",
"base directory for build library"),
("dist-dir=", "d",
"directory to put final built distributions in"),
('pre-release', None,
"this is a pre-release (changes RPM release number)"),
("rpm-base=", "d",
"base directory for creating RPMs (default <bdist-dir>/rpm)"),
("pre-release", None,
"this is a pre-release (changes RPM release number)"),
]
build_base = None
dist_dir = None
rpm_base = None
pre_release = None
_cmd_dist_tarball = "sdist"
_rpm_dirs = {}
def finalize_options(self):
"""Finalize the options."""
BaseCommand.finalize_options(self)
self.set_undefined_options("build",
("build_base", "build_base"))
self.set_undefined_options(self._cmd_dist_tarball,
("dist_dir", "dist_dir"))
if not self.rpm_base:
self.rpm_base = os.path.abspath(
os.path.join(self.build_base, "rpmbuild"))
def _populate_rpm_topdir(self, rpm_base):
"""Create and populate the RPM topdir."""
mkpath(rpm_base)
dirs = ["BUILD", "RPMS", "SOURCES", "SPECS", "SRPMS"]
self._rpm_dirs = {}
for dirname in dirs:
self._rpm_dirs[dirname] = os.path.join(rpm_base, dirname)
self.mkpath(self._rpm_dirs[dirname])
def _check_rpmbuild(self):
"""Check if we can run rpmbuild.
Raises DistutilsError when rpmbuild is not available.
"""
try:
devnull = open(os.devnull, "w")
subprocess.Popen(["rpmbuild", "--version"],
stdin=devnull, stdout=devnull, stderr=devnull)
except OSError:
raise DistutilsError("Could not execute rpmbuild. Make sure "
"it is installed and in your PATH")
def _create_rpm(self, rpm_name, spec):
"""Create RPM."""
self.log.info("Creating RPM using rpmbuild")
macro_bdist_dir = "bdist_dir {}".format(os.path.join(rpm_name, ""))
cmd = [
"rpmbuild",
"-ba",
"--define", macro_bdist_dir,
"--define", "_topdir {}".format(os.path.abspath(self.rpm_base)),
"--define", "version {}".format(VERSION_TEXT_SHORT),
spec
]
if not self.verbose:
cmd.append("--quiet")
if EDITION:
cmd.extend(["--define", "edition {}".format(EDITION)])
if self.label:
cmd.extend(["--define", "label {}".format(self.label)])
if self.byte_code_only:
cmd.extend(["--define", "byte_code_only 1"])
cmd.extend(["--define", "lic_type Commercial"])
if self.pre_release:
cmd.extend(["--define", "pre_release 1"])
if VERSION_EXTRA:
cmd.extend(["--define", "version_extra {}".format(VERSION_EXTRA)])
cmd.extend(["--define", "mysql_capi {}".format(self.with_mysql_capi)])
if self.with_openssl_include_dir:
cmd.extend(["--define", "openssl_include_dir {}"
"".format(self.with_openssl_include_dir)])
cmd.extend(["--define", "openssl_lib_dir {}"
"".format(self.with_openssl_lib_dir)])
cmd.extend(["--define", "protobuf_include_dir {}"
"".format(self.with_protobuf_include_dir)])
cmd.extend(["--define", "protobuf_lib_dir {}"
"".format(self.with_protobuf_lib_dir)])
cmd.extend(["--define", "protoc {}".format(self.with_protoc)])
if self.extra_compile_args:
cmd.extend(["--define", "extra_compile_args '{0}'"
"".format(self.extra_compile_args)])
if self.extra_link_args:
cmd.extend(["--define",
"extra_link_args '{0}'".format(self.extra_link_args)])
self.spawn(cmd)
for base, dirs, files in os.walk(self.rpm_base):
for filename in files:
if filename.endswith(".rpm"):
filepath = os.path.join(base, filename)
copy_file(filepath, self.dist_dir)
def run(self):
"""Run the command."""
if not self.dry_run:
self._check_rpmbuild()
self.mkpath(self.dist_dir)
self._populate_rpm_topdir(self.rpm_base)
cmd_sdist = self.get_finalized_command(self._cmd_dist_tarball)
cmd_sdist.dist_dir = self._rpm_dirs["SOURCES"]
cmd_sdist.label = self.label
cmd_sdist.run()
rpm_name = "mysql-connector-python-{}".format("-{}".format(self.label)
if self.label else "")
self._create_rpm(rpm_name=rpm_name, spec=RPM_SPEC)
self.remove_temp()
| # Copyright (c) 2020, Oracle and/or its affiliates.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0, as
# published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms,
# as designated in a particular file or component or in included license
# documentation. The authors of MySQL hereby grant you an
# additional permission to link the program and your derivative works
# with the separately licensed software that they have included with
# MySQL.
#
# Without limiting anything contained in the foregoing, this file,
# which is part of MySQL Connector/Python, is also subject to the
# Universal FOSS Exception, version 1.0, a copy of which can be found at
# http://oss.oracle.com/licenses/universal-foss-exception.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License, version 2.0, for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Implements the Distutils commands for creating RPM packages."""
import os
import subprocess
from distutils.dir_util import mkpath
from distutils.errors import DistutilsError
from distutils.file_util import copy_file
from . import BaseCommand, EDITION, VERSION, VERSION_EXTRA
from .utils import linux_distribution
RPM_SPEC = os.path.join("cpydist", "data", "rpm",
"mysql-connector-python.spec")
LINUX_DIST = linux_distribution()
VERSION_TEXT_SHORT = "{0}.{1}.{2}".format(*VERSION[0:3])
class DistRPM(BaseCommand):
"""Create a RPM distribution."""
description = "create a RPM distribution"
user_options = BaseCommand.user_options + [
("build-base=", "d",
"base directory for build library"),
("dist-dir=", "d",
"directory to put final built distributions in"),
('pre-release', None,
"this is a pre-release (changes RPM release number)"),
("rpm-base=", "d",
"base directory for creating RPMs (default <bdist-dir>/rpm)"),
("pre-release", None,
"this is a pre-release (changes RPM release number)"),
]
build_base = None
dist_dir = None
rpm_base = None
pre_release = None
_cmd_dist_tarball = "sdist"
_rpm_dirs = {}
def finalize_options(self):
"""Finalize the options."""
BaseCommand.finalize_options(self)
self.set_undefined_options("build",
("build_base", "build_base"))
self.set_undefined_options(self._cmd_dist_tarball,
("dist_dir", "dist_dir"))
if not self.rpm_base:
self.rpm_base = os.path.abspath(
os.path.join(self.build_base, "rpmbuild"))
def _populate_rpm_topdir(self, rpm_base):
"""Create and populate the RPM topdir."""
mkpath(rpm_base)
dirs = ["BUILD", "RPMS", "SOURCES", "SPECS", "SRPMS"]
self._rpm_dirs = {}
for dirname in dirs:
self._rpm_dirs[dirname] = os.path.join(rpm_base, dirname)
self.mkpath(self._rpm_dirs[dirname])
def _check_rpmbuild(self):
"""Check if we can run rpmbuild.
Raises DistutilsError when rpmbuild is not available.
"""
try:
devnull = open(os.devnull, "w")
subprocess.Popen(["rpmbuild", "--version"],
stdin=devnull, stdout=devnull, stderr=devnull)
except OSError:
raise DistutilsError("Could not execute rpmbuild. Make sure "
"it is installed and in your PATH")
def _create_rpm(self, rpm_name, spec):
"""Create RPM."""
self.log.info("Creating RPM using rpmbuild")
macro_bdist_dir = "bdist_dir {}".format(os.path.join(rpm_name, ""))
cmd = [
"rpmbuild",
"-ba",
"--define", macro_bdist_dir,
"--define", "_topdir {}".format(os.path.abspath(self.rpm_base)),
"--define", "version {}".format(VERSION_TEXT_SHORT),
spec
]
if not self.verbose:
cmd.append("--quiet")
if EDITION:
cmd.extend(["--define", "edition {}".format(EDITION)])
if self.label:
cmd.extend(["--define", "label {}".format(self.label)])
if self.byte_code_only:
cmd.extend(["--define", "byte_code_only 1"])
cmd.extend(["--define", "lic_type Commercial"])
if self.pre_release:
cmd.extend(["--define", "pre_release 1"])
if VERSION_EXTRA:
cmd.extend(["--define", "version_extra {}".format(VERSION_EXTRA)])
cmd.extend(["--define", "mysql_capi {}".format(self.with_mysql_capi)])
if self.with_openssl_include_dir:
cmd.extend(["--define", "openssl_include_dir {}"
"".format(self.with_openssl_include_dir)])
cmd.extend(["--define", "openssl_lib_dir {}"
"".format(self.with_openssl_lib_dir)])
cmd.extend(["--define", "protobuf_include_dir {}"
"".format(self.with_protobuf_include_dir)])
cmd.extend(["--define", "protobuf_lib_dir {}"
"".format(self.with_protobuf_lib_dir)])
cmd.extend(["--define", "protoc {}".format(self.with_protoc)])
if self.extra_compile_args:
cmd.extend(["--define", "extra_compile_args '{0}'"
"".format(self.extra_compile_args)])
if self.extra_link_args:
cmd.extend(["--define",
"extra_link_args '{0}'".format(self.extra_link_args)])
self.spawn(cmd)
for base, dirs, files in os.walk(self.rpm_base):
for filename in files:
if filename.endswith(".rpm"):
filepath = os.path.join(base, filename)
copy_file(filepath, self.dist_dir)
def run(self):
"""Run the command."""
if not self.dry_run:
self._check_rpmbuild()
self.mkpath(self.dist_dir)
self._populate_rpm_topdir(self.rpm_base)
cmd_sdist = self.get_finalized_command(self._cmd_dist_tarball)
cmd_sdist.dist_dir = self._rpm_dirs["SOURCES"]
cmd_sdist.label = self.label
cmd_sdist.run()
rpm_name = "mysql-connector-python-{}".format("-{}".format(self.label)
if self.label else "")
self._create_rpm(rpm_name=rpm_name, spec=RPM_SPEC)
self.remove_temp() | en | 0.902621 | # Copyright (c) 2020, Oracle and/or its affiliates. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License, version 2.0, as # published by the Free Software Foundation. # # This program is also distributed with certain software (including # but not limited to OpenSSL) that is licensed under separate terms, # as designated in a particular file or component or in included license # documentation. The authors of MySQL hereby grant you an # additional permission to link the program and your derivative works # with the separately licensed software that they have included with # MySQL. # # Without limiting anything contained in the foregoing, this file, # which is part of MySQL Connector/Python, is also subject to the # Universal FOSS Exception, version 1.0, a copy of which can be found at # http://oss.oracle.com/licenses/universal-foss-exception. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License, version 2.0, for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Implements the Distutils commands for creating RPM packages. Create a RPM distribution. Finalize the options. Create and populate the RPM topdir. Check if we can run rpmbuild. Raises DistutilsError when rpmbuild is not available. Create RPM. Run the command. | 1.438094 | 1 |
nDGP/nonlinearLimber.py | billwright93/pybird | 0 | 6616955 | <reponame>billwright93/pybird
import os
import numpy as np
from numpy import pi, cos, sin, log, exp, sqrt, trapz
from scipy.interpolate import interp1d
from scipy.special import gamma
from fftlog import FFTLog, MPC
from nonlinear import M13a, M22a
from common import co
#### LOOP OVER nlens and nsource !!!
class Limber(object):
"""
...
Attributes
----------
co : class
An object of type Common() used to share data
"""
def __init__(self, theta, z, nlens, nsource, gg=True, load=True, save=True, path='./', NFFT=256, km=1.):
self.gg = gg
self.km = km
self.z = z
self.theta, _ = np.meshgrid(theta, z, indexing='ij')
self.nlens = np.asarray(nlens)
self.nsource = np.asarray(nsource)
self.Ng = self.nlens.shape[0]
self.Ns = self.nsource.shape[0]
self.Nss = self.Ns*(self.Ns+1)//2
self.Nsg = self.Ns*self.Ng
self.Ngg = self.Ng
self.N = max([self.Nss, self.Nsg])
self.fftsettings = dict(Nmax=NFFT, xmin=1.5e-5, xmax=1.e3, bias=-1.6)
self.fft = FFTLog(**self.fftsettings)
if self.gg: self.pyegg = os.path.join(path, 'pyegg%s_limber.npz') % (NFFT)
else: self.pyegg = os.path.join(path, 'pyegg%s_limber_nogg.npz') % (NFFT)
if load is True:
try:
L = np.load( self.pyegg )
if (self.fft.Pow - L['Pow']).any():
print ('Loaded loop matrices do not correspond to asked FFTLog configuration. \n Computing new matrices.')
load = False
else:
self.M11, self.M22, self.M13, self.Mct = L['M11'], L['M22'], L['M13'], L['Mct']
save = False
except:
print ('Can\'t load loop matrices at %s. \n Computing new matrices.' % path)
load = False
if load is False:
self.setM()
self.setM11()
self.setMct()
self.setM13()
self.setM22()
if save is True:
try: np.savez(self.pyegg, Pow=self.fft.Pow, M11=self.M11, M22=self.M22, M13=self.M13, Mct=self.Mct)
except: print ('Can\'t save loop matrices at %s.' % path)
self.setsPow()
# To speed-up matrix multiplication:
self.optipath13 = np.einsum_path('ns,ms,bnm->bs', self.sPow, self.sPow, self.M22, optimize='optimal')[0]
self.optipath22 = np.einsum_path('ns,ms,bnm->bs', self.sPow, self.sPow, self.M13, optimize='optimal')[0]
def setsPow(self):
""" Compute the r's to the powers of the FFTLog to evaluate the loop 'ular' correlation function. Called at the instantiation of the class. """
#slog = np.geomspace(1e-4, 3., 40)
#slin = np.arange(3, 200., 1)
#slog2 = np.geomspace(200, 1e4, 20)
#self.s = np.unique(np.concatenate([slog, slin, slog2]))
self.s = np.geomspace(1.e-4, 1.e3, 200)
self.sPow = exp(np.einsum('n,s->ns', -self.fft.Pow - 3. - 0.5, log(self.s)))
def setM(self):
""" Compute the power spectrum to 'ular' correlation function spherical Bessel transform matrices. Called at the instantiation of the class if the matrices are not loaded. """
M = np.empty(shape=(3, self.fft.Pow.shape[0], self.fft.Pow.shape[0]), dtype='complex')
for l in range(3):
for u, n1 in enumerate(-0.5 * self.fft.Pow):
for v, n2 in enumerate(-0.5 * self.fft.Pow):
self.M[l, u, v] = (2*pi)**.5 * MPC(2 * l - 0.5, n1 + n2 - 1.5)
def setM22(self):
""" Compute the 22-loop matrices. Called at the instantiation of the class if the matrices are not loaded. """
self.Mbb22 = np.empty(shape=(6, self.fft.Pow.shape[0], self.fft.Pow.shape[0]), dtype='complex')
self.Mbm22 = np.empty(shape=(3, self.fft.Pow.shape[0], self.fft.Pow.shape[0]), dtype='complex')
self.Mmm22 = np.empty(shape=(2, self.fft.Pow.shape[0], self.fft.Pow.shape[0]), dtype='complex')
Ma = np.empty(shape=(self.fft.Pow.shape[0], self.fft.Pow.shape[0]), dtype='complex') # common piece of M22
Mmm = np.empty(shape=(self.fft.Pow.shape[0], self.fft.Pow.shape[0]), dtype='complex') # matter-matter M22
for u, n1 in enumerate(-0.5 * self.fft.Pow):
for v, n2 in enumerate(-0.5 * self.fft.Pow):
Ma[u, v] = M22a(n1, n2)
Mmm[u, v] = M22mm[0](n1, n2)
for i in range(6):
Mbb = np.empty(shape=(self.fft.Pow.shape[0], self.fft.Pow.shape[0]), dtype='complex')
Mbm = np.empty(shape=(self.fft.Pow.shape[0], self.fft.Pow.shape[0]), dtype='complex')
for u, n1 in enumerate(-0.5 * self.fft.Pow):
for v, n2 in enumerate(-0.5 * self.fft.Pow):
Mbb[u, v] = M22bb[i](n1, n2)
if i < 3: Mbm[u, v] = M22bb[i](n1, n2)
self.Mbb22[i] = Mbb
if i < 3: self.Mbm22[i] = Mbm
self.Mbb22 = np.einsum('nm,nm,bnm->bnm', self.M[0], Ma, self.Mbb22)
self.Mbm22 = np.einsum('nm,nm,bnm->bnm', self.M[1], Ma, self.Mbm22)
self.Mmm22 = np.einsum('lnm,nm,nm->lnm', self.M[[0,2]], Ma, Mmm)
if self.gg: self.M22 = np.hstack([self.Mmm22, self.Mbm22, self.Mbb22])
else: self.M22 = np.hstack([self.Mmm22, self.Mbm22])
def setM13(self):
""" Compute the 13-loop matrices. Called at the instantiation of the class if the matrices are not loaded. """
self.Mbb13 = np.empty(shape=(2, self.fft.Pow.shape[0], self.fft.Pow.shape[0]), dtype='complex')
self.Mbm13 = np.empty(shape=(2, self.fft.Pow.shape[0], self.fft.Pow.shape[0]), dtype='complex')
self.Mmm13 = np.empty(shape=(2, self.fft.Pow.shape[0], self.fft.Pow.shape[0]), dtype='complex')
Ma = M13a(-0.5 * self.fft.Pow)
Mmm = M13mm[0](-0.5 * self.fft.Pow)
for i in range(2):
self.Mbb13[i] = M13bb[i](-0.5 * self.fft.Pow)
self.Mbm13[i] = M13bm[i](-0.5 * self.fft.Pow)
self.Mbb13 = np.einsum('nm,n,bn->bnm', self.M[0], Ma, self.Mbb13)
self.Mbm13 = np.einsum('nm,n,bn->bnm', self.M[1], Ma, self.Mbm13)
self.Mmm13 = np.einsum('lnm,n,n->lnm', self.M[[0,2]], Ma, Mmm)
if self.gg: self.M13 = np.hstack([self.Mmm13, self.Mbm13, self.Mbb13])
else: self.M22 = np.hstack([self.Mmm13, self.Mbm13])
def setM11(self):
""" Compute the linear matrices. Called at the instantiation of the class if the matrices are not loaded. """
self.M11 = np.empty(shape=(3, self.fft.Pow.shape[0]), dtype='complex')
for l in range(3): self.M11[l] = (2*pi)**.5 * MPC(2 * l - 0.5, -0.5 * self.fft.Pow)
def setMct(self):
""" Compute the counterterm matrices. Called at the instantiation of the class if the matrices are not loaded. """
self.Mct = np.empty(shape=(3, self.fft.Pow.shape[0]), dtype='complex')
for l in range(3): self.Mct[l, u] = (2*pi)**.5 * MPC(2 * l - 0.5, -0.5 * self.fft.Pow - 1.)
def getA11(self, CoefsPow):
""" Perform the linear correlation function matrix multiplications """
A11 = np.real(np.einsum('ns,ln->ls', CoefsPow, self.M11))
if self.gg: return np.array([A11[0], A11[2], A11[1], A11[0]])
else: return np.array([A11[0], A11[2], A11[1]])
def getAct(self, CoefsPow):
""" Perform the counterterm correlation function matrix multiplications """
Act = self.s**-2 * np.real(np.einsum('ns,ln->ls', CoefsPow, self.Mct))
if self.gg: return np.array([Act[0], Act[2], Act[1], Act[0]])
else: return np.array([Act[0], Act[2], Act[1]])
def getA22(self, CoefsPow):
""" Perform the 22-loop correlation function matrix multiplications """
return np.real(np.einsum('ns,ms,bnm->bs', CoefsPow, CoefsPow, self.M22, optimize=self.optipath22))
def getA13(self, CoefsPow):
""" Perform the 13-loop correlation function matrix multiplications """
return np.real(np.einsum('ns,ms,bnm->bs', CoefsPow, CoefsPow, self.M13, optimize=self.optipath13))
def getA(self, bird, window=None):
coef = self.fft.Coef(bird.kin, bird.kin**-0.5 * self.Pin, window=.2)
coefsPow = np.einsum('n,ns->ns', Coef, self.sPow)
A11 = self.getA11(coefsPow, bird)
Act = self.getAct(coefsPow, bird)
A22 = self.getA22(coefsPow, bird)
A13 = self.getA13(coefsPow, bird)
return A11, Act, A22, A13
def Xi(self, bird, rz, dz_by_dr, Dz, Dfid, h, Omega0_m):
Dp2 = Dz**2 / Dfid**2
Dp4 = D2**2
lensing_factor = 1.5/conts.c**2 * h**2 * 1e10 * Omega0_m
r1, _ = np.meshgrid(rz, self.z, indexing='ij')
def lensing_efficiency(nz):
return lensing_factor * rz * (1+z) * np.trapz(np.heaviside(rz-r1, 0.) * nz * (rz-r1)/rz, x=self.z, axis=-1)
qshear = np.empty_like(self.nsource)
qgal = np.empty_like(self.nlens)
for i, ns in enumerate(self.nsource): qshear[i] = self.lensing_efficiency(ns)
for i, nl in enumerate(self.nlens): qgal[i] = dz_by_dr * nl
qsqs = np.zeros(shape=(self.N))
for i, qi in enumerate(qshear):
for j, qj in enumerate(qshear):
if qj <= qi: qsqs[i+j] = qi*qj
qsqg = np.zeros(shape=(self.N))
for i, qi in enumerate(qshear):
for j, qj in enumerate(qgal):
qsqg[i+j] = qi*qj
if self.gg:
qgqg = np.zeros(shape=(self.N))
for i, qi in enumerate(qgal):
qgqg[i] = qi**2
if self.gg:
qq11 = np.array([qsqs, qsqs, qsqd, qgqg])
qq13 = np.array([qsqs, qsqs, qsqd, qsqd, qgqg, qgqg])
qq22 = np.array([qsqs, qsqs, qsqd, qsqd, qgqg, qgqg, qgqg, qgqg, qgqg, qgqg])
else:
qq11 = np.array([qsqs, qsqs, qsqd])
qq13 = np.array([qsqs, qsqs, qsqd, qsqd])
qq22 = np.array([qsqs, qsqs, qsqd, qsqd])
def time_integral(qq, DD, A):
A1 = interp1d(self.s, A, kind='cubic', axis=-1)(self.theta * rz)
return np.trapz(np.einsum('biz,z,btz->bitz', qq, DD, A1), x=rz, axis=-1)
A11 = time_integral(qq11, Dp2, A11)
Act = time_integral(qq11, Dp2, Act)
A13 = time_integral(qq13, Dp4, A13)
A22 = time_integral(qq22, Dp4, A22)
self.Assp = np.array([A11[0], Act[0], A13[0], A22[0]])[:,:self.Nss]
self.Assm = np.array([A11[1], Act[1], A13[1], A22[1]])[:,:self.Nss]
self.Asg = np.array([A11[2], Act[2], A13[2], A13[3], A22[2], A22[3], A22[4]])[:,self.Nsg]
if self.gg: self.Agg = np.array([A11[3], Act[3], A13[4], A13[5], A22[5], A22[6], A22[7], A22[8], A22[9], A22[10]])[:,:self.Ngg]
def setBias(self, bias):
b1 = bias["b1"]
b2 = bias["b2"]
b3 = bias["b3"]
b4 = bias["b4"]
css = bias["css"] / self.km**2
csg = bias["csg"] / self.km**2
if self.gg: cgg = bias["cgg"] / self.km**2
self.bss = np.array([1., 2.*css, 1., 1.])
self.Xssp = np.einsum('b,bitz->itz', bss, self.Assp)
self.Xssm = np.einsum('b,bitz->itz', bss, self.Assm)
self.bsg = np.array([b1, 2.*csg, b1, b3, b1, b2, b4])
self.Xsg = np.einsum('b,bitz->itz', bsg, self.Asg)
if self.gg:
bgg = np.array([b1**2 + 2.*b1*cgg, b1**2, b1*b3, b1**2, b1*b2, b1*b4, b2**2, b2*b4, b4**2])
self.Xgg = np.einsum('b,bitz->itz', bgg, self.Agg)
M22bb = { # galaxy-galaxy
0: lambda n1, n2: (6 + n1**4 * (4 - 24 * n2) - 7 * n2 + 8 * n1**5 * n2 - 13 * n2**2 + 4 * n2**3 + 4 * n2**4 + n1**2 * (-13 + 38 * n2 + 12 * n2**2 - 8 * n2**3) + 2 * n1**3 * (2 - 5 * n2 - 4 * n2**2 + 8 * n2**3) + n1 * (-7 - 6 * n2 + 38 * n2**2 - 10 * n2**3 - 24 * n2**4 + 8 * n2**5)) / (4. * n1 * (1 + n1) * (-1 + 2 * n1) * n2 * (1 + n2) * (-1 + 2 * n2)),
1: lambda n1, n2: (-18 + n1**2 * (1 - 11 * n2) - 12 * n2 + n2**2 + 10 * n2**3 + 2 * n1**3 * (5 + 7 * n2) + n1 * (-12 - 38 * n2 - 11 * n2**2 + 14 * n2**3)) / (7. * n1 * (1 + n1) * n2 * (1 + n2)),
2: lambda n1, n2: (-3 * n1 + 2 * n1**2 + n2 * (-3 + 2 * n2)) / (n1 * n2),
3: lambda n1, n2: (-4 * (-24 + n2 + 10 * n2**2) + 2 * n1 * (-2 + 51 * n2 + 21 * n2**2) + n1**2 * (-40 + 42 * n2 + 98 * n2**2)) / (49. * n1 * (1 + n1) * n2 * (1 + n2)),
4: lambda n1, n2: (4 * (3 - 2 * n2 + n1 * (-2 + 7 * n2))) / (7. * n1 * n2),
5: lambda n1, n2: 2.
} # b1**2, b1*b2, b1*b4, b2**2, b2*b4, b4**2
M13bb = { # galaxy-galaxy
0: lambda n1: 1.125,
1: lambda n1: -(1 / (1. + n1))
} # b1**2, b1*b3
M13bm = { # galaxy-matter
0: lambda n1: (5 + 9*n1)/(8. + 8*n1),
1: lambda n1: -(1/(2. + 2*n1))
} # b1, b3
M22bm = { # galaxy-matter
0: lambda n1, n2: ((-3 + 2*n1 + 2*n2)*(-2 + n1*(-1 + (13 - 6*n1)*n1) - n2 + 2*n1*(-3 + 2*n1)*(-9 + n1*(3 + 7*n1))*n2 + (13 + 2*n1*(-27 + 14*(-1 + n1)*n1))*n2**2 + 2*(-3 + n1*(-15 + 14*n1))*n2**3 + 28*n1*n2**4))/(28.*n1*(1 + n1)*(-1 + 2*n1)*n2*(1 + n2)*(-1 + 2*n2)),
1: lambda n1, n2: ((-3 + 2*n1 + 2*n2)*(-22 + 7*n1**2*(5 + 7*n2) + n2*(16 + 35*n2) + n1*(16 + 7*n2*(6 + 7*n2))))/(98.*n1*(1 + n1)*n2*(1 + n2)),
2: lambda n1, n2: ((-3 + 2*n1 + 2*n2)*(-4 + 7*n1 + 7*n2))/(14.*n1*n2)
} # b1, b2, b4
M22mm = { # matter-matter
0: lambda n1, n2: ((-3 + 2*n1 + 2*n2)*(-1 + 2*n1 + 2*n2)*(58 + 98*n1**3*n2 + (3 - 91*n2)*n2 + 7*n1**2*(-13 - 2*n2 + 28*n2**2) + n1*(3 + 2*n2*(-73 + 7*n2*(-1 + 7*n2)))))/(196.*n1*(1 + n1)*(-1 + 2*n1)*n2*(1 + n2)*(-1 + 2*n2))
}
M13mm = { # matter-matter
0: lambda n1: 1.125 - 1./(1. + n1)
}
| import os
import numpy as np
from numpy import pi, cos, sin, log, exp, sqrt, trapz
from scipy.interpolate import interp1d
from scipy.special import gamma
from fftlog import FFTLog, MPC
from nonlinear import M13a, M22a
from common import co
#### LOOP OVER nlens and nsource !!!
class Limber(object):
"""
...
Attributes
----------
co : class
An object of type Common() used to share data
"""
def __init__(self, theta, z, nlens, nsource, gg=True, load=True, save=True, path='./', NFFT=256, km=1.):
self.gg = gg
self.km = km
self.z = z
self.theta, _ = np.meshgrid(theta, z, indexing='ij')
self.nlens = np.asarray(nlens)
self.nsource = np.asarray(nsource)
self.Ng = self.nlens.shape[0]
self.Ns = self.nsource.shape[0]
self.Nss = self.Ns*(self.Ns+1)//2
self.Nsg = self.Ns*self.Ng
self.Ngg = self.Ng
self.N = max([self.Nss, self.Nsg])
self.fftsettings = dict(Nmax=NFFT, xmin=1.5e-5, xmax=1.e3, bias=-1.6)
self.fft = FFTLog(**self.fftsettings)
if self.gg: self.pyegg = os.path.join(path, 'pyegg%s_limber.npz') % (NFFT)
else: self.pyegg = os.path.join(path, 'pyegg%s_limber_nogg.npz') % (NFFT)
if load is True:
try:
L = np.load( self.pyegg )
if (self.fft.Pow - L['Pow']).any():
print ('Loaded loop matrices do not correspond to asked FFTLog configuration. \n Computing new matrices.')
load = False
else:
self.M11, self.M22, self.M13, self.Mct = L['M11'], L['M22'], L['M13'], L['Mct']
save = False
except:
print ('Can\'t load loop matrices at %s. \n Computing new matrices.' % path)
load = False
if load is False:
self.setM()
self.setM11()
self.setMct()
self.setM13()
self.setM22()
if save is True:
try: np.savez(self.pyegg, Pow=self.fft.Pow, M11=self.M11, M22=self.M22, M13=self.M13, Mct=self.Mct)
except: print ('Can\'t save loop matrices at %s.' % path)
self.setsPow()
# To speed-up matrix multiplication:
self.optipath13 = np.einsum_path('ns,ms,bnm->bs', self.sPow, self.sPow, self.M22, optimize='optimal')[0]
self.optipath22 = np.einsum_path('ns,ms,bnm->bs', self.sPow, self.sPow, self.M13, optimize='optimal')[0]
def setsPow(self):
""" Compute the r's to the powers of the FFTLog to evaluate the loop 'ular' correlation function. Called at the instantiation of the class. """
#slog = np.geomspace(1e-4, 3., 40)
#slin = np.arange(3, 200., 1)
#slog2 = np.geomspace(200, 1e4, 20)
#self.s = np.unique(np.concatenate([slog, slin, slog2]))
self.s = np.geomspace(1.e-4, 1.e3, 200)
self.sPow = exp(np.einsum('n,s->ns', -self.fft.Pow - 3. - 0.5, log(self.s)))
def setM(self):
""" Compute the power spectrum to 'ular' correlation function spherical Bessel transform matrices. Called at the instantiation of the class if the matrices are not loaded. """
M = np.empty(shape=(3, self.fft.Pow.shape[0], self.fft.Pow.shape[0]), dtype='complex')
for l in range(3):
for u, n1 in enumerate(-0.5 * self.fft.Pow):
for v, n2 in enumerate(-0.5 * self.fft.Pow):
self.M[l, u, v] = (2*pi)**.5 * MPC(2 * l - 0.5, n1 + n2 - 1.5)
def setM22(self):
""" Compute the 22-loop matrices. Called at the instantiation of the class if the matrices are not loaded. """
self.Mbb22 = np.empty(shape=(6, self.fft.Pow.shape[0], self.fft.Pow.shape[0]), dtype='complex')
self.Mbm22 = np.empty(shape=(3, self.fft.Pow.shape[0], self.fft.Pow.shape[0]), dtype='complex')
self.Mmm22 = np.empty(shape=(2, self.fft.Pow.shape[0], self.fft.Pow.shape[0]), dtype='complex')
Ma = np.empty(shape=(self.fft.Pow.shape[0], self.fft.Pow.shape[0]), dtype='complex') # common piece of M22
Mmm = np.empty(shape=(self.fft.Pow.shape[0], self.fft.Pow.shape[0]), dtype='complex') # matter-matter M22
for u, n1 in enumerate(-0.5 * self.fft.Pow):
for v, n2 in enumerate(-0.5 * self.fft.Pow):
Ma[u, v] = M22a(n1, n2)
Mmm[u, v] = M22mm[0](n1, n2)
for i in range(6):
Mbb = np.empty(shape=(self.fft.Pow.shape[0], self.fft.Pow.shape[0]), dtype='complex')
Mbm = np.empty(shape=(self.fft.Pow.shape[0], self.fft.Pow.shape[0]), dtype='complex')
for u, n1 in enumerate(-0.5 * self.fft.Pow):
for v, n2 in enumerate(-0.5 * self.fft.Pow):
Mbb[u, v] = M22bb[i](n1, n2)
if i < 3: Mbm[u, v] = M22bb[i](n1, n2)
self.Mbb22[i] = Mbb
if i < 3: self.Mbm22[i] = Mbm
self.Mbb22 = np.einsum('nm,nm,bnm->bnm', self.M[0], Ma, self.Mbb22)
self.Mbm22 = np.einsum('nm,nm,bnm->bnm', self.M[1], Ma, self.Mbm22)
self.Mmm22 = np.einsum('lnm,nm,nm->lnm', self.M[[0,2]], Ma, Mmm)
if self.gg: self.M22 = np.hstack([self.Mmm22, self.Mbm22, self.Mbb22])
else: self.M22 = np.hstack([self.Mmm22, self.Mbm22])
def setM13(self):
""" Compute the 13-loop matrices. Called at the instantiation of the class if the matrices are not loaded. """
self.Mbb13 = np.empty(shape=(2, self.fft.Pow.shape[0], self.fft.Pow.shape[0]), dtype='complex')
self.Mbm13 = np.empty(shape=(2, self.fft.Pow.shape[0], self.fft.Pow.shape[0]), dtype='complex')
self.Mmm13 = np.empty(shape=(2, self.fft.Pow.shape[0], self.fft.Pow.shape[0]), dtype='complex')
Ma = M13a(-0.5 * self.fft.Pow)
Mmm = M13mm[0](-0.5 * self.fft.Pow)
for i in range(2):
self.Mbb13[i] = M13bb[i](-0.5 * self.fft.Pow)
self.Mbm13[i] = M13bm[i](-0.5 * self.fft.Pow)
self.Mbb13 = np.einsum('nm,n,bn->bnm', self.M[0], Ma, self.Mbb13)
self.Mbm13 = np.einsum('nm,n,bn->bnm', self.M[1], Ma, self.Mbm13)
self.Mmm13 = np.einsum('lnm,n,n->lnm', self.M[[0,2]], Ma, Mmm)
if self.gg: self.M13 = np.hstack([self.Mmm13, self.Mbm13, self.Mbb13])
else: self.M22 = np.hstack([self.Mmm13, self.Mbm13])
def setM11(self):
""" Compute the linear matrices. Called at the instantiation of the class if the matrices are not loaded. """
self.M11 = np.empty(shape=(3, self.fft.Pow.shape[0]), dtype='complex')
for l in range(3): self.M11[l] = (2*pi)**.5 * MPC(2 * l - 0.5, -0.5 * self.fft.Pow)
def setMct(self):
""" Compute the counterterm matrices. Called at the instantiation of the class if the matrices are not loaded. """
self.Mct = np.empty(shape=(3, self.fft.Pow.shape[0]), dtype='complex')
for l in range(3): self.Mct[l, u] = (2*pi)**.5 * MPC(2 * l - 0.5, -0.5 * self.fft.Pow - 1.)
def getA11(self, CoefsPow):
""" Perform the linear correlation function matrix multiplications """
A11 = np.real(np.einsum('ns,ln->ls', CoefsPow, self.M11))
if self.gg: return np.array([A11[0], A11[2], A11[1], A11[0]])
else: return np.array([A11[0], A11[2], A11[1]])
def getAct(self, CoefsPow):
""" Perform the counterterm correlation function matrix multiplications """
Act = self.s**-2 * np.real(np.einsum('ns,ln->ls', CoefsPow, self.Mct))
if self.gg: return np.array([Act[0], Act[2], Act[1], Act[0]])
else: return np.array([Act[0], Act[2], Act[1]])
def getA22(self, CoefsPow):
""" Perform the 22-loop correlation function matrix multiplications """
return np.real(np.einsum('ns,ms,bnm->bs', CoefsPow, CoefsPow, self.M22, optimize=self.optipath22))
def getA13(self, CoefsPow):
""" Perform the 13-loop correlation function matrix multiplications """
return np.real(np.einsum('ns,ms,bnm->bs', CoefsPow, CoefsPow, self.M13, optimize=self.optipath13))
def getA(self, bird, window=None):
coef = self.fft.Coef(bird.kin, bird.kin**-0.5 * self.Pin, window=.2)
coefsPow = np.einsum('n,ns->ns', Coef, self.sPow)
A11 = self.getA11(coefsPow, bird)
Act = self.getAct(coefsPow, bird)
A22 = self.getA22(coefsPow, bird)
A13 = self.getA13(coefsPow, bird)
return A11, Act, A22, A13
def Xi(self, bird, rz, dz_by_dr, Dz, Dfid, h, Omega0_m):
Dp2 = Dz**2 / Dfid**2
Dp4 = D2**2
lensing_factor = 1.5/conts.c**2 * h**2 * 1e10 * Omega0_m
r1, _ = np.meshgrid(rz, self.z, indexing='ij')
def lensing_efficiency(nz):
return lensing_factor * rz * (1+z) * np.trapz(np.heaviside(rz-r1, 0.) * nz * (rz-r1)/rz, x=self.z, axis=-1)
qshear = np.empty_like(self.nsource)
qgal = np.empty_like(self.nlens)
for i, ns in enumerate(self.nsource): qshear[i] = self.lensing_efficiency(ns)
for i, nl in enumerate(self.nlens): qgal[i] = dz_by_dr * nl
qsqs = np.zeros(shape=(self.N))
for i, qi in enumerate(qshear):
for j, qj in enumerate(qshear):
if qj <= qi: qsqs[i+j] = qi*qj
qsqg = np.zeros(shape=(self.N))
for i, qi in enumerate(qshear):
for j, qj in enumerate(qgal):
qsqg[i+j] = qi*qj
if self.gg:
qgqg = np.zeros(shape=(self.N))
for i, qi in enumerate(qgal):
qgqg[i] = qi**2
if self.gg:
qq11 = np.array([qsqs, qsqs, qsqd, qgqg])
qq13 = np.array([qsqs, qsqs, qsqd, qsqd, qgqg, qgqg])
qq22 = np.array([qsqs, qsqs, qsqd, qsqd, qgqg, qgqg, qgqg, qgqg, qgqg, qgqg])
else:
qq11 = np.array([qsqs, qsqs, qsqd])
qq13 = np.array([qsqs, qsqs, qsqd, qsqd])
qq22 = np.array([qsqs, qsqs, qsqd, qsqd])
def time_integral(qq, DD, A):
A1 = interp1d(self.s, A, kind='cubic', axis=-1)(self.theta * rz)
return np.trapz(np.einsum('biz,z,btz->bitz', qq, DD, A1), x=rz, axis=-1)
A11 = time_integral(qq11, Dp2, A11)
Act = time_integral(qq11, Dp2, Act)
A13 = time_integral(qq13, Dp4, A13)
A22 = time_integral(qq22, Dp4, A22)
self.Assp = np.array([A11[0], Act[0], A13[0], A22[0]])[:,:self.Nss]
self.Assm = np.array([A11[1], Act[1], A13[1], A22[1]])[:,:self.Nss]
self.Asg = np.array([A11[2], Act[2], A13[2], A13[3], A22[2], A22[3], A22[4]])[:,self.Nsg]
if self.gg: self.Agg = np.array([A11[3], Act[3], A13[4], A13[5], A22[5], A22[6], A22[7], A22[8], A22[9], A22[10]])[:,:self.Ngg]
def setBias(self, bias):
b1 = bias["b1"]
b2 = bias["b2"]
b3 = bias["b3"]
b4 = bias["b4"]
css = bias["css"] / self.km**2
csg = bias["csg"] / self.km**2
if self.gg: cgg = bias["cgg"] / self.km**2
self.bss = np.array([1., 2.*css, 1., 1.])
self.Xssp = np.einsum('b,bitz->itz', bss, self.Assp)
self.Xssm = np.einsum('b,bitz->itz', bss, self.Assm)
self.bsg = np.array([b1, 2.*csg, b1, b3, b1, b2, b4])
self.Xsg = np.einsum('b,bitz->itz', bsg, self.Asg)
if self.gg:
bgg = np.array([b1**2 + 2.*b1*cgg, b1**2, b1*b3, b1**2, b1*b2, b1*b4, b2**2, b2*b4, b4**2])
self.Xgg = np.einsum('b,bitz->itz', bgg, self.Agg)
M22bb = { # galaxy-galaxy
0: lambda n1, n2: (6 + n1**4 * (4 - 24 * n2) - 7 * n2 + 8 * n1**5 * n2 - 13 * n2**2 + 4 * n2**3 + 4 * n2**4 + n1**2 * (-13 + 38 * n2 + 12 * n2**2 - 8 * n2**3) + 2 * n1**3 * (2 - 5 * n2 - 4 * n2**2 + 8 * n2**3) + n1 * (-7 - 6 * n2 + 38 * n2**2 - 10 * n2**3 - 24 * n2**4 + 8 * n2**5)) / (4. * n1 * (1 + n1) * (-1 + 2 * n1) * n2 * (1 + n2) * (-1 + 2 * n2)),
1: lambda n1, n2: (-18 + n1**2 * (1 - 11 * n2) - 12 * n2 + n2**2 + 10 * n2**3 + 2 * n1**3 * (5 + 7 * n2) + n1 * (-12 - 38 * n2 - 11 * n2**2 + 14 * n2**3)) / (7. * n1 * (1 + n1) * n2 * (1 + n2)),
2: lambda n1, n2: (-3 * n1 + 2 * n1**2 + n2 * (-3 + 2 * n2)) / (n1 * n2),
3: lambda n1, n2: (-4 * (-24 + n2 + 10 * n2**2) + 2 * n1 * (-2 + 51 * n2 + 21 * n2**2) + n1**2 * (-40 + 42 * n2 + 98 * n2**2)) / (49. * n1 * (1 + n1) * n2 * (1 + n2)),
4: lambda n1, n2: (4 * (3 - 2 * n2 + n1 * (-2 + 7 * n2))) / (7. * n1 * n2),
5: lambda n1, n2: 2.
} # b1**2, b1*b2, b1*b4, b2**2, b2*b4, b4**2
M13bb = { # galaxy-galaxy
0: lambda n1: 1.125,
1: lambda n1: -(1 / (1. + n1))
} # b1**2, b1*b3
M13bm = { # galaxy-matter
0: lambda n1: (5 + 9*n1)/(8. + 8*n1),
1: lambda n1: -(1/(2. + 2*n1))
} # b1, b3
M22bm = { # galaxy-matter
0: lambda n1, n2: ((-3 + 2*n1 + 2*n2)*(-2 + n1*(-1 + (13 - 6*n1)*n1) - n2 + 2*n1*(-3 + 2*n1)*(-9 + n1*(3 + 7*n1))*n2 + (13 + 2*n1*(-27 + 14*(-1 + n1)*n1))*n2**2 + 2*(-3 + n1*(-15 + 14*n1))*n2**3 + 28*n1*n2**4))/(28.*n1*(1 + n1)*(-1 + 2*n1)*n2*(1 + n2)*(-1 + 2*n2)),
1: lambda n1, n2: ((-3 + 2*n1 + 2*n2)*(-22 + 7*n1**2*(5 + 7*n2) + n2*(16 + 35*n2) + n1*(16 + 7*n2*(6 + 7*n2))))/(98.*n1*(1 + n1)*n2*(1 + n2)),
2: lambda n1, n2: ((-3 + 2*n1 + 2*n2)*(-4 + 7*n1 + 7*n2))/(14.*n1*n2)
} # b1, b2, b4
M22mm = { # matter-matter
0: lambda n1, n2: ((-3 + 2*n1 + 2*n2)*(-1 + 2*n1 + 2*n2)*(58 + 98*n1**3*n2 + (3 - 91*n2)*n2 + 7*n1**2*(-13 - 2*n2 + 28*n2**2) + n1*(3 + 2*n2*(-73 + 7*n2*(-1 + 7*n2)))))/(196.*n1*(1 + n1)*(-1 + 2*n1)*n2*(1 + n2)*(-1 + 2*n2))
}
M13mm = { # matter-matter
0: lambda n1: 1.125 - 1./(1. + n1)
} | en | 0.701286 | #### LOOP OVER nlens and nsource !!! ... Attributes ---------- co : class An object of type Common() used to share data # To speed-up matrix multiplication: Compute the r's to the powers of the FFTLog to evaluate the loop 'ular' correlation function. Called at the instantiation of the class. #slog = np.geomspace(1e-4, 3., 40) #slin = np.arange(3, 200., 1) #slog2 = np.geomspace(200, 1e4, 20) #self.s = np.unique(np.concatenate([slog, slin, slog2])) Compute the power spectrum to 'ular' correlation function spherical Bessel transform matrices. Called at the instantiation of the class if the matrices are not loaded. Compute the 22-loop matrices. Called at the instantiation of the class if the matrices are not loaded. # common piece of M22 # matter-matter M22 Compute the 13-loop matrices. Called at the instantiation of the class if the matrices are not loaded. Compute the linear matrices. Called at the instantiation of the class if the matrices are not loaded. Compute the counterterm matrices. Called at the instantiation of the class if the matrices are not loaded. Perform the linear correlation function matrix multiplications Perform the counterterm correlation function matrix multiplications Perform the 22-loop correlation function matrix multiplications Perform the 13-loop correlation function matrix multiplications # galaxy-galaxy # b1**2, b1*b2, b1*b4, b2**2, b2*b4, b4**2 # galaxy-galaxy # b1**2, b1*b3 # galaxy-matter # b1, b3 # galaxy-matter # b1, b2, b4 # matter-matter # matter-matter | 1.88867 | 2 |
src/__init__.py | rohansurve212/Black_Friday_Data_Hack | 0 | 6616956 | <reponame>rohansurve212/Black_Friday_Data_Hack<filename>src/__init__.py
from .make_dataset import make_dataset
from .build_features import train_stack_test_split
from .build_features import convert_data_types
from .build_features import separate_features_target
from .build_features import num_cat_feature_columns
from .visualize_data import relationship_with_target
from .preprocess_data import transform_num_features
from .preprocess_data import transform_cat_features
from .preprocess_data import transform_all_features
from .train_models import train_lasso_reg
from .train_models import train_random_forest_reg
from .train_models import train_xgboost_reg
from .train_models import train_deep_neural_network
from .blend_stacked_models import stack_models
from .evaluate_models import evaluate_on_test
| from .make_dataset import make_dataset
from .build_features import train_stack_test_split
from .build_features import convert_data_types
from .build_features import separate_features_target
from .build_features import num_cat_feature_columns
from .visualize_data import relationship_with_target
from .preprocess_data import transform_num_features
from .preprocess_data import transform_cat_features
from .preprocess_data import transform_all_features
from .train_models import train_lasso_reg
from .train_models import train_random_forest_reg
from .train_models import train_xgboost_reg
from .train_models import train_deep_neural_network
from .blend_stacked_models import stack_models
from .evaluate_models import evaluate_on_test | none | 1 | 1.098618 | 1 | |
onesecmail/__version__.py | yyyyyyyan/onesecmail | 1 | 6616957 | # _ _
# ___ _ __ ___ ___ ___ ___ _ __ ___ __ _(_) |
# / _ \| '_ \ / _ \/ __|/ _ \/ __| '_ ` _ \ / _` | | |
# | (_) | | | | __/\__ \ __/ (__| | | | | | (_| | | |
# \___/|_| |_|\___||___/\___|\___|_| |_| |_|\__,_|_|_|
#
__title__ = "onesecmail"
__description__ = "The unofficial Python client for the 1secmail API."
__url__ = "https://github.com/yyyyyyyan/onesecmail"
__version__ = "0.0.1"
__author__ = "yyyyyyyan"
__author_email__ = "<EMAIL>"
__license__ = "MIT"
__copyright__ = "Copyright (c) 2021 yyyyyyyan"
| # _ _
# ___ _ __ ___ ___ ___ ___ _ __ ___ __ _(_) |
# / _ \| '_ \ / _ \/ __|/ _ \/ __| '_ ` _ \ / _` | | |
# | (_) | | | | __/\__ \ __/ (__| | | | | | (_| | | |
# \___/|_| |_|\___||___/\___|\___|_| |_| |_|\__,_|_|_|
#
__title__ = "onesecmail"
__description__ = "The unofficial Python client for the 1secmail API."
__url__ = "https://github.com/yyyyyyyan/onesecmail"
__version__ = "0.0.1"
__author__ = "yyyyyyyan"
__author_email__ = "<EMAIL>"
__license__ = "MIT"
__copyright__ = "Copyright (c) 2021 yyyyyyyan"
| en | 0.251147 | # _ _ # ___ _ __ ___ ___ ___ ___ _ __ ___ __ _(_) | # / _ \| '_ \ / _ \/ __|/ _ \/ __| '_ ` _ \ / _` | | | # | (_) | | | | __/\__ \ __/ (__| | | | | | (_| | | | # \___/|_| |_|\___||___/\___|\___|_| |_| |_|\__,_|_|_| # | 1.347129 | 1 |
uliweb/lib/weto/cache.py | limodou/uliweb3 | 16 | 6616958 | <filename>uliweb/lib/weto/cache.py<gh_stars>10-100
#########################################################################
# cache module written by limodou(<EMAIL>) at 2009/11/03
#
# storage class will ensure the sync when load and save a session from
# and to the storage.
#########################################################################
from .backends.base import KeyError
import json
from uliweb.utils._compat import pickle, callable
__modules__ = {}
def wrap_func(des, src):
des.__name__ = src.__name__
des.__globals__.update(src.__globals__)
des.__doc__ = src.__doc__
des.__module__ = src.__module__
des.__dict__.update(src.__dict__)
return des
class NoSerial(object):
def load(self, s):
return s
def dump(self, v):
return v
class Serial(NoSerial):
protocal_level = pickle.HIGHEST_PROTOCOL
def load(self, s):
return pickle.loads(s)
def dump(self, v):
return pickle.dumps(v, self.protocal_level)
class JsonSerial(Serial):
def load(self, s):
return json.loads(s)
def dump(self, v):
return json.dumps(v)
class Empty(object):
pass
class Cache(object):
def __init__(self, storage_type='file', options=None, expiry_time=3600*24*365,
serial_cls=None):
self._storage_type = storage_type
self._options = options or {}
self._storage_cls = self.__get_storage()
self._storage = None
self._serial_cls = serial_cls or Serial
self.serial_obj = self._serial_cls()
self.expiry_time = expiry_time
def __get_storage(self):
modname = 'weto.backends.%s_storage' % self._storage_type
if modname in __modules__:
return __modules__[modname]
else:
mod = __import__(modname, fromlist=['*'])
_class = getattr(mod, 'Storage', None)
__modules__[modname] = _class
return _class
@property
def storage(self):
if not self._storage:
d = {}
if self._storage_type == 'file':
d = {'file_dir_name':'cache_files', 'lock_dir_name':'cache_files_lock'}
self._storage = self._storage_cls(self, self._options, **d)
return self._storage
def get(self, key, default=Empty, creator=Empty, expire=None):
"""
:para default: if default is callable then invoke it, save it and return it
"""
try:
return self.storage.get(key)
except KeyError as e:
if creator is not Empty:
if callable(creator):
v = creator()
else:
v = creator
self.set(key, v, expire)
return v
else:
if default is not Empty:
if callable(default):
v = default()
return v
return default
else:
raise
def set(self, key, value=None, expire=None):
if callable(value):
value = value()
return self.storage.set(key, value, expire or self.expiry_time)
def delete(self, key):
return self.storage.delete(key)
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
if callable(value):
value = value()
return self.set(key, value)
def __delitem__(self, key):
self.delete(key)
def setdefault(self, key, defaultvalue, expire=None):
v = self.get(key, creator=defaultvalue, expire=expire)
return v
def inc(self, key, step=1, expire=None):
return self.storage.inc(key, step, expire or self.expiry_time)
def dec(self, key, step=1, expire=None):
return self.storage.dec(key, step, expire or self.expiry_time)
def cache(self, k=None, expire=None):
def _f(func):
def f(*args, **kwargs):
if not k:
r = repr(args) + repr(sorted(kwargs.items()))
key = func.__module__ + '.' + func.__name__ + r
else:
key = k
try:
ret = self.get(key)
return ret
except KeyError:
ret = func(*args, **kwargs)
self.set(key, ret, expire=expire)
return ret
wrap_func(f, func)
return f
return _f
| <filename>uliweb/lib/weto/cache.py<gh_stars>10-100
#########################################################################
# cache module written by limodou(<EMAIL>) at 2009/11/03
#
# storage class will ensure the sync when load and save a session from
# and to the storage.
#########################################################################
from .backends.base import KeyError
import json
from uliweb.utils._compat import pickle, callable
__modules__ = {}
def wrap_func(des, src):
des.__name__ = src.__name__
des.__globals__.update(src.__globals__)
des.__doc__ = src.__doc__
des.__module__ = src.__module__
des.__dict__.update(src.__dict__)
return des
class NoSerial(object):
def load(self, s):
return s
def dump(self, v):
return v
class Serial(NoSerial):
protocal_level = pickle.HIGHEST_PROTOCOL
def load(self, s):
return pickle.loads(s)
def dump(self, v):
return pickle.dumps(v, self.protocal_level)
class JsonSerial(Serial):
def load(self, s):
return json.loads(s)
def dump(self, v):
return json.dumps(v)
class Empty(object):
pass
class Cache(object):
def __init__(self, storage_type='file', options=None, expiry_time=3600*24*365,
serial_cls=None):
self._storage_type = storage_type
self._options = options or {}
self._storage_cls = self.__get_storage()
self._storage = None
self._serial_cls = serial_cls or Serial
self.serial_obj = self._serial_cls()
self.expiry_time = expiry_time
def __get_storage(self):
modname = 'weto.backends.%s_storage' % self._storage_type
if modname in __modules__:
return __modules__[modname]
else:
mod = __import__(modname, fromlist=['*'])
_class = getattr(mod, 'Storage', None)
__modules__[modname] = _class
return _class
@property
def storage(self):
if not self._storage:
d = {}
if self._storage_type == 'file':
d = {'file_dir_name':'cache_files', 'lock_dir_name':'cache_files_lock'}
self._storage = self._storage_cls(self, self._options, **d)
return self._storage
def get(self, key, default=Empty, creator=Empty, expire=None):
"""
:para default: if default is callable then invoke it, save it and return it
"""
try:
return self.storage.get(key)
except KeyError as e:
if creator is not Empty:
if callable(creator):
v = creator()
else:
v = creator
self.set(key, v, expire)
return v
else:
if default is not Empty:
if callable(default):
v = default()
return v
return default
else:
raise
def set(self, key, value=None, expire=None):
if callable(value):
value = value()
return self.storage.set(key, value, expire or self.expiry_time)
def delete(self, key):
return self.storage.delete(key)
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
if callable(value):
value = value()
return self.set(key, value)
def __delitem__(self, key):
self.delete(key)
def setdefault(self, key, defaultvalue, expire=None):
v = self.get(key, creator=defaultvalue, expire=expire)
return v
def inc(self, key, step=1, expire=None):
return self.storage.inc(key, step, expire or self.expiry_time)
def dec(self, key, step=1, expire=None):
return self.storage.dec(key, step, expire or self.expiry_time)
def cache(self, k=None, expire=None):
def _f(func):
def f(*args, **kwargs):
if not k:
r = repr(args) + repr(sorted(kwargs.items()))
key = func.__module__ + '.' + func.__name__ + r
else:
key = k
try:
ret = self.get(key)
return ret
except KeyError:
ret = func(*args, **kwargs)
self.set(key, ret, expire=expire)
return ret
wrap_func(f, func)
return f
return _f
| de | 0.39065 | ######################################################################### # cache module written by limodou(<EMAIL>) at 2009/11/03 # # storage class will ensure the sync when load and save a session from # and to the storage. ######################################################################### :para default: if default is callable then invoke it, save it and return it | 2.094044 | 2 |
tests/test_quick_email.py | murrple-1/send-email-python | 1 | 6616959 | import unittest
import time
from quick_email import send_email
import tests.smtp as smtp
class TestQuickEmail(unittest.TestCase):
def run(self, *args, **kwargs):
result = super().run(*args, **kwargs)
time.sleep(smtp.SMTP_RATE_LIMIT_SECONDS)
return result
def test_send_email_auth(self):
send_email(smtp.SMTP_HOST, smtp.SMTP_PORT, u'Example <<EMAIL>>', u'The Subject', send_to=u'Test <<EMAIL>>',
plain_text=u'Some Text', html_text=u'<b>Some Bold Text</b>', username=smtp.SMTP_USERNAME, password=smtp.SMTP_PASSWORD)
def test_send_email_starttls_auth(self):
send_email(smtp.SMTP_HOST, smtp.SMTP_PORT, u'Example <<EMAIL>>', u'The Subject', send_to=u'Test <<EMAIL>>',
plain_text=u'Some Text', html_text=u'<b>Some Bold Text</b>', username=smtp.SMTP_USERNAME, password=smtp.SMTP_PASSWORD, require_starttls=True)
if __name__ == u'__main__':
unittest.main()
| import unittest
import time
from quick_email import send_email
import tests.smtp as smtp
class TestQuickEmail(unittest.TestCase):
def run(self, *args, **kwargs):
result = super().run(*args, **kwargs)
time.sleep(smtp.SMTP_RATE_LIMIT_SECONDS)
return result
def test_send_email_auth(self):
send_email(smtp.SMTP_HOST, smtp.SMTP_PORT, u'Example <<EMAIL>>', u'The Subject', send_to=u'Test <<EMAIL>>',
plain_text=u'Some Text', html_text=u'<b>Some Bold Text</b>', username=smtp.SMTP_USERNAME, password=smtp.SMTP_PASSWORD)
def test_send_email_starttls_auth(self):
send_email(smtp.SMTP_HOST, smtp.SMTP_PORT, u'Example <<EMAIL>>', u'The Subject', send_to=u'Test <<EMAIL>>',
plain_text=u'Some Text', html_text=u'<b>Some Bold Text</b>', username=smtp.SMTP_USERNAME, password=smtp.SMTP_PASSWORD, require_starttls=True)
if __name__ == u'__main__':
unittest.main()
| none | 1 | 2.902353 | 3 | |
pythonizame/apps/videos/models.py | kiubtech/pythonizame | 6 | 6616960 | import os
import uuid
import logging
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from django.utils import timezone
from ckeditor_uploader.fields import RichTextUploadingField
# Iniciamos el logger para estas vistas
logger = logging.getLogger(__name__)
def image_path_video(self, filename):
today = timezone.now()
path = "videos/{0}/{1}/{2}/".format(today.year, today.month, today.day)
ext = filename.split('.')[-1]
my_filename = "{0}.{1}".format(str(uuid.uuid1()).replace('-', ''), ext)
url = os.path.join(path, my_filename)
return url
class VideoCategory(models.Model):
name = models.CharField(max_length=200)
slug = models.SlugField(unique=True)
def __str__(self):
return self.name
class Meta:
verbose_name = _("Categoría de videos")
verbose_name_plural = _("Categorías de videos")
class PlayList(models.Model):
STATUS_LIST = (
(0, _("No aprobado")),
(1, _("Aprobado")),
(2, _("En revisión"))
)
title = models.CharField(max_length=500, unique=True)
slug = models.SlugField(unique=True)
abstract = models.TextField(max_length=5000)
description = RichTextUploadingField(max_length=1000000)
categories = models.ManyToManyField(VideoCategory, help_text=_("Categorías a la que pertenece"))
cover = models.ImageField(null=True, blank=True, upload_to=image_path_video)
created_by = models.ForeignKey(User, related_name='created_by')
author = models.ForeignKey(User, related_name='author')
status = models.IntegerField(choices=STATUS_LIST, default=2)
timestamp = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.title
class Meta:
verbose_name = _("Lista de reproducción")
verbose_name_plural = _("Listas de reproducción")
@property
def url(self):
return "https://pythoniza.me/{0}".format(self.slug)
class Video(models.Model):
created_by = models.ForeignKey(User)
order = models.IntegerField(default=1)
playlist = models.ForeignKey(PlayList, help_text=_("Lista de reproducción al que pertenece el video"))
title = models.CharField(max_length=500)
description = RichTextUploadingField(max_length=1000000)
youtube_id = models.CharField(max_length=500)
timestamp = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.title
class Meta:
ordering = ('order', )
verbose_name = _("Video")
verbose_name_plural = _("Videos")
| import os
import uuid
import logging
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from django.utils import timezone
from ckeditor_uploader.fields import RichTextUploadingField
# Iniciamos el logger para estas vistas
logger = logging.getLogger(__name__)
def image_path_video(self, filename):
today = timezone.now()
path = "videos/{0}/{1}/{2}/".format(today.year, today.month, today.day)
ext = filename.split('.')[-1]
my_filename = "{0}.{1}".format(str(uuid.uuid1()).replace('-', ''), ext)
url = os.path.join(path, my_filename)
return url
class VideoCategory(models.Model):
name = models.CharField(max_length=200)
slug = models.SlugField(unique=True)
def __str__(self):
return self.name
class Meta:
verbose_name = _("Categoría de videos")
verbose_name_plural = _("Categorías de videos")
class PlayList(models.Model):
STATUS_LIST = (
(0, _("No aprobado")),
(1, _("Aprobado")),
(2, _("En revisión"))
)
title = models.CharField(max_length=500, unique=True)
slug = models.SlugField(unique=True)
abstract = models.TextField(max_length=5000)
description = RichTextUploadingField(max_length=1000000)
categories = models.ManyToManyField(VideoCategory, help_text=_("Categorías a la que pertenece"))
cover = models.ImageField(null=True, blank=True, upload_to=image_path_video)
created_by = models.ForeignKey(User, related_name='created_by')
author = models.ForeignKey(User, related_name='author')
status = models.IntegerField(choices=STATUS_LIST, default=2)
timestamp = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.title
class Meta:
verbose_name = _("Lista de reproducción")
verbose_name_plural = _("Listas de reproducción")
@property
def url(self):
return "https://pythoniza.me/{0}".format(self.slug)
class Video(models.Model):
created_by = models.ForeignKey(User)
order = models.IntegerField(default=1)
playlist = models.ForeignKey(PlayList, help_text=_("Lista de reproducción al que pertenece el video"))
title = models.CharField(max_length=500)
description = RichTextUploadingField(max_length=1000000)
youtube_id = models.CharField(max_length=500)
timestamp = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.title
class Meta:
ordering = ('order', )
verbose_name = _("Video")
verbose_name_plural = _("Videos")
| es | 0.834072 | # Iniciamos el logger para estas vistas | 2.240191 | 2 |
6-nyrkkeilykello/s60/halfMinute.py | AlexRogalskiy/Duino | 0 | 6616961 | # halfMinute.py - Print "Bling" every half minute
# (c) <NAME> & <NAME> http://sulautetut.fi
import e32
def bling():
print("Bling!")
blingTimer.cancel()
blingTimer.after(30, bling)
# main
blingTimer = e32.Ao_timer()
bling()
| # halfMinute.py - Print "Bling" every half minute
# (c) <NAME> & <NAME> http://sulautetut.fi
import e32
def bling():
print("Bling!")
blingTimer.cancel()
blingTimer.after(30, bling)
# main
blingTimer = e32.Ao_timer()
bling()
| en | 0.4282 | # halfMinute.py - Print "Bling" every half minute # (c) <NAME> & <NAME> http://sulautetut.fi # main | 3.254993 | 3 |
simple.py | Rapid292/regular-expression | 0 | 6616962 | import re
from re import match
text_to_search = '''
abcdefghijklmnopqurtuvwxyz
ABCDEFGHIJKLMNOPQRSTUVWXYZ
1234567890
Ha HaHa
MetaCharacters (Need to be escaped):
. ^ $ * + ? { } [ ] \ | ( )
coreyms.com
321-555-4321
123.555.1234
123*555*1234
800-555-1234
900-555-1234
Mr. Schafer
Mr Smith
Ms Davis
Mrs. Robinson
Mr. T
cat
mat
bat
pat
'''
sentence = 'Start a sentence and then bring it to an end'
# pattern = re.compile(r'defg')
# pattern = re.compile(r'\.')
# pattern = re.compile(r'coreyms\.com')
# pattern = re.compile(r'^Start')
# pattern = re.compile(r'end$')
# pattern = re.compile(r'[89]00[-.]\d\d\d[-.]\d\d\d')
# pattern = re.compile(r'[a-zA-Z1-9]')
# pattern = re.compile(r'[^b]at')
# pattern = re.compile(r'\d{3}.\d{3}.\d{4}')
pattern = re.compile(r'(Mr|Mrs|Ms)\.?\s[A-Z]\w*')
# pattern = re.compile(r'sentence')
# pattern = re.compile(r'start', re.IGNORECASE)
matches = pattern.finditer(text_to_search)
# matches = pattern.findall(text_to_search)
# matches = pattern.match(Start) #Search at starting of sentence only and return first matching object else None
# matches = pattern.search(sentence)
print(matches)
for match in matches:
print(match)
# with open('data.txt', 'rt', encoding='utf-8') as f:
# contents = f.read()
# matches = pattern.finditer(contents)
# for match in matches:
# print(match)
# print(r'\tTab')
# print(text_to_search[214:215]) | import re
from re import match
text_to_search = '''
abcdefghijklmnopqurtuvwxyz
ABCDEFGHIJKLMNOPQRSTUVWXYZ
1234567890
Ha HaHa
MetaCharacters (Need to be escaped):
. ^ $ * + ? { } [ ] \ | ( )
coreyms.com
321-555-4321
123.555.1234
123*555*1234
800-555-1234
900-555-1234
Mr. Schafer
Mr Smith
Ms Davis
Mrs. Robinson
Mr. T
cat
mat
bat
pat
'''
sentence = 'Start a sentence and then bring it to an end'
# pattern = re.compile(r'defg')
# pattern = re.compile(r'\.')
# pattern = re.compile(r'coreyms\.com')
# pattern = re.compile(r'^Start')
# pattern = re.compile(r'end$')
# pattern = re.compile(r'[89]00[-.]\d\d\d[-.]\d\d\d')
# pattern = re.compile(r'[a-zA-Z1-9]')
# pattern = re.compile(r'[^b]at')
# pattern = re.compile(r'\d{3}.\d{3}.\d{4}')
pattern = re.compile(r'(Mr|Mrs|Ms)\.?\s[A-Z]\w*')
# pattern = re.compile(r'sentence')
# pattern = re.compile(r'start', re.IGNORECASE)
matches = pattern.finditer(text_to_search)
# matches = pattern.findall(text_to_search)
# matches = pattern.match(Start) #Search at starting of sentence only and return first matching object else None
# matches = pattern.search(sentence)
print(matches)
for match in matches:
print(match)
# with open('data.txt', 'rt', encoding='utf-8') as f:
# contents = f.read()
# matches = pattern.finditer(contents)
# for match in matches:
# print(match)
# print(r'\tTab')
# print(text_to_search[214:215]) | en | 0.516596 | abcdefghijklmnopqurtuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ 1234567890 Ha HaHa MetaCharacters (Need to be escaped): . ^ $ * + ? { } [ ] \ | ( ) coreyms.com 321-555-4321 123.555.1234 123*555*1234 800-555-1234 900-555-1234 Mr. Schafer Mr Smith Ms Davis Mrs. Robinson Mr. T cat mat bat pat # pattern = re.compile(r'defg') # pattern = re.compile(r'\.') # pattern = re.compile(r'coreyms\.com') # pattern = re.compile(r'^Start') # pattern = re.compile(r'end$') # pattern = re.compile(r'[89]00[-.]\d\d\d[-.]\d\d\d') # pattern = re.compile(r'[a-zA-Z1-9]') # pattern = re.compile(r'[^b]at') # pattern = re.compile(r'\d{3}.\d{3}.\d{4}') # pattern = re.compile(r'sentence') # pattern = re.compile(r'start', re.IGNORECASE) # matches = pattern.findall(text_to_search) # matches = pattern.match(Start) #Search at starting of sentence only and return first matching object else None # matches = pattern.search(sentence) # with open('data.txt', 'rt', encoding='utf-8') as f: # contents = f.read() # matches = pattern.finditer(contents) # for match in matches: # print(match) # print(r'\tTab') # print(text_to_search[214:215]) | 3.18349 | 3 |
qual_id/categories/anime.py | bethwilliamson/qual-id | 0 | 6616963 | <gh_stars>0
from ..category import Category
class Anime(Category):
    """Category that supplies a fixed list of anime titles."""

    def get_values(self):
        """Return the list of anime title strings for this category."""
        return [
            "akira",
            "anohana",
            "baccano",
            "bakemonogatari",
            "bakuman",
            "berserk",
            "beyblade",
            "bleach",
            "chihayafuru",
            # BUG FIX: a missing comma after "clannad" made Python join the
            # adjacent string literals into the single bogus entry
            # "clannaddanmachi", silently dropping both real titles.
            "clannad",
            "danmachi",
            "digimon",
            "dororo",
            "durarara",
            "erased",
            "gintama",
            "gosick",
            "haikyuu",
            "hinamatsuri",
            "hyouka",
            "kizumonogatari",
            "konosuba",
            "monster",
            "mushishi",
            "naruto",
            "nichijou",
            "noblesse",
            "noragami",
            "oregairu",
            "overlord",
            "pokemon",
            "relife",
            "shelter",
            "toradora",
            "wotakoi",
        ]
| from ..category import Category
class Anime(Category):
def get_values(self):
return [
"akira",
"anohana",
"baccano",
"bakemonogatari",
"bakuman",
"berserk",
"beyblade",
"bleach",
"chihayafuru",
"clannad"
"danmachi",
"digimon",
"dororo",
"durarara",
"erased",
"gintama",
"gosick",
"haikyuu",
"hinamatsuri",
"hyouka",
"kizumonogatari",
"konosuba",
"monster",
"mushishi",
"naruto",
"nichijou",
"noblesse",
"noragami",
"oregairu",
"overlord",
"pokemon",
"relife",
"shelter",
"toradora",
"wotakoi",
] | none | 1 | 2.531545 | 3 | |
yapt/utils/utils.py | fral92/yapt | 4 | 6616964 | import torch
import inspect
import hashlib
import json
import logging
import numpy as np
log = logging.getLogger(__name__)
def collate_fn(list_dict, debug=False, merge='cat', dim=0):
    """
    Collate a list of dicts into a single dict of merged values.

    Tensor values are combined with ``torch.cat`` (merge='cat') or
    ``torch.stack`` (merge='stack') along ``dim``; numpy arrays and lists
    are packed into one ``np.ndarray``; any other value type is left as a
    plain list of the per-item values.
    """
    assert merge in ('cat', 'stack'), "Only stack or cat"
    combine = torch.cat if merge == 'cat' else torch.stack
    collated = {key: [item[key] for item in list_dict] for key in list_dict[0]}
    for key, values in collated.items():
        first = values[0]
        if torch.is_tensor(first):
            if debug:
                print("pre", key, len(values), first.shape)
            collated[key] = combine(values, dim=dim)
            if debug:
                print("post", key, collated[key].shape)
        elif isinstance(first, (np.ndarray, list)):
            collated[key] = np.array(values)
    return collated
def create_batches(list_dict, batch_size=200):
    """
    Split a list of task dicts into collated batches of at most ``batch_size``
    items each (the final batch may be smaller).
    """
    return [
        collate_fn(list_dict[start:start + batch_size], merge='cat', dim=0)
        for start in range(0, len(list_dict), batch_size)
    ]
def recursive_keys(_dict):
    """
    Return the keys of ``_dict`` as a list, recursing into nested dicts.

    A nested dict value appears as ``{key: [nested keys...]}``, preserving
    the mapping's structure; useful for debugging and exception messages.
    """
    result = []
    for key, value in _dict.items():
        if isinstance(value, dict):
            result.append({key: recursive_keys(value)})
        else:
            result.append(key)
    return result
def listdict_to_dictlist(list_dict):
    """Convert a list of dicts into a dict of lists (keys from the first item)."""
    keys = list_dict[0]
    return {key: [item[key] for item in list_dict] for key in keys}
def add_key_dict_prefix(_dict, prefix, sep='/'):
    """Rename every key of ``_dict`` in place to ``<prefix><sep><key>``.

    Returns the same (mutated) dict for call chaining.
    """
    for key in list(_dict):
        _dict["{}{}{}".format(prefix, sep, key)] = _dict.pop(key)
    return _dict
def is_scalar(x):
    """Return True for python numbers and zero-dimensional torch/numpy values."""
    if isinstance(x, (float, int)):
        return True
    if torch.is_tensor(x) and x.ndim == 0:
        return True
    return isinstance(x, np.ndarray) and x.ndim == 0
def is_list(obj):
    # Imported lazily so omegaconf is only required when this helper is used.
    from omegaconf import ListConfig
    # True for builtin sequences and for OmegaConf list containers alike.
    return isinstance(obj, (list, tuple, ListConfig))
def is_dict(obj):
    # Imported lazily so omegaconf is only required when this helper is used.
    from omegaconf import DictConfig
    # True for builtin dicts and for OmegaConf dict containers alike.
    return isinstance(obj, (dict, DictConfig))
def is_optimizer(obj):
    # True iff obj is a torch optimizer (any subclass of torch.optim.Optimizer).
    return isinstance(obj, torch.optim.Optimizer)
def is_dataset(obj):
    # True iff obj is a torch dataset (any subclass of torch.utils.data.Dataset).
    return isinstance(obj, torch.utils.data.Dataset)
def is_notebook():
    """Best-effort detection of running inside a Jupyter/Colab notebook.

    Returns True only for notebook-style shells; False for terminal IPython,
    a plain interpreter, or when IPython is not installed at all.
    """
    try:
        from IPython import get_ipython
        shell = get_ipython().__class__.__name__
        module = get_ipython().__class__.__module__
        if shell == 'ZMQInteractiveShell' or module == "google.colab._shell":
            return True  # Jupyter notebook, colab or qtconsole
        elif shell == 'TerminalInteractiveShell':
            return False  # Terminal running IPython
        else:
            return False  # Other type (?)
    # BUG FIX: when IPython is absent the import raises ImportError, and when
    # IPython is installed but inactive get_ipython() returns None, raising
    # AttributeError -- neither is a NameError, so the original function
    # crashed in a standard interpreter instead of returning False.
    except (NameError, ImportError, AttributeError):
        return False  # Probably standard Python interpreter
def stats_to_str(stats, fmt=":.4f"):
    """Format the scalar entries of ``stats`` as a "key: value - " string.

    Non-scalar entries are skipped; tensor scalars are unwrapped via .item().
    """
    assert isinstance(stats, dict), \
        "stats should be a dict instead is a {}".format(type(stats))
    template = "{}: {" + fmt + "} - "
    pieces = []
    for key, val in stats.items():
        if is_scalar(val):
            if torch.is_tensor(val):
                val = val.item()
            pieces.append(template.format(key, val))
    return ''.join(pieces)
def warning_not_implemented(console_log=None, level=1):
    """Log a warning naming the calling function as not implemented.

    ``level`` selects how many stack frames up to look for the name;
    ``console_log`` defaults to this module's logger.
    """
    # on first index:
    # - 0 is for the called function
    # - 1 is for the caller
    if console_log is None:
        console_log = log
    # inspect.stack()[level][3] is the function name `level` frames above us.
    name = inspect.stack()[level][3]
    console_log.warning("%s method not implemented", name)
def flatten_dict(d, parent_key='', sep='.'):
    """Flatten a nested mapping into a single-level dict.

    Nested keys are joined with ``sep``: {'a': {'b': 1}} -> {'a.b': 1}.
    """
    # BUG FIX: the ABC aliases were removed from `collections` in Python
    # 3.10; MutableMapping must be imported from collections.abc.
    from collections.abc import MutableMapping
    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, MutableMapping):
            items.extend(flatten_dict(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)
def make_hash(o_dict):
    """Return a stable sha1 hex digest of a JSON-serialisable dict.

    Keys are sorted before serialisation so equal dicts hash identically
    regardless of insertion order.
    """
    payload = json.dumps(o_dict, sort_keys=True).encode()
    return hashlib.sha1(payload).hexdigest()
def hash_from_time(lenght=10):
    """Return a hex string of length ``lenght`` derived from the current time.

    NOTE(review): the parameter name keeps the original "lenght" typo for
    backward compatibility with existing keyword callers.
    """
    from time import time
    digest = hashlib.sha1(str(time()).encode('utf-8')).hexdigest()
    return digest[:lenght]
def reshape_parameters(named_parameters, permutation=(3, 2, 1, 0)):
    """Convert named parameters to numpy arrays, permuting 4-D weights.

    4-D tensors have their axes reordered by ``permutation`` before
    conversion; all other tensors are converted as-is.
    """
    reshaped = {}
    for name, param in named_parameters:
        tensor = param.permute(permutation) if len(param.shape) == 4 else param
        reshaped[name] = tensor.data.cpu().numpy()
    return reshaped
def reshape_activations(outputs, permutation=(0, 2, 3, 1)):
    """Convert activation tensors to numpy arrays, permuting 4-D maps.

    4-D activations have their axes reordered by ``permutation`` before
    conversion; all other tensors are converted unchanged.
    """
    reshaped = {}
    for name, activation in outputs.items():
        if len(activation.shape) == 4:
            tensor = activation.data.permute(permutation)
        else:
            tensor = activation.data
        reshaped[name] = tensor.cpu().numpy()
    return reshaped
def clamp(value, min_value, max_value):
    """Clamp ``value`` into the inclusive range [min_value, max_value]."""
    if value < min_value:
        return min_value
    if value > max_value:
        return max_value
    return value
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) score tensor.
        target: (batch,) tensor of ground-truth class indices.
        topk: iterable of k values to report.

    Returns:
        (res, correct): a list with one precision@k percentage tensor per k,
        and the (maxk, batch) boolean tensor of per-rank hits.
    """
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()  # (maxk, batch): row r holds each sample's rank-r prediction
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # BUG FIX: use reshape(-1) instead of view(-1); `correct` can be
        # non-contiguous here, and Tensor.view raises a RuntimeError on
        # non-contiguous inputs (same fix as in pytorch/examples).
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res, correct
def diagnose_network(net, name='network'):
    """Print the mean absolute gradient over all parameters of ``net``.

    Parameters whose .grad is None (no backward pass yet) are skipped; when
    no parameter carries a gradient the printed mean stays 0.0.
    """
    mean = 0.0
    count = 0
    for param in net.parameters():
        if param.grad is not None:
            mean += torch.mean(torch.abs(param.grad.data))
            count += 1
    if count > 0:
        mean = mean / count
    print(name)
    print(mean)
def print_numpy(x, val=True, shp=False):
    """Print summary statistics (and optionally the shape) of a numpy array.

    Args:
        x: numpy array; converted to float64 before reporting.
        val: when True, print mean/min/max/median/std of the flattened data.
        shp: when True, also print the array's shape.
    """
    x = x.astype(np.float64)
    if shp:
        print('shape,', x.shape)
    if val:
        x = x.flatten()
        print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
            np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
| import torch
import inspect
import hashlib
import json
import logging
import numpy as np
log = logging.getLogger(__name__)
def collate_fn(list_dict, debug=False, merge='cat', dim=0):
"""
Collate function: from list-of-dicts to dict-of-lists
"""
assert merge in ('cat', 'stack'), "Only stack or cat"
dict_list = {k: [dic[k] for dic in list_dict] for k in list_dict[0]}
for k, v in dict_list.items():
if torch.is_tensor(v[0]):
if debug:
print("pre", k, len(v), v[0].shape)
if merge == 'cat':
# -- use when batch_size already exist
dict_list[k] = torch.cat(v, dim=dim)
elif merge == 'stack':
dict_list[k] = torch.stack(v, dim=dim)
if debug:
print("post", k, dict_list[k].shape)
elif isinstance(v[0], (np.ndarray, list)):
dict_list[k] = np.array(v)
return dict_list
def create_batches(list_dict, batch_size=200):
"""
Create batches from list of tasks
"""
chunks = [list_dict[x:x+batch_size] for x in range(0, len(list_dict), batch_size)]
batches = [collate_fn(c, merge='cat', dim=0) for c in chunks]
return batches
def recursive_keys(_dict):
"""
Helper function that visits recursively a dictionary and print its keys.
It is useful for debugging and exception messages.
"""
keys = []
for k, v in _dict.items():
if isinstance(v, dict):
keys += [{k: recursive_keys(v)}]
else:
keys += [k]
return keys
def listdict_to_dictlist(list_dict):
return{k: [dic[k] for dic in list_dict] for k in list_dict[0]}
def add_key_dict_prefix(_dict, prefix, sep='/'):
# -- add prefix for each key
old_keys = list(_dict.keys())
for key in old_keys:
new_key = "{}{}{}".format(prefix, sep, key)
_dict[new_key] = _dict.pop(key)
return _dict
def is_scalar(x):
if isinstance(x, (float, int)):
return True
elif (torch.is_tensor(x) and x.ndim == 0):
return True
elif (isinstance(x, np.ndarray) and x.ndim == 0):
return True
else:
return False
def is_list(obj):
from omegaconf import ListConfig
return isinstance(obj, (list, tuple, ListConfig))
def is_dict(obj):
from omegaconf import DictConfig
return isinstance(obj, (dict, DictConfig))
def is_optimizer(obj):
return isinstance(obj, torch.optim.Optimizer)
def is_dataset(obj):
return isinstance(obj, torch.utils.data.Dataset)
def is_notebook():
try:
from IPython import get_ipython
shell = get_ipython().__class__.__name__
module = get_ipython().__class__.__module__
if shell == 'ZMQInteractiveShell' or module == "google.colab._shell":
return True # Jupyter notebook, colab or qtconsole
elif shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
else:
return False # Other type (?)
except NameError:
return False # Probably standard Python interpreter
def stats_to_str(stats, fmt=":.4f"):
assert isinstance(stats, dict), \
"stats should be a dict instead is a {}".format(type(stats))
string = ''
for key, val in stats.items():
if is_scalar(val):
val = val.item() if torch.is_tensor(val) else val
string += ("{}: {" + fmt + "} - ").format(key, val)
return string
def warning_not_implemented(console_log=None, level=1):
# on first index:
# - 0 is for the called function
# - 1 is for the caller
if console_log is None:
console_log = log
name = inspect.stack()[level][3]
console_log.warning("%s method not implemented", name)
def flatten_dict(d, parent_key='', sep='.'):
from collections import MutableMapping
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, MutableMapping):
items.extend(flatten_dict(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
def make_hash(o_dict):
d = hashlib.sha1(json.dumps(o_dict, sort_keys=True).encode())
return d.hexdigest()
def hash_from_time(lenght=10):
from time import time
hash = hashlib.sha1()
hash.update(str(time()).encode('utf-8'))
return hash.hexdigest()[:lenght]
def reshape_parameters(named_parameters, permutation=(3, 2, 1, 0)):
parameters = {}
for name, p in named_parameters:
if len(p.shape) == 4:
pp = p.permute(permutation).data.cpu().numpy()
else:
pp = p.data.cpu().numpy()
parameters[name] = pp
return parameters
def reshape_activations(outputs, permutation=(0, 2, 3, 1)):
activations = {}
for name, act in outputs.items():
if len(act.shape) == 4:
aa = act.data.permute(permutation).cpu().numpy()
else:
aa = act.data.cpu().numpy()
activations[name] = aa
return activations
def clamp(value, min_value, max_value):
return max(min_value, min(value, max_value))
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res, correct
def diagnose_network(net, name='network'):
mean = 0.0
count = 0
for param in net.parameters():
if param.grad is not None:
mean += torch.mean(torch.abs(param.grad.data))
count += 1
if count > 0:
mean = mean / count
print(name)
print(mean)
def print_numpy(x, val=True, shp=False):
x = x.astype(np.float64)
if shp:
print('shape,', x.shape)
if val:
x = x.flatten()
print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
| en | 0.759226 | Collate function: from list-of-dicts to dict-of-lists # -- use when batch_size already exist Create batches from list of tasks Helper function that visits recursively a dictionary and print its keys. It is useful for debugging and exception messages. # -- add prefix for each key # Jupyter notebook, colab or qtconsole # Terminal running IPython # Other type (?) # Probably standard Python interpreter # on first index: # - 0 is for the called function # - 1 is for the caller Computes the precision@k for the specified values of k | 2.443168 | 2 |
experiments/run_scripts/gpu-me-trpo-train.py | jackwilkinson255/mbmpo_master | 28 | 6616965 | from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from sandbox.ours.envs.normalized_env import normalize
from sandbox.ours.envs.base import TfEnv
from sandbox.ours.policies.improved_gauss_mlp_policy import GaussianMLPPolicy
from rllab.misc.instrument import run_experiment_lite
from rllab.misc.instrument import VariantGenerator
from rllab import config
from experiments.helpers.ec2_helpers import cheapest_subnets
from sandbox.ours.dynamics import MLPDynamicsEnsemble
from sandbox.ours.algos.ModelTRPO.model_trpo import ModelTRPO
from sandbox.ours.envs.mujoco import AntEnvRandParams, HalfCheetahEnvRandParams, HopperEnvRandParams, SwimmerEnvRandParams, WalkerEnvRandomParams
import tensorflow as tf
import sys
import argparse
import random
import json
import os
def run_train_task(vv):
    """Train model-based TRPO for one experiment variant.

    ``vv`` is the variant dict produced by the experiment launcher; it must
    contain the env class, model/policy sizes, and all ModelTRPO options
    referenced below.  NOTE(review): sys.stdout is redirected to a log file
    and never restored, and the file is opened without a context manager --
    confirm this is intentional for the launcher's process-per-task model.
    """
    import sys
    print(vv['exp_prefix'])
    # Redirect all stdout of this task into a per-experiment log file.
    sysout_log_path = os.path.join(config.LOG_DIR, 'local', vv['exp_prefix'], vv['exp_name'], 'stdout.log')
    sysout_log_file = open(sysout_log_path, 'w')
    sys.stdout = sysout_log_file
    # Build the (normalized, TF-wrapped) environment from the variant's class.
    env = TfEnv(normalize(vv['env'](log_scale_limit=vv['log_scale_limit'])))
    # Ensemble dynamics model used to generate imagined rollouts.
    dynamics_model = MLPDynamicsEnsemble(
        name="dyn_model",
        env_spec=env.spec,
        hidden_sizes=vv['hidden_sizes_model'],
        weight_normalization=vv['weight_normalization_model'],
        num_models=vv['num_models'],
        valid_split_ratio=vv['valid_split_ratio'],
        rolling_average_persitency=vv['rolling_average_persitency']
    )
    policy = GaussianMLPPolicy(
        name="policy",
        env_spec=env.spec,
        hidden_sizes=vv['hidden_sizes_policy'],
        hidden_nonlinearity=vv['hidden_nonlinearity_policy'],
    )
    baseline = LinearFeatureBaseline(env_spec=env.spec)
    # Wire everything into the model-based TRPO algorithm and train.
    algo = ModelTRPO(
        env=env,
        policy=policy,
        dynamics_model=dynamics_model,
        baseline=baseline,
        batch_size_env_samples=vv['batch_size_env_samples'],
        batch_size_dynamics_samples=vv['batch_size_dynamics_samples'],
        initial_random_samples=vv['initial_random_samples'],
        num_gradient_steps_per_iter=vv['num_gradient_steps_per_iter'],
        max_path_length=vv['path_length'],
        n_itr=vv['n_itr'],
        retrain_model_when_reward_decreases=vv['retrain_model_when_reward_decreases'],
        discount=vv['discount'],
        step_size=vv["step_size"],
        reset_policy_std=vv['reset_policy_std'],
        reinit_model_cycle=vv['reinit_model_cycle']
    )
    algo.train()
    sysout_log_file.close()
def run_experiment(vargs):
    """Launch one training experiment from a JSON config file.

    ``vargs`` is expected to be sys.argv: vargs[1] is the path of a JSON
    file whose 'variant' entry parameterises the run.
    """
    # ----------------------- TRAINING ---------------------------------------
    kwargs = json.load(open(vargs[1], 'r'))
    # Random id so repeated runs with identical settings get distinct names.
    exp_id = random.sample(range(1, 1000), 1)[0]
    v = kwargs['variant']
    exp_name = "model_trpo_train_env_%s_%i_%i_%i_%i_id_%i" % (
        v['env'], v['path_length'], v['num_gradient_steps_per_iter'],
        v['batch_size_env_samples'], v['seed'], exp_id)
    # Replace string-valued entries (env name, nonlinearities) with objects.
    v = instantiate_class_stings(v)
    kwargs['variant'] = v
    run_experiment_lite(
        run_train_task,
        exp_name=exp_name,
        **kwargs
    )
def instantiate_class_stings(v):
    """Replace string entries of the variant dict ``v`` with the objects
    they name: the environment class and the tf activation functions.

    Raises NotImplementedError for an unrecognised nonlinearity name.
    """
    v['env'] = globals()[v['env']]
    for nonlinearity_key in ['hidden_nonlinearity_policy', 'hidden_nonlinearity_model']:
        if v[nonlinearity_key] == 'relu':
            v[nonlinearity_key] = tf.nn.relu
        elif v[nonlinearity_key] == 'tanh':
            v[nonlinearity_key] = tf.tanh
        elif v[nonlinearity_key] == 'elu':
            v[nonlinearity_key] = tf.nn.elu
        else:
            # BUG FIX: the original formatted the message with
            # v['hidden_nonlinearity'], a key that does not exist, so the
            # error path raised a KeyError instead of this message.
            raise NotImplementedError(
                'Not able to recognize spicified hidden_nonlinearity: %s'
                % v[nonlinearity_key])
    return v
if __name__ == "__main__":
run_experiment(sys.argv)
| from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from sandbox.ours.envs.normalized_env import normalize
from sandbox.ours.envs.base import TfEnv
from sandbox.ours.policies.improved_gauss_mlp_policy import GaussianMLPPolicy
from rllab.misc.instrument import run_experiment_lite
from rllab.misc.instrument import VariantGenerator
from rllab import config
from experiments.helpers.ec2_helpers import cheapest_subnets
from sandbox.ours.dynamics import MLPDynamicsEnsemble
from sandbox.ours.algos.ModelTRPO.model_trpo import ModelTRPO
from sandbox.ours.envs.mujoco import AntEnvRandParams, HalfCheetahEnvRandParams, HopperEnvRandParams, SwimmerEnvRandParams, WalkerEnvRandomParams
import tensorflow as tf
import sys
import argparse
import random
import json
import os
def run_train_task(vv):
import sys
print(vv['exp_prefix'])
sysout_log_path = os.path.join(config.LOG_DIR, 'local', vv['exp_prefix'], vv['exp_name'], 'stdout.log')
sysout_log_file = open(sysout_log_path, 'w')
sys.stdout = sysout_log_file
env = TfEnv(normalize(vv['env'](log_scale_limit=vv['log_scale_limit'])))
dynamics_model = MLPDynamicsEnsemble(
name="dyn_model",
env_spec=env.spec,
hidden_sizes=vv['hidden_sizes_model'],
weight_normalization=vv['weight_normalization_model'],
num_models=vv['num_models'],
valid_split_ratio=vv['valid_split_ratio'],
rolling_average_persitency=vv['rolling_average_persitency']
)
policy = GaussianMLPPolicy(
name="policy",
env_spec=env.spec,
hidden_sizes=vv['hidden_sizes_policy'],
hidden_nonlinearity=vv['hidden_nonlinearity_policy'],
)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = ModelTRPO(
env=env,
policy=policy,
dynamics_model=dynamics_model,
baseline=baseline,
batch_size_env_samples=vv['batch_size_env_samples'],
batch_size_dynamics_samples=vv['batch_size_dynamics_samples'],
initial_random_samples=vv['initial_random_samples'],
num_gradient_steps_per_iter=vv['num_gradient_steps_per_iter'],
max_path_length=vv['path_length'],
n_itr=vv['n_itr'],
retrain_model_when_reward_decreases=vv['retrain_model_when_reward_decreases'],
discount=vv['discount'],
step_size=vv["step_size"],
reset_policy_std=vv['reset_policy_std'],
reinit_model_cycle=vv['reinit_model_cycle']
)
algo.train()
sysout_log_file.close()
def run_experiment(vargs):
# ----------------------- TRAINING ---------------------------------------
kwargs = json.load(open(vargs[1], 'r'))
exp_id = random.sample(range(1, 1000), 1)[0]
v = kwargs['variant']
exp_name = "model_trpo_train_env_%s_%i_%i_%i_%i_id_%i" % (
v['env'], v['path_length'], v['num_gradient_steps_per_iter'],
v['batch_size_env_samples'], v['seed'], exp_id)
v = instantiate_class_stings(v)
kwargs['variant'] = v
run_experiment_lite(
run_train_task,
exp_name=exp_name,
**kwargs
)
def instantiate_class_stings(v):
v['env'] = globals()[v['env']]
for nonlinearity_key in ['hidden_nonlinearity_policy', 'hidden_nonlinearity_model']:
if v[nonlinearity_key] == 'relu':
v[nonlinearity_key] = tf.nn.relu
elif v[nonlinearity_key] == 'tanh':
v[nonlinearity_key] = tf.tanh
elif v[nonlinearity_key] == 'elu':
v[nonlinearity_key] = tf.nn.elu
else:
raise NotImplementedError('Not able to recognize spicified hidden_nonlinearity: %s' % v['hidden_nonlinearity'])
return v
if __name__ == "__main__":
run_experiment(sys.argv)
| en | 0.13246 | # ----------------------- TRAINING --------------------------------------- | 1.684105 | 2 |
pyrpgkit/turn.py | feakuru/pyrpgk | 0 | 6616966 | """Classes describing turns and other chronological entities."""
class Move:
    """A single move, e.g. an attempt to use an Ability.

    Stub: concrete move behaviour is not implemented yet.
    """
class Turn:
    """A time-ordered series of Actions.

    Stub: concrete turn behaviour is not implemented yet.
    """
class TurnHistory:
"""A chronological sequence of Turns."""
pass | """Classes describing turns and other chronological entities."""
class Move:
"""A single move, e.g. an attemt to use an Ability."""
pass
class Turn:
"""A time-ordered series of Actions."""
pass
class TurnHistory:
"""A chronological sequence of Turns."""
pass | en | 0.898364 | Classes describing turns and other chronological entities. A single move, e.g. an attemt to use an Ability. A time-ordered series of Actions. A chronological sequence of Turns. | 1.777911 | 2 |
src/simplefit/classifier.py | UBC-MDS/simplefit | 0 | 6616967 | <gh_stars>0
import pandas as pd
from sklearn.compose import make_column_transformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.dummy import DummyClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_validate
from sklearn.pipeline import make_pipeline
def classifier(train_df, target_col, numeric_feats = None, categorical_feats = None, cv = 5):
    """This function preprocess the data, fit baseline model(dummyclassifier) and logistic regression with default setups to provide data scientists
    easy access to the common models results(scores).

    Parameters
    ----------
    train_df : pandas.DataFrame
        The clean train data which includes target column.
    target_col : str
        The column of the train data that has the target values.
    numeric_feats : list, optional
        The numeric features that needs to be considered in the model. If None
        or an empty list, all numeric columns are used.
    categorical_feats : list, optional
        The categorical features that needs to be considered in the model. If
        None or an empty list, all non-numeric columns are used.
    cv : int, optional
        The number of folds on the data for train and validation set.

    Returns
    -------
    Data frame
        A data frame that includes test scores and train scores for each model.

    Examples
    -------
    >>> classifier(train_df, target_col = 'target')
    >>> classifier(train_df, target_col = 'target', numeric_feats = ['danceability', 'loudness'], categorical_feats=['genre'], cv=10)
    """
    # --- input validation (all checks up front, before touching the data) ---
    if (not(isinstance(train_df , pd.core.frame.DataFrame))):
        raise TypeError("Invalid function input. Please enter a data frame")
    if (not (train_df.isna().sum().sum() == 0)):
        raise ValueError("Invalid function input. Please pass a clean pandas data frame")
    if not(isinstance(target_col , str)):
        raise TypeError("Target column must be passed as a string")
    # BUG FIX: the original required `list` before handling None, so calling
    # with the documented defaults (numeric_feats=None / categorical_feats=None)
    # always raised TypeError.  None is now accepted and means "use all".
    # (This also removes the duplicated validation blocks of the original.)
    if numeric_feats is not None and not isinstance(numeric_feats, list):
        raise TypeError("Numeric Features should be passed as a list")
    if categorical_feats is not None and not isinstance(categorical_feats, list):
        raise TypeError("Categorical Features should be passed as a list")

    X_train = train_df.drop(columns=target_col, axis=1)
    y_train = train_df[target_col]

    # NOTE(review): the defaults are derived from train_df, so a numeric (or
    # non-numeric) target column is included among the features -- confirm
    # whether these should be selected from X_train instead.
    if not numeric_feats:
        numeric_feats = train_df.select_dtypes(include='number').columns.tolist()
    if not categorical_feats:
        categorical_feats = train_df.select_dtypes(exclude='number').columns.tolist()

    # Scale numeric features and one-hot encode categorical ones.
    preprocessor = make_column_transformer(
        (StandardScaler(), numeric_feats),
        (OneHotEncoder(), categorical_feats))

    dummy = DummyClassifier()
    lr = make_pipeline(preprocessor, LogisticRegression())

    # Cross-validate each model and collect the mean train/test scores.
    results = pd.Series(dtype='float64')
    models = {"DummyClassifier": dummy, "LogisticRegression" : lr}
    for model in models :
        scores = cross_validate(models[model], X_train, y_train, return_train_score = True, cv = cv)
        mean_scores = pd.DataFrame(scores).mean().to_frame(model)
        results = pd.concat([results, mean_scores], axis = 1)
    # Drop the empty seed column (label 0) used to start the concat.
    results = results.drop(columns = 0, axis=1)
    return results
| import pandas as pd
from sklearn.compose import make_column_transformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.dummy import DummyClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_validate
from sklearn.pipeline import make_pipeline
def classifier(train_df, target_col, numeric_feats = None, categorical_feats = None, cv = 5):
"""This function preprocess the data, fit baseline model(dummyclassifier) and logistic regression with default setups to provide data scientists
easy access to the common models results(scores).
Parameters
----------
train_df : pandas.DataFrame
The clean train data which includes target column.
target_col : str
The column of the train data that has the target values.
numeric_feats = list
The numeric features that needs to be considered in the model. If the user enters an empty list, the function will use all numeric columns.
categorical_feats : list
The categorical features that needs to be considered in the model.
cv : int, optional
The number of folds on the data for train and validation set.
Returns
-------
Data frame
A data frame that includes test scores and train scores for each model.
Examples
-------
>>> classifier(train_df, target_col = 'target', numerical_feats = [], categorical_features = [])
>>> classifier(train_df, target_col = 'target', numeric_feats = ['danceability', 'loudness'], categorical_feats=['genre'], cv=10)
"""
if (not(isinstance(train_df , pd.core.frame.DataFrame))):
raise TypeError("Invalid function input. Please enter a data frame")
if (not (train_df.isna().sum().sum() == 0)):
raise ValueError("Invalid function input. Please pass a clean pandas data frame")
if not(isinstance(numeric_feats , list)):
raise TypeError("Numeric Features should be passed as a list")
if not(isinstance(categorical_feats , list)):
raise TypeError("Categorical Features should be passed as a list")
if not(isinstance(target_col , str)):
raise TypeError("Target column must be passed as a string")
X_train = train_df.drop(columns=target_col, axis=1)
y_train = train_df[target_col]
if not isinstance(numeric_feats, list):
raise TypeError("The numeric features have to be entered as a list")
if not isinstance(categorical_feats , list):
raise TypeError("The categorical features have to be entered as a list")
if numeric_feats == None or numeric_feats==[]:
numeric_feats = train_df.select_dtypes(include='number').columns.tolist()
if categorical_feats == None or categorical_feats ==[]:
categorical_feats = train_df.select_dtypes(exclude='number').columns.tolist()
preprocessor = make_column_transformer(
(StandardScaler(), numeric_feats),
(OneHotEncoder(), categorical_feats))
dummy = DummyClassifier()
lr = make_pipeline(preprocessor, LogisticRegression())
results = pd.Series(dtype='float64')
models = {"DummyClassifier": dummy, "LogisticRegression" : lr}
for model in models :
scores = cross_validate(models[model], X_train, y_train, return_train_score = True,cv = cv)
mean_scores = pd.DataFrame(scores).mean().to_frame(model)
results = pd.concat([results, mean_scores], axis = 1)
results = results.drop(columns = 0, axis=1)
return results | en | 0.687116 | This function preprocess the data, fit baseline model(dummyclassifier) and logistic regression with default setups to provide data scientists easy access to the common models results(scores). Parameters ---------- train_df : pandas.DataFrame The clean train data which includes target column. target_col : str The column of the train data that has the target values. numeric_feats = list The numeric features that needs to be considered in the model. If the user enters an empty list, the function will use all numeric columns. categorical_feats : list The categorical features that needs to be considered in the model. cv : int, optional The number of folds on the data for train and validation set. Returns ------- Data frame A data frame that includes test scores and train scores for each model. Examples ------- >>> classifier(train_df, target_col = 'target', numerical_feats = [], categorical_features = []) >>> classifier(train_df, target_col = 'target', numeric_feats = ['danceability', 'loudness'], categorical_feats=['genre'], cv=10) | 3.740918 | 4 |
tests/test_synchronize.py | kamac/AskXML | 74 | 6616968 | from askxml import *
import xml.etree.ElementTree as ET
import tempfile
import unittest
_xml_file_simple = """
<XML>
<RootTable first="1" second="2">
<Child>Hello</Child>
<Child third="3"></Child>
</RootTable>
<RootTable />
<RootTableSecond>Hi</RootTableSecond>
</XML>"""
class TestSynchronize(unittest.TestCase):
    """Verify that running AskXML over a document leaves it structurally intact."""

    def test_preserves_structure(self):
        """Open _xml_file_simple through AskXML, close it, then re-parse the
        file and assert every tag, attribute and text node is unchanged."""
        with tempfile.SpooledTemporaryFile(mode='w+') as f:
            f.write(_xml_file_simple)
            f.seek(0)
            # Round-trip the document through AskXML (open + close rewrites it).
            AskXML(f).close()
            f.seek(0)
            tree = ET.parse(f)
            root = tree.getroot()
            self.assertEqual(root.tag, 'XML')
            self.assertFalse(root.attrib)
            # inspect root node
            children = [child for child in root]
            children_tags = [c.tag for c in children]
            self.assertTrue('RootTable' in children_tags)
            self.assertTrue('RootTableSecond' in children_tags)
            self.assertEqual(len(children), 3)
            # Lazy generator so next() walks the RootTable elements in order.
            RootTables = (c for c in children if c.tag == 'RootTable')
            # inspect first RootTable
            RootTable = next(RootTables)
            RootTable_children = [child for child in RootTable]
            RootTable_children_tags = [c.tag for c in RootTable_children]
            self.assertTrue('Child' in RootTable_children_tags)
            self.assertEqual(len(RootTable_children), 2)
            # inspect Children
            # make sure Child tags don't have any child nodes
            self.assertEqual(sum(1 for c in RootTable_children[0]), 0)
            self.assertEqual(sum(1 for c in RootTable_children[1]), 0)
            self.assertFalse(RootTable_children[0].attrib)
            self.assertEqual(RootTable_children[0].text, 'Hello')
            self.assertEqual(RootTable_children[1].attrib, {'third': '3'})
            self.assertFalse(RootTable_children[1].text)
            # inspect second RootTable
            RootTable = next(RootTables)
            self.assertEqual(sum(1 for c in RootTable), 0)
            self.assertFalse(RootTable.attrib)
            # inspect RootTableSecond
            RootTableSecond = next(c for c in children if c.tag == 'RootTableSecond')
            self.assertEqual(sum(1 for c in RootTableSecond), 0)
            self.assertFalse(RootTableSecond.attrib)
            self.assertEqual(RootTableSecond.text, 'Hi')
if __name__ == '__main__':
unittest.main() | from askxml import *
import xml.etree.ElementTree as ET
import tempfile
import unittest
_xml_file_simple = """
<XML>
<RootTable first="1" second="2">
<Child>Hello</Child>
<Child third="3"></Child>
</RootTable>
<RootTable />
<RootTableSecond>Hi</RootTableSecond>
</XML>"""
class TestSynchronize(unittest.TestCase):
def test_preserves_structure(self):
with tempfile.SpooledTemporaryFile(mode='w+') as f:
f.write(_xml_file_simple)
f.seek(0)
AskXML(f).close()
f.seek(0)
tree = ET.parse(f)
root = tree.getroot()
self.assertEqual(root.tag, 'XML')
self.assertFalse(root.attrib)
# inspect root node
children = [child for child in root]
children_tags = [c.tag for c in children]
self.assertTrue('RootTable' in children_tags)
self.assertTrue('RootTableSecond' in children_tags)
self.assertEqual(len(children), 3)
RootTables = (c for c in children if c.tag == 'RootTable')
# inspect first RootTable
RootTable = next(RootTables)
RootTable_children = [child for child in RootTable]
RootTable_children_tags = [c.tag for c in RootTable_children]
self.assertTrue('Child' in RootTable_children_tags)
self.assertEqual(len(RootTable_children), 2)
# inspect Children
# make sure Child tags don't have any child nodes
self.assertEqual(sum(1 for c in RootTable_children[0]), 0)
self.assertEqual(sum(1 for c in RootTable_children[1]), 0)
self.assertFalse(RootTable_children[0].attrib)
self.assertEqual(RootTable_children[0].text, 'Hello')
self.assertEqual(RootTable_children[1].attrib, {'third': '3'})
self.assertFalse(RootTable_children[1].text)
# inspect second RootTable
RootTable = next(RootTables)
self.assertEqual(sum(1 for c in RootTable), 0)
self.assertFalse(RootTable.attrib)
# inspect RootTableSecond
RootTableSecond = next(c for c in children if c.tag == 'RootTableSecond')
self.assertEqual(sum(1 for c in RootTableSecond), 0)
self.assertFalse(RootTableSecond.attrib)
self.assertEqual(RootTableSecond.text, 'Hi')
if __name__ == '__main__':
unittest.main() | en | 0.580894 | <XML> <RootTable first="1" second="2"> <Child>Hello</Child> <Child third="3"></Child> </RootTable> <RootTable /> <RootTableSecond>Hi</RootTableSecond> </XML> # inspect root node # inspect first RootTable # inspect Children # make sure Child tags don't have any child nodes # inspect second RootTable # inspect RootTableSecond | 2.842107 | 3 |
Serial/run_experiments.py | Jeff010101/CS5421-database-tuning | 2 | 6616969 | import argparse
from sqlalchemy.orm import sessionmaker
import subprocess
import ast
from db_connect import get_conn
## Argument parser to take the parameters from the command line
## Example on how to run: python run_experiments.py 100 100 100 READ_COMMITTED
# NOTE(review): this is a Python 2 script (print statements below); it will
# not run unmodified under Python 3.
parser = argparse.ArgumentParser()
parser.add_argument('S', type = int, help = 'number of sums')
parser.add_argument('E', type = int, help = 'number of exchange transactions in a process')
parser.add_argument('P', type = int, help = 'number of process')
parser.add_argument('I', help = 'isolation level')
args = parser.parse_args()
## Create engine
# The isolation level string (e.g. READ_COMMITTED) is passed straight to
# SQLAlchemy; the session created here is not used directly in this driver.
engine = get_conn()
Session = sessionmaker(bind=engine.execution_options(isolation_level=args.I, autocommit=True))
sess = Session()
## Calculate the correct sum before applying the function
# Run a single summing pass with no concurrent writers to obtain the
# reference balance total.
# NOTE(review): the worker-script path is a hard-coded absolute Windows path;
# this only runs on the author's machine.
p0 = subprocess.Popen(['python', 'c:/Users/User/OneDrive/NUS/course/CS5421/assigment/project5/run_sums.py', str(1), args.I], stdout=subprocess.PIPE)
correct_sum = ast.literal_eval(p0.communicate()[0])[0]
## Create subprocess to run the transactions paralelly
# p1 repeatedly sums the balances while p2 runs P processes of E exchange
# transactions each; running them concurrently exercises the isolation level.
p1 = subprocess.Popen(['python', 'c:/Users/User/OneDrive/NUS/course/CS5421/assigment/project5/run_sums.py', str(args.S), args.I], stdout=subprocess.PIPE)
p2 = subprocess.Popen(['python', 'c:/Users/User/OneDrive/NUS/course/CS5421/assigment/project5/run_all_exchanges.py', str(args.E), str(args.P), args.I], stdout=subprocess.PIPE)
# NOTE(review): calling wait() before communicate() on a process whose stdout
# is a PIPE can deadlock if the child fills the OS pipe buffer; communicate()
# alone already waits for the child. Verify the workers' output stays small.
p1.wait()
p2.wait()
## Check the new sums and calculate correctness
# Each worker prints a Python literal on stdout: a list of sums from p1 and
# an elapsed-time value from p2.
sums = ast.literal_eval(p1.communicate()[0])
time = ast.literal_eval(p2.communicate()[0])
# A sum is "good" if it matches the reference total to within a cent.
good = [x for x in sums if abs(x - correct_sum) < 0.01]
print 'Correctness:', float(len(good)) / args.S
print 'Time:', time
| import argparse
from sqlalchemy.orm import sessionmaker
import subprocess
import ast
from db_connect import get_conn
## Argument parser to take the parameters from the command line
## Example on how to run: python run_experiments.py 100 100 100 READ_COMMITTED
parser = argparse.ArgumentParser()
parser.add_argument('S', type = int, help = 'number of sums')
parser.add_argument('E', type = int, help = 'number of exchange transactions in a process')
parser.add_argument('P', type = int, help = 'number of process')
parser.add_argument('I', help = 'isolation level')
args = parser.parse_args()
## Create engine
engine = get_conn()
Session = sessionmaker(bind=engine.execution_options(isolation_level=args.I, autocommit=True))
sess = Session()
## Calculate the correct sum before applying the function
p0 = subprocess.Popen(['python', 'c:/Users/User/OneDrive/NUS/course/CS5421/assigment/project5/run_sums.py', str(1), args.I], stdout=subprocess.PIPE)
correct_sum = ast.literal_eval(p0.communicate()[0])[0]
## Create subprocess to run the transactions paralelly
p1 = subprocess.Popen(['python', 'c:/Users/User/OneDrive/NUS/course/CS5421/assigment/project5/run_sums.py', str(args.S), args.I], stdout=subprocess.PIPE)
p2 = subprocess.Popen(['python', 'c:/Users/User/OneDrive/NUS/course/CS5421/assigment/project5/run_all_exchanges.py', str(args.E), str(args.P), args.I], stdout=subprocess.PIPE)
p1.wait()
p2.wait()
## Check the new sums and calculate correctness
sums = ast.literal_eval(p1.communicate()[0])
time = ast.literal_eval(p2.communicate()[0])
good = [x for x in sums if abs(x - correct_sum) < 0.01]
print 'Correctness:', float(len(good)) / args.S
print 'Time:', time
| en | 0.524963 | ## Argument parser to take the parameters from the command line ## Example on how to run: python run_experiments.py 100 100 100 READ_COMMITTED ## Create engine ## Calculate the correct sum before applying the function ## Create subprocess to run the transactions paralelly ## Check the new sums and calculate correctness | 2.39927 | 2 |
broker/rabbitmq_producer.py | juanitodread/message-queue-poc | 0 | 6616970 | from pika.adapters.blocking_connection import BlockingConnection
from pika.connection import ConnectionParameters
HOST = 'localhost'
class Producer:
    """Fanout publisher for a RabbitMQ broker.

    Owns a single blocking connection to the broker on ``HOST``. Each call to
    :meth:`produce` declares the fanout exchange (idempotent) and publishes
    one message to it.
    """

    def __init__(self):
        # Connect eagerly so a broker outage surfaces at construction time.
        self._connection = BlockingConnection(ConnectionParameters(HOST))

    def produce(self, message: str, queue: str) -> None:
        """Publish *message* to the fanout exchange named *queue*.

        Fix: the channel opened for the publish is now closed in a
        ``finally`` block — previously every call left a channel open on the
        connection, leaking channels on a long-lived producer.
        """
        channel = self.get_channel()
        try:
            channel.exchange_declare(
                exchange=queue,
                exchange_type='fanout'
            )
            channel.basic_publish(
                exchange=queue,
                routing_key='',
                body=message
            )
            print(f'Message: {message} published on queue: {queue}')
        finally:
            channel.close()

    def get_channel(self):
        """Return a fresh channel on the current connection."""
        return self._connection.channel()

    def open(self) -> None:
        """(Re)open the broker connection, replacing the previous one.

        NOTE(review): a still-open previous connection is not closed here —
        presumably callers pair this with :meth:`close`; verify against usage.
        """
        self._connection = BlockingConnection(ConnectionParameters(HOST))

    def close(self) -> None:
        """Close the underlying broker connection."""
        self._connection.close()
| from pika.adapters.blocking_connection import BlockingConnection
from pika.connection import ConnectionParameters
HOST = 'localhost'
class Producer:
def __init__(self):
self._connection = BlockingConnection(ConnectionParameters(HOST))
def produce(self, message: str, queue: str) -> None:
channel = self.get_channel()
channel.exchange_declare(
exchange=queue,
exchange_type='fanout'
)
channel.basic_publish(
exchange=queue,
routing_key='',
body=message
)
print(f'Message: {message} published on queue: {queue}')
def get_channel(self):
return self._connection.channel()
def open(self) -> None:
self._connection = BlockingConnection(ConnectionParameters(HOST))
def close(self) -> None:
self._connection.close()
| none | 1 | 2.802173 | 3 | |
magni/cs/reconstruction/gamp/_config.py | SIP-AAU/Magni | 42 | 6616971 | """
..
Copyright (c) 2015-2017, Magni developers.
All rights reserved.
See LICENSE.rst for further information.
Module providing configuration options for the `magni.cs.reconstruction.gamp`
subpackage.
See also
--------
magni.cs.reconstruction._config.Configger : The Configger class used
Notes
-----
This module instantiates the `Configger` class provided by
`magni.cs.reconstruction._config.Configger`. The configuration options are the
following:
damping : float
The damping applied to the variable side updates (the default is 0.0).
input_channel : magni.utils.validation.types.MMSEInputChannel
The input channel to use (the default is
magni.cs.reconstruction.gamp.input_channel.IIDBG).
input_channel_parameters : dict
The parameters used in the input channel (no default is provided, which
implies that this must be specified by the user).
iterations : int
The maximum number of iterations to do (the default is 300).
output_channel : magni.utils.validation.types.MMSEOutputChannel
The output channel to use (the default is
magni.cs.reconstruction.gamp.output_channel.AWGN).
output_channel_parameters : dict
The parameters used in the output channel (no default is provided, which
implies that this must be specified by the user).
precision_float : {np.float, np.float16, np.float32, np.float64, np.float128,
np.complex64, np.complex128, np.complex256}
The floating point precision used for the computations (the default is
np.float64).
report_A_asq_setup : bool
The indicator of whether or not to print the A_asq details (the default is
False).
report_history : bool
The indicator of whether or not to return the progress history along with
the result (the default is False).
stop_criterion : magni.utils.validation.types.StopCriterion
The stop criterion to use in the iterations (the default is
magni.cs.reconstruction.gamp.stop_criterion.MSEConvergence).
sum_approximation_constant : dict
The method and constant used in a sum approximation of the squared system
transform (the default is {'rangan': 1.0}, which implies that Rangan's
uniform variance methods with the system matrix adapted ||A||_F^2/(m*n)
constant is used).
tolerance : float
The least acceptable stop criterion tolerance to break the interations (the
default is 1e-6).
true_solution : ndarray or None
The true solution to allow for tracking the convergence of the algorithm in
the artificial setup where the true solution is known a-priori (the default
is None, which implies that no true solution tracking is used).
warm_start : list or tuple
The collection containing the initial guess of the solution vector
(alpha_bar) and the solution variance vector (alpha_tilde) (the default is
None, which implies that alpha_bar is taken to be a vector of zeros and
alpha_tilde is taken to be a vector of ones).
"""
from __future__ import division
import numpy as np
from magni.cs.reconstruction._config import Configger as _Configger
from magni.cs.reconstruction.gamp.stop_criterion import (
MSEConvergence as _MSEConvergence)
from magni.cs.reconstruction.gamp.input_channel import IIDBG as _IIDBG
from magni.cs.reconstruction.gamp.output_channel import AWGN as _AWGN
from magni.utils.validation import validate_generic as _generic
from magni.utils.validation import validate_levels as _levels
from magni.utils.validation import validate_numeric as _numeric
from magni.utils.validation.types import (
MMSEInputChannel as _MMSEInputChannel,
MMSEOutputChannel as _MMSEOutputChannel, StopCriterion as _StopCriterion)
# Global configuration object for the GAMP reconstruction subpackage.
# First mapping: default option values; second mapping: validation rules
# applied whenever an option is (re)assigned. See the module docstring for
# the meaning of each option.
configger = _Configger(
    {'damping': 0.0,
     'input_channel': _IIDBG,
     'input_channel_parameters': dict(),
     'iterations': 300,
     'output_channel': _AWGN,
     'output_channel_parameters': dict(),
     'precision_float': np.float64,
     'report_A_asq_setup': False,
     'report_history': False,
     'stop_criterion': _MSEConvergence,
     'sum_approximation_constant': {'rangan': 1.0},
     'tolerance': 1e-6,
     'true_solution': None,
     'warm_start': None},
    {'damping': _numeric(None, 'floating', range_='[0;1)'),
     'input_channel': _generic(None, 'class', superclass=_MMSEInputChannel),
     'input_channel_parameters': _generic(None, 'mapping'),
     'iterations': _numeric(None, 'integer', range_='[1;inf)'),
     'output_channel': _generic(None, 'class', superclass=_MMSEOutputChannel),
     'output_channel_parameters': _generic(None, 'mapping'),
     # Fix: the deprecated aliases np.float, np.float_ and np.complex_ were
     # removed in NumPy 1.24 / 2.0 and made this module raise AttributeError
     # at import time. np.float was an alias of the builtin float, np.float_
     # of np.float64 and np.complex_ of np.complex128, so the replacements
     # below are value-identical on older NumPy as well. float16/32/64 and
     # complex64/128 always exist; float128/complex256 are platform
     # dependent, hence the getattr fallbacks.
     'precision_float': _generic(None, type, value_in=(
         float,
         np.float16,
         np.float32,
         np.float64,
         getattr(np, 'float128', np.float64),
         np.complex64,
         np.complex128,
         getattr(np, 'complex256', np.complex128))),
     'report_A_asq_setup': _numeric(None, 'boolean'),
     'report_history': _numeric(None, 'boolean'),
     'stop_criterion': _generic(None, 'class', superclass=_StopCriterion),
     'sum_approximation_constant': _levels(None, (
         _generic(None, 'mapping', keys_in=('rangan', 'krzakala'), len_=1),
         _numeric(None, ('integer', 'floating'), range_='(0;inf)'))),
     'tolerance': _numeric(None, 'floating', range_='[0;inf)'),
     'true_solution': _numeric(
         None, ('integer', 'floating', 'complex'), shape=(-1, 1),
         ignore_none=True),
     'warm_start': _levels(None, (
         _generic(None, 'explicit collection', len_=2, ignore_none=True),
         _numeric(None, ('integer', 'floating', 'complex'), shape=(-1, 1))))})
| """
..
Copyright (c) 2015-2017, Magni developers.
All rights reserved.
See LICENSE.rst for further information.
Module providing configuration options for the `magni.cs.reconstruction.gamp`
subpackage.
See also
--------
magni.cs.reconstruction._config.Configger : The Configger class used
Notes
-----
This module instantiates the `Configger` class provided by
`magni.cs.reconstruction._config.Configger`. The configuration options are the
following:
damping : float
The damping applied to the variable side updates (the default is 0.0).
input_channel : magni.utils.validation.types.MMSEInputChannel
The input channel to use (the default is
magni.cs.reconstruction.gamp.input_channel.IIDBG).
input_channel_parameters : dict
The parameters used in the input channel (no default is provided, which
implies that this must be specified by the user).
iterations : int
The maximum number of iterations to do (the default is 300).
output_channel : magni.utils.validation.types.MMSEOutputChannel
The output channel to use (the default is
magni.cs.reconstruction.gamp.output_channel.AWGN).
output_channel_parameters : dict
The parameters used in the output channel (no default is provided, which
implies that this must be specified by the user).
precision_float : {np.float, np.float16, np.float32, np.float64, np.float128,
np.complex64, np.complex128, np.complex256}
The floating point precision used for the computations (the default is
np.float64).
report_A_asq_setup : bool
The indicator of whether or not to print the A_asq details (the default is
False).
report_history : bool
The indicator of whether or not to return the progress history along with
the result (the default is False).
stop_criterion : magni.utils.validation.types.StopCriterion
The stop criterion to use in the iterations (the default is
magni.cs.reconstruction.gamp.stop_criterion.MSEConvergence).
sum_approximation_constant : dict
The method and constant used in a sum approximation of the squared system
transform (the default is {'rangan': 1.0}, which implies that Rangan's
uniform variance methods with the system matrix adapted ||A||_F^2/(m*n)
constant is used).
tolerance : float
The least acceptable stop criterion tolerance to break the interations (the
default is 1e-6).
true_solution : ndarray or None
The true solution to allow for tracking the convergence of the algorithm in
the artificial setup where the true solution is known a-priori (the default
is None, which implies that no true solution tracking is used).
warm_start : list or tuple
The collection containing the initial guess of the solution vector
(alpha_bar) and the solution variance vector (alpha_tilde) (the default is
None, which implies that alpha_bar is taken to be a vector of zeros and
alpha_tilde is taken to be a vector of ones).
"""
from __future__ import division
import numpy as np
from magni.cs.reconstruction._config import Configger as _Configger
from magni.cs.reconstruction.gamp.stop_criterion import (
MSEConvergence as _MSEConvergence)
from magni.cs.reconstruction.gamp.input_channel import IIDBG as _IIDBG
from magni.cs.reconstruction.gamp.output_channel import AWGN as _AWGN
from magni.utils.validation import validate_generic as _generic
from magni.utils.validation import validate_levels as _levels
from magni.utils.validation import validate_numeric as _numeric
from magni.utils.validation.types import (
MMSEInputChannel as _MMSEInputChannel,
MMSEOutputChannel as _MMSEOutputChannel, StopCriterion as _StopCriterion)
configger = _Configger(
{'damping': 0.0,
'input_channel': _IIDBG,
'input_channel_parameters': dict(),
'iterations': 300,
'output_channel': _AWGN,
'output_channel_parameters': dict(),
'precision_float': np.float64,
'report_A_asq_setup': False,
'report_history': False,
'stop_criterion': _MSEConvergence,
'sum_approximation_constant': {'rangan': 1.0},
'tolerance': 1e-6,
'true_solution': None,
'warm_start': None},
{'damping': _numeric(None, 'floating', range_='[0;1)'),
'input_channel': _generic(None, 'class', superclass=_MMSEInputChannel),
'input_channel_parameters': _generic(None, 'mapping'),
'iterations': _numeric(None, 'integer', range_='[1;inf)'),
'output_channel': _generic(None, 'class', superclass=_MMSEOutputChannel),
'output_channel_parameters': _generic(None, 'mapping'),
'precision_float': _generic(None, type, value_in=(
np.float,
getattr(np, 'float16', np.float_),
getattr(np, 'float32', np.float_),
getattr(np, 'float64', np.float_),
getattr(np, 'float128', np.float_),
getattr(np, 'complex64', np.complex_),
getattr(np, 'complex128', np.complex_),
getattr(np, 'complex256', np.complex_))),
'report_A_asq_setup': _numeric(None, 'boolean'),
'report_history': _numeric(None, 'boolean'),
'stop_criterion': _generic(None, 'class', superclass=_StopCriterion),
'sum_approximation_constant': _levels(None, (
_generic(None, 'mapping', keys_in=('rangan', 'krzakala'), len_=1),
_numeric(None, ('integer', 'floating'), range_='(0;inf)'))),
'tolerance': _numeric(None, 'floating', range_='[0;inf)'),
'true_solution': _numeric(
None, ('integer', 'floating', 'complex'), shape=(-1, 1),
ignore_none=True),
'warm_start': _levels(None, (
_generic(None, 'explicit collection', len_=2, ignore_none=True),
_numeric(None, ('integer', 'floating', 'complex'), shape=(-1, 1))))})
| en | 0.670902 | .. Copyright (c) 2015-2017, Magni developers. All rights reserved. See LICENSE.rst for further information. Module providing configuration options for the `magni.cs.reconstruction.gamp` subpackage. See also -------- magni.cs.reconstruction._config.Configger : The Configger class used Notes ----- This module instantiates the `Configger` class provided by `magni.cs.reconstruction._config.Configger`. The configuration options are the following: damping : float The damping applied to the variable side updates (the default is 0.0). input_channel : magni.utils.validation.types.MMSEInputChannel The input channel to use (the default is magni.cs.reconstruction.gamp.input_channel.IIDBG). input_channel_parameters : dict The parameters used in the input channel (no default is provided, which implies that this must be specified by the user). iterations : int The maximum number of iterations to do (the default is 300). output_channel : magni.utils.validation.types.MMSEOutputChannel The output channel to use (the default is magni.cs.reconstruction.gamp.output_channel.AWGN). output_channel_parameters : dict The parameters used in the output channel (no default is provided, which implies that this must be specified by the user). precision_float : {np.float, np.float16, np.float32, np.float64, np.float128, np.complex64, np.complex128, np.complex256} The floating point precision used for the computations (the default is np.float64). report_A_asq_setup : bool The indicator of whether or not to print the A_asq details (the default is False). report_history : bool The indicator of whether or not to return the progress history along with the result (the default is False). stop_criterion : magni.utils.validation.types.StopCriterion The stop criterion to use in the iterations (the default is magni.cs.reconstruction.gamp.stop_criterion.MSEConvergence). 
sum_approximation_constant : dict The method and constant used in a sum approximation of the squared system transform (the default is {'rangan': 1.0}, which implies that Rangan's uniform variance methods with the system matrix adapted ||A||_F^2/(m*n) constant is used). tolerance : float The least acceptable stop criterion tolerance to break the interations (the default is 1e-6). true_solution : ndarray or None The true solution to allow for tracking the convergence of the algorithm in the artificial setup where the true solution is known a-priori (the default is None, which implies that no true solution tracking is used). warm_start : list or tuple The collection containing the initial guess of the solution vector (alpha_bar) and the solution variance vector (alpha_tilde) (the default is None, which implies that alpha_bar is taken to be a vector of zeros and alpha_tilde is taken to be a vector of ones). | 1.760558 | 2 |
lab6/code/part2/gnn_karate.py | khaoulabelahsen/MVA-AlteGrad | 1 | 6616972 | """
Deep Learning on Graphs - ALTEGRAD - Dec 2019
"""
import numpy as np
import networkx as nx
import time
import torch
import torch.nn.functional as F
import torch.optim as optim
from sklearn.metrics import accuracy_score, log_loss
from utils import accuracy, normalize_adjacency
from models import GNN
# Hyperparameters
epochs = 100
n_hidden_1 = 8
n_hidden_2 = 16
learning_rate = 0.01
dropout_rate = 0.1
# Loads the karate network
G = nx.read_weighted_edgelist('karate.edgelist', delimiter=' ', nodetype=int, create_using=nx.Graph())
print(G.number_of_nodes())
print(G.number_of_edges())
n = G.number_of_nodes()
# Loads the class labels
class_labels = np.loadtxt('karate_labels.txt', delimiter=',', dtype=np.int32)
idx_to_class_label = dict()
for i in range(class_labels.shape[0]):
idx_to_class_label[class_labels[i,0]] = class_labels[i,1]
y = list()
for node in G.nodes():
y.append(idx_to_class_label[node])
y = np.array(y)
n_class = 2
adj = nx.to_numpy_matrix(G) # Obtains the adjacency matrix
adj = normalize_adjacency(adj) # Normalizes the adjacency matrix
############## Task 12
# Set the feature of all nodes to the same value
# Yields indices to split data into training and test sets
idx = np.random.RandomState(seed=42).permutation(n)
idx_train = idx[:int(0.8*n)]
idx_test = idx[int(0.8*n):]
features = np.ones((n,n)) # Generates node features
# Transforms the numpy matrices/vectors to torch tensors
features = torch.FloatTensor(features)
y = torch.LongTensor(y)
adj = torch.FloatTensor(adj)
idx_train = torch.LongTensor(idx_train)
idx_test = torch.LongTensor(idx_test)
# Creates the model and specifies the optimizer
model = GNN(features.shape[1], n_hidden_1, n_hidden_2, n_class, dropout_rate)
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
def train(epoch):
    """Run one optimization step over the training nodes and log progress."""
    tic = time.time()
    model.train()
    optimizer.zero_grad()
    logits = model(features, adj)
    train_logits, train_labels = logits[idx_train], y[idx_train]
    loss_train = F.nll_loss(train_logits, train_labels)
    acc_train = accuracy(train_logits, train_labels)
    loss_train.backward()
    optimizer.step()
    print(f'Epoch: {epoch + 1:03d}',
          f'loss_train: {loss_train.item():.4f}',
          f'acc_train: {acc_train.item():.4f}',
          f'time: {time.time() - tic:.4f}s')
def test():
    """Evaluate the trained model on the held-out test nodes."""
    model.eval()
    logits = model(features, adj)
    test_logits, test_labels = logits[idx_test], y[idx_test]
    loss_test = F.nll_loss(test_logits, test_labels)
    acc_test = accuracy(test_logits, test_labels)
    print("Test set results:",
          f"loss= {loss_test.item():.4f}",
          f"accuracy= {acc_test.item():.4f}")
# Train model
t_total = time.time()
for epoch in range(epochs):
train(epoch)
print("Optimization Finished!")
print("Total time elapsed: {:.4f}s".format(time.time() - t_total))
print()
# Testing
test()
features = np.eye(n) # Generates node features
# Transforms the numpy matrices/vectors to torch tensors
features = torch.FloatTensor(features)
y = torch.LongTensor(y)
adj = torch.FloatTensor(adj)
idx_train = torch.LongTensor(idx_train)
idx_test = torch.LongTensor(idx_test)
# Creates the model and specifies the optimizer
model = GNN(features.shape[1], n_hidden_1, n_hidden_2, n_class, dropout_rate)
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
def train(epoch):
    """Run one optimization step over the training nodes and log progress."""
    tic = time.time()
    model.train()
    optimizer.zero_grad()
    logits = model(features, adj)
    train_logits, train_labels = logits[idx_train], y[idx_train]
    loss_train = F.nll_loss(train_logits, train_labels)
    acc_train = accuracy(train_logits, train_labels)
    loss_train.backward()
    optimizer.step()
    print(f'Epoch: {epoch + 1:03d}',
          f'loss_train: {loss_train.item():.4f}',
          f'acc_train: {acc_train.item():.4f}',
          f'time: {time.time() - tic:.4f}s')
def test():
    """Evaluate the trained model on the held-out test nodes."""
    model.eval()
    logits = model(features, adj)
    test_logits, test_labels = logits[idx_test], y[idx_test]
    loss_test = F.nll_loss(test_logits, test_labels)
    acc_test = accuracy(test_logits, test_labels)
    print("Test set results:",
          f"loss= {loss_test.item():.4f}",
          f"accuracy= {acc_test.item():.4f}")
# Train model
t_total = time.time()
for epoch in range(epochs):
train(epoch)
print("Optimization Finished!")
print("Total time elapsed: {:.4f}s".format(time.time() - t_total))
print()
# Testing
test() | """
Deep Learning on Graphs - ALTEGRAD - Dec 2019
"""
import numpy as np
import networkx as nx
import time
import torch
import torch.nn.functional as F
import torch.optim as optim
from sklearn.metrics import accuracy_score, log_loss
from utils import accuracy, normalize_adjacency
from models import GNN
# Hyperparameters
epochs = 100
n_hidden_1 = 8
n_hidden_2 = 16
learning_rate = 0.01
dropout_rate = 0.1
# Loads the karate network
G = nx.read_weighted_edgelist('karate.edgelist', delimiter=' ', nodetype=int, create_using=nx.Graph())
print(G.number_of_nodes())
print(G.number_of_edges())
n = G.number_of_nodes()
# Loads the class labels
class_labels = np.loadtxt('karate_labels.txt', delimiter=',', dtype=np.int32)
idx_to_class_label = dict()
for i in range(class_labels.shape[0]):
idx_to_class_label[class_labels[i,0]] = class_labels[i,1]
y = list()
for node in G.nodes():
y.append(idx_to_class_label[node])
y = np.array(y)
n_class = 2
adj = nx.to_numpy_matrix(G) # Obtains the adjacency matrix
adj = normalize_adjacency(adj) # Normalizes the adjacency matrix
############## Task 12
# Set the feature of all nodes to the same value
# Yields indices to split data into training and test sets
idx = np.random.RandomState(seed=42).permutation(n)
idx_train = idx[:int(0.8*n)]
idx_test = idx[int(0.8*n):]
features = np.ones((n,n)) # Generates node features
# Transforms the numpy matrices/vectors to torch tensors
features = torch.FloatTensor(features)
y = torch.LongTensor(y)
adj = torch.FloatTensor(adj)
idx_train = torch.LongTensor(idx_train)
idx_test = torch.LongTensor(idx_test)
# Creates the model and specifies the optimizer
model = GNN(features.shape[1], n_hidden_1, n_hidden_2, n_class, dropout_rate)
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
def train(epoch):
t = time.time()
model.train()
optimizer.zero_grad()
output = model(features, adj)
loss_train = F.nll_loss(output[idx_train], y[idx_train])
acc_train = accuracy(output[idx_train], y[idx_train])
loss_train.backward()
optimizer.step()
print('Epoch: {:03d}'.format(epoch+1),
'loss_train: {:.4f}'.format(loss_train.item()),
'acc_train: {:.4f}'.format(acc_train.item()),
'time: {:.4f}s'.format(time.time() - t))
def test():
model.eval()
output = model(features, adj)
loss_test = F.nll_loss(output[idx_test], y[idx_test])
acc_test = accuracy(output[idx_test], y[idx_test])
print("Test set results:",
"loss= {:.4f}".format(loss_test.item()),
"accuracy= {:.4f}".format(acc_test.item()))
# Train model
t_total = time.time()
for epoch in range(epochs):
train(epoch)
print("Optimization Finished!")
print("Total time elapsed: {:.4f}s".format(time.time() - t_total))
print()
# Testing
test()
features = np.eye(n) # Generates node features
# Transforms the numpy matrices/vectors to torch tensors
features = torch.FloatTensor(features)
y = torch.LongTensor(y)
adj = torch.FloatTensor(adj)
idx_train = torch.LongTensor(idx_train)
idx_test = torch.LongTensor(idx_test)
# Creates the model and specifies the optimizer
model = GNN(features.shape[1], n_hidden_1, n_hidden_2, n_class, dropout_rate)
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
def train(epoch):
t = time.time()
model.train()
optimizer.zero_grad()
output = model(features, adj)
loss_train = F.nll_loss(output[idx_train], y[idx_train])
acc_train = accuracy(output[idx_train], y[idx_train])
loss_train.backward()
optimizer.step()
print('Epoch: {:03d}'.format(epoch+1),
'loss_train: {:.4f}'.format(loss_train.item()),
'acc_train: {:.4f}'.format(acc_train.item()),
'time: {:.4f}s'.format(time.time() - t))
def test():
model.eval()
output = model(features, adj)
loss_test = F.nll_loss(output[idx_test], y[idx_test])
acc_test = accuracy(output[idx_test], y[idx_test])
print("Test set results:",
"loss= {:.4f}".format(loss_test.item()),
"accuracy= {:.4f}".format(acc_test.item()))
# Train model
t_total = time.time()
for epoch in range(epochs):
train(epoch)
print("Optimization Finished!")
print("Total time elapsed: {:.4f}s".format(time.time() - t_total))
print()
# Testing
test() | en | 0.729615 | Deep Learning on Graphs - ALTEGRAD - Dec 2019 # Hyperparameters # Loads the karate network # Loads the class labels # Obtains the adjacency matrix # Normalizes the adjacency matrix ############## Task 12 # Set the feature of all nodes to the same value # Yields indices to split data into training and test sets # Generates node features # Transforms the numpy matrices/vectors to torch tensors # Creates the model and specifies the optimizer # Train model # Testing # Generates node features # Transforms the numpy matrices/vectors to torch tensors # Creates the model and specifies the optimizer # Train model # Testing | 2.799175 | 3 |
saleor/graphql/discount/filters.py | eanknd/saleor | 1,392 | 6616973 | <gh_stars>1000+
import decimal
from typing import List
import django_filters
from django.db.models import Exists, OuterRef, Q
from django.utils import timezone
from ...discount import DiscountValueType
from ...discount.models import Sale, SaleChannelListing, Voucher, VoucherQueryset
from ..core.filters import ListObjectTypeFilter, MetadataFilterBase, ObjectTypeFilter
from ..core.types import DateTimeRangeInput, IntRangeInput
from ..utils.filters import filter_range_field
from .enums import DiscountStatusEnum, DiscountValueTypeEnum, VoucherDiscountType
def filter_status(
    qs: VoucherQueryset, _, value: List[DiscountStatusEnum]
) -> VoucherQueryset:
    """Restrict *qs* to discounts matching any of the requested statuses.

    An empty selection means "no status filter" and returns *qs* unchanged.
    """
    if not value:
        return qs
    now = timezone.now()
    # Map each selectable status to the queryset that realizes it; the
    # union of the selected ones is intersected with the incoming queryset.
    selectors = {
        DiscountStatusEnum.ACTIVE: lambda: qs.active(now),
        DiscountStatusEnum.EXPIRED: lambda: qs.expired(now),
        DiscountStatusEnum.SCHEDULED: lambda: qs.filter(start_date__gt=now),
    }
    matched = qs.none()
    for status, select in selectors.items():
        if status in value:
            matched |= select()
    return qs & matched
def filter_times_used(qs, _, value):
    """Filter vouchers by how many times they were used (range on ``used``)."""
    return filter_range_field(qs, "used", value)
def filter_discount_type(
    qs: VoucherQueryset, _, values: List[VoucherDiscountType]
) -> VoucherQueryset:
    """Keep only vouchers matching any of the requested discount types."""
    if not values:
        # No selection: leave the queryset untouched.
        return qs
    predicate = Q()
    if VoucherDiscountType.FIXED in values:
        predicate |= Q(
            discount_value_type=VoucherDiscountType.FIXED.value  # type: ignore
        )
    if VoucherDiscountType.PERCENTAGE in values:
        predicate |= Q(
            discount_value_type=VoucherDiscountType.PERCENTAGE.value  # type: ignore
        )
    if VoucherDiscountType.SHIPPING in values:
        # Shipping vouchers are distinguished by voucher ``type``, not by
        # the discount value type.
        predicate |= Q(type=VoucherDiscountType.SHIPPING.value)  # type: ignore
    return qs.filter(predicate)
def filter_started(qs, _, value):
    """Filter discounts whose ``start_date`` falls in the given range."""
    return filter_range_field(qs, "start_date", value)
def filter_sale_type(qs, _, value):
    """Filter sales by discount value type (fixed amount or percentage).

    Any other value leaves the queryset unchanged.
    """
    allowed = (DiscountValueType.FIXED, DiscountValueType.PERCENTAGE)
    return qs.filter(type=value) if value in allowed else qs
def filter_sale_search(qs, _, value):
    """Search sales either by exact channel discount value or by text.

    If *value* parses as a decimal number, match sales having a channel
    listing with exactly that discount value; otherwise fall back to a
    name/type text match.

    NOTE(review): ``ilike`` is not a built-in Django lookup — presumably
    registered elsewhere in the project; verify.
    """
    try:
        amount = decimal.Decimal(value)
    except decimal.DecimalException:
        return qs.filter(Q(name__ilike=value) | Q(type__ilike=value))
    listings = SaleChannelListing.objects.filter(discount_value=amount).values(
        "pk"
    )
    return qs.filter(Exists(listings.filter(sale_id=OuterRef("pk"))))
def filter_voucher_search(qs, _, value):
    """Search vouchers by name or code.

    NOTE(review): ``ilike`` is not a built-in Django lookup — presumably
    registered elsewhere in the project; verify.
    """
    return qs.filter(Q(name__ilike=value) | Q(code__ilike=value))
def filter_updated_at_range(qs, _, value):
    """Filter sales whose ``updated_at`` timestamp falls in the given range."""
    return filter_range_field(qs, "updated_at", value)
class VoucherFilter(MetadataFilterBase):
    """Filter set for vouchers: status, usage count, discount type,
    start date and free-text search over name/code."""
    # Match any of the requested statuses (active/expired/scheduled).
    status = ListObjectTypeFilter(input_class=DiscountStatusEnum, method=filter_status)
    times_used = ObjectTypeFilter(input_class=IntRangeInput, method=filter_times_used)
    discount_type = ListObjectTypeFilter(
        input_class=VoucherDiscountType, method=filter_discount_type
    )
    started = ObjectTypeFilter(input_class=DateTimeRangeInput, method=filter_started)
    search = django_filters.CharFilter(method=filter_voucher_search)
    class Meta:
        model = Voucher
        fields = ["status", "times_used", "discount_type", "started", "search"]
class SaleFilter(MetadataFilterBase):
    """Filter set for sales: status, discount value type, start date,
    last-update range and free-text/value search."""
    status = ListObjectTypeFilter(input_class=DiscountStatusEnum, method=filter_status)
    sale_type = ObjectTypeFilter(
        input_class=DiscountValueTypeEnum, method=filter_sale_type
    )
    started = ObjectTypeFilter(input_class=DateTimeRangeInput, method=filter_started)
    updated_at = ObjectTypeFilter(
        input_class=DateTimeRangeInput, method=filter_updated_at_range
    )
    search = django_filters.CharFilter(method=filter_sale_search)
    class Meta:
        model = Sale
        fields = ["status", "sale_type", "started", "search"]
| import decimal
from typing import List
import django_filters
from django.db.models import Exists, OuterRef, Q
from django.utils import timezone
from ...discount import DiscountValueType
from ...discount.models import Sale, SaleChannelListing, Voucher, VoucherQueryset
from ..core.filters import ListObjectTypeFilter, MetadataFilterBase, ObjectTypeFilter
from ..core.types import DateTimeRangeInput, IntRangeInput
from ..utils.filters import filter_range_field
from .enums import DiscountStatusEnum, DiscountValueTypeEnum, VoucherDiscountType
def filter_status(
qs: VoucherQueryset, _, value: List[DiscountStatusEnum]
) -> VoucherQueryset:
if not value:
return qs
query_objects = qs.none()
now = timezone.now()
if DiscountStatusEnum.ACTIVE in value:
query_objects |= qs.active(now)
if DiscountStatusEnum.EXPIRED in value:
query_objects |= qs.expired(now)
if DiscountStatusEnum.SCHEDULED in value:
query_objects |= qs.filter(start_date__gt=now)
return qs & query_objects
def filter_times_used(qs, _, value):
return filter_range_field(qs, "used", value)
def filter_discount_type(
    qs: VoucherQueryset, _, values: List[VoucherDiscountType]
) -> VoucherQueryset:
    """Narrow vouchers to the requested discount kinds.

    FIXED and PERCENTAGE match on the ``discount_value_type`` column,
    while SHIPPING is distinguished by the voucher ``type`` column.
    An empty ``values`` list leaves the queryset untouched.
    """
    if not values:
        return qs
    condition = Q()
    for kind in (VoucherDiscountType.FIXED, VoucherDiscountType.PERCENTAGE):
        if kind in values:
            condition |= Q(discount_value_type=kind.value)  # type: ignore
    if VoucherDiscountType.SHIPPING in values:
        condition |= Q(type=VoucherDiscountType.SHIPPING.value)  # type: ignore
    return qs.filter(condition)
def filter_started(qs, _, value):
    """Filter by the ``start_date`` field using a datetime range input."""
    field_name = "start_date"
    return filter_range_field(qs, field_name, value)
def filter_sale_type(qs, _, value):
    """Filter sales by discount kind; unrecognized values are ignored."""
    recognized = (DiscountValueType.FIXED, DiscountValueType.PERCENTAGE)
    if value in recognized:
        return qs.filter(type=value)
    return qs
def filter_sale_search(qs, _, value):
    """Search sales by name/type, or by discount value when the query is numeric."""
    try:
        amount = decimal.Decimal(value)
    except decimal.DecimalException:
        # Non-numeric query: match against the textual columns instead.
        return qs.filter(Q(name__ilike=value) | Q(type__ilike=value))
    matching_listings = SaleChannelListing.objects.filter(
        discount_value=amount
    ).values("pk")
    return qs.filter(Exists(matching_listings.filter(sale_id=OuterRef("pk"))))
def filter_voucher_search(qs, _, value):
    """Match vouchers whose name or code resembles the search phrase."""
    condition = Q(name__ilike=value) | Q(code__ilike=value)
    return qs.filter(condition)
def filter_updated_at_range(qs, _, value):
    """Filter by the ``updated_at`` field using a datetime range input."""
    field_name = "updated_at"
    return filter_range_field(qs, field_name, value)
class VoucherFilter(MetadataFilterBase):
    """Filter set for voucher queries; delegates to the module-level helpers."""
    status = ListObjectTypeFilter(input_class=DiscountStatusEnum, method=filter_status)
    times_used = ObjectTypeFilter(input_class=IntRangeInput, method=filter_times_used)
    discount_type = ListObjectTypeFilter(
        input_class=VoucherDiscountType, method=filter_discount_type
    )
    started = ObjectTypeFilter(input_class=DateTimeRangeInput, method=filter_started)
    search = django_filters.CharFilter(method=filter_voucher_search)

    class Meta:
        model = Voucher
        fields = ["status", "times_used", "discount_type", "started", "search"]
class SaleFilter(MetadataFilterBase):
    """Filter set for sale queries; delegates to the module-level helpers."""
    status = ListObjectTypeFilter(input_class=DiscountStatusEnum, method=filter_status)
    sale_type = ObjectTypeFilter(
        input_class=DiscountValueTypeEnum, method=filter_sale_type
    )
    started = ObjectTypeFilter(input_class=DateTimeRangeInput, method=filter_started)
    updated_at = ObjectTypeFilter(
        input_class=DateTimeRangeInput, method=filter_updated_at_range
    )
    search = django_filters.CharFilter(method=filter_sale_search)

    class Meta:
        model = Sale
        # NOTE(review): "updated_at" is declared above but missing from this
        # list; declared filters still apply regardless of Meta.fields --
        # confirm the omission is intentional.
        fields = ["status", "sale_type", "started", "search"]
listings/chapter05/search_in_graph.py | SaschaKersken/Daten-Prozessanalyse | 2 | 6616974 | from graph import Graph
from node_search import get_path, dfs, bfs
graph = Graph()
graph = Graph()
graph.add_vertex('A')
graph.add_vertex('B')
graph.add_vertex('C')
graph.add_vertex('D')
graph.add_vertex('E')
graph.add_edge('A', 'B')
graph.add_edge('A', 'D')
graph.add_edge('B', 'C')
graph.add_edge('B', 'D')
graph.add_edge('B', 'E')
graph.add_edge('C', 'E')
graph.add_edge('D', 'E')
a_to_c_1 = dfs(
'A',
lambda vertex: vertex == 'C',
lambda vertex: graph.neighbors(vertex)
)
print("Von A nach C mit Tiefensuche:")
print(get_path(a_to_c_1))
a_to_c_2 = bfs(
'A',
lambda vertex: vertex == 'C',
lambda vertex: graph.neighbors(vertex)
)
print("Von A nach C mit Breitensuche:")
print(get_path(a_to_c_2))
| from graph import Graph
from node_search import get_path, dfs, bfs
# Build the example graph from the book chapter.
# Fix: the original instantiated Graph() twice in a row; the first instance
# was immediately discarded, so the redundant assignment is removed.
graph = Graph()
graph.add_vertex('A')
graph.add_vertex('B')
graph.add_vertex('C')
graph.add_vertex('D')
graph.add_vertex('E')
graph.add_edge('A', 'B')
graph.add_edge('A', 'D')
graph.add_edge('B', 'C')
graph.add_edge('B', 'D')
graph.add_edge('B', 'E')
graph.add_edge('C', 'E')
graph.add_edge('D', 'E')

# Depth-first search from 'A' until 'C' is found.
a_to_c_1 = dfs(
    'A',
    lambda vertex: vertex == 'C',
    lambda vertex: graph.neighbors(vertex)
)
print("Von A nach C mit Tiefensuche:")
print(get_path(a_to_c_1))

# Breadth-first search from 'A' until 'C' is found.
a_to_c_2 = bfs(
    'A',
    lambda vertex: vertex == 'C',
    lambda vertex: graph.neighbors(vertex)
)
print("Von A nach C mit Breitensuche:")
print(get_path(a_to_c_2))
| none | 1 | 3.677814 | 4 | |
packs/openstack/actions/glance.py | jonico/st2contrib | 5 | 6616975 | #!/usr/bin/python
import sys
from lib import openstack
from glanceclient import Client
ostack = openstack.OpenStack('config.yaml')
token = ostack.getToken()
ep = ostack.endpoints['glance']
client = Client('1', endpoint=ep, token=token)
action = ostack.run(client, sys.argv)
print(action)
# results = {sys.argv[0]: []}
#
# if hasattr(action, '__getitem__'):
# for result in action:
# results[sys.argv[0]].append(result)
# else:
# results[sys.argv[0]] = action.to_dict()
#
# print json.dumps(results)
| #!/usr/bin/python
import sys
from lib import openstack
from glanceclient import Client
# Connect to OpenStack using the local YAML configuration, then build a
# Glance (image service) client against the discovered endpoint.
ostack = openstack.OpenStack('config.yaml')
token = ostack.getToken()
ep = ostack.endpoints['glance']
# Glance API version '1' client; authentication is via the Keystone token.
client = Client('1', endpoint=ep, token=token)
# Dispatch the action named on the command line and print its raw result.
action = ostack.run(client, sys.argv)
print(action)
# results = {sys.argv[0]: []}
#
# if hasattr(action, '__getitem__'):
#     for result in action:
#         results[sys.argv[0]].append(result)
# else:
#     results[sys.argv[0]] = action.to_dict()
#
# print json.dumps(results)
| en | 0.305485 | #!/usr/bin/python # results = {sys.argv[0]: []} # # if hasattr(action, '__getitem__'): # for result in action: # results[sys.argv[0]].append(result) # else: # results[sys.argv[0]] = action.to_dict() # # print json.dumps(results) | 2.463028 | 2 |
src/lattice_structures.py | tomogwen/fpdcluster | 10 | 6616976 | <gh_stars>1-10
import clustering
import numpy as np
import matplotlib.pyplot as plt
import dionysus as dion
import file_utils
def plot_diagram(dgm, ax=False, show=False, labels=False, line_style=None, pt_style=None, lims=False):
    # taken from Dionysus2 package
    """Scatter-plot a persistence diagram together with its diagonal.

    Args:
        dgm: iterable of points with ``birth``/``death`` attributes.
        ax: matplotlib axes to draw on; ``False`` creates a fresh axes.
        show, labels: accepted for interface compatibility; currently unused.
        line_style: optional kwargs dict for the diagonal line.
        pt_style: optional kwargs dict for the scatter points.
        lims: ``False`` to derive limits from the finite points, or a
            ``[min_birth, max_birth, min_death, max_death]`` sequence.
    """
    line_kwargs = {}
    pt_kwargs = {}
    if pt_style is not None:
        pt_kwargs.update(pt_style)
    if line_style is not None:
        line_kwargs.update(line_style)
    inf = float('inf')
    # Fix: identity check instead of "== False" (PEP 8 / E712); behavior is
    # unchanged for the default and for real axes objects.
    if ax is False:
        ax = plt.axes()
        ax.set_aspect('equal', 'datalim')
    if lims is False:
        # Derive plot limits from the finite points only.
        # NOTE(review): raises ValueError for an empty/all-infinite diagram.
        min_birth = min(p.birth for p in dgm if p.birth != inf)
        max_birth = max(p.birth for p in dgm if p.birth != inf)
        min_death = min(p.death for p in dgm if p.death != inf)
        max_death = max(p.death for p in dgm if p.death != inf)
    else:
        min_birth = lims[0]
        max_birth = lims[1]
        min_death = lims[2]
        max_death = lims[3]
        ax.set_aspect('equal', 'datalim')
    min_diag = min(min_birth, min_death)
    max_diag = max(max_birth, max_death)
    ax.scatter([p.birth for p in dgm], [p.death for p in dgm], **pt_kwargs)
    ax.plot([min_diag, max_diag], [min_diag, max_diag], **line_kwargs)
    ax.set_xlabel('birth')
    ax.set_ylabel('death')
def plot_all(data, diagrams):
    """Show each dataset above its persistence diagram in a 2x4 grid.

    Top row holds the scatter plots of the datasets, bottom row the
    corresponding diagrams.  Subplot indices 241.. assume at most four
    datasets -- TODO confirm with callers.
    """
    fig = plt.figure(figsize=(20, 10))
    for i in range(len(data)):
        num = 241 + i
        ax = plt.subplot(num)
        plt.scatter(data[i][:, 0], data[i][:, 1])
        # The diagram goes in the same column, one row below (+4 slots).
        ax = plt.subplot(num + 4)
        plot_diagram(diagrams[i], ax, lims=[0, 1.5, 0, 1.75])
    fig.suptitle("Datasets with corresponding persistence diagrams")
    plt.show()
def compute_diagrams(data, k_filt=3, to_return=2):
    """Compute one persistence diagram per point cloud in ``data``.

    Each cloud is passed to ``dion.fill_rips`` with parameters ``k_filt``
    and maximum distance 3.0, persistence is computed, and the diagram at
    index ``to_return`` (homology dimension, per Dionysus convention --
    TODO confirm) is collected.
    """
    diagrams = []
    for i in range(len(data)):
        # print("Processing data: " + str(i) + "\n")
        filtration = dion.fill_rips(data[i], k_filt, 3.0)
        homology = dion.homology_persistence(filtration)
        diagram = dion.init_diagrams(homology, filtration)
        diagrams.append(diagram[to_return])
    return diagrams
def plot_clusters(M):
    """Plot two persistence-diagram cluster centres with a diagonal guide.

    ``M[0]`` is drawn in red (labelled 'Rings'), ``M[1]`` in blue
    (labelled 'Noise'); axis limits match the diagram plots elsewhere
    in this module.
    """
    plt.scatter(M[0].T[0], M[0].T[1], c='r', label='Rings')
    plt.scatter(M[1].T[0], M[1].T[1], c='b', label='Noise')
    plt.xlim([0, 1.5])
    plt.ylim([0, 1.75])
    # Birth == death reference segment.
    plt.plot([0.1, 1.2], [0.1, 1.2])
    plt.legend()
    plt.title("Persistence Diagram Cluster Centres")
    plt.show()
def add_noise(data, noise):
    """Perturb every 3-D point in ``data`` in place with Gaussian noise.

    One standard-normal sample (scaled by ``noise``) is drawn per
    coordinate, in x-then-y-then-z order per point, so the global RNG
    stream is consumed exactly as before.  Returns the mutated input.
    """
    for cloud in data:
        for point in cloud:
            for axis in range(3):
                point[axis] += noise * np.random.normal()
    return data
def reflect(data, axis=0):
    """Return a copy of the Nx3 cloud with two coordinate columns swapped.

    ``axis=0`` swaps x and y, ``axis=1`` swaps y and z; any other value
    returns the input object unchanged.
    """
    if axis == 0:
        column_order = [1, 0, 2]
    elif axis == 1:
        column_order = [0, 2, 1]
    else:
        return data
    # Fancy indexing yields a new array, matching the original's copy semantics.
    return data[:, column_order]
def translate(data, amount):
    """Return the point cloud shifted by a constant offset on every axis."""
    shifted = data + amount
    return shifted
def rotate(data, degrees, axis=0):
    """Rotate the point cloud by ``degrees`` about one coordinate axis.

    ``axis=0`` rotates about x, ``axis=1`` about y, anything else about z.
    """
    angle = np.radians(degrees)
    cos_a = np.cos(angle)
    sin_a = np.sin(angle)
    if axis == 0:
        rotation = np.array([[1, 0, 0], [0, cos_a, -sin_a], [0, sin_a, cos_a]])
    elif axis == 1:
        rotation = np.array([[cos_a, 0, sin_a], [0, 1, 0], [-sin_a, 0, cos_a]])
    else:
        rotation = np.array([[cos_a, -sin_a, 0], [sin_a, cos_a, 0], [0, 0, 1]])
    # Row-vector convention: p @ R.T is equivalent to R @ p for column vectors.
    return data @ rotation.T
def apply_transformations(data1, data2, transforms):
    """Stack six copies of the two clouds and distort four of them.

    Slot layout is ``[data2, data1, data1, data1, data2, data2]``; slots
    0 and 3 stay pristine while slots 1/2/4/5 receive every requested
    transformation in order.  Codes: 0 = reflection, 1 = 180-degree
    rotation, 2 = translation by +/-5.
    """
    data = np.array([data2, data1, data1, data1, data2, data2])
    for code in transforms:
        if code == 0:
            for slot, ax in ((1, 0), (2, 1), (4, 0), (5, 1)):
                data[slot] = reflect(data[slot], axis=ax)
        if code == 1:
            for slot, ax in ((1, 0), (2, 1), (4, 0), (5, 1)):
                data[slot] = rotate(data[slot], degrees=180, axis=ax)
        if code == 2:
            for slot, offset in ((1, 5), (2, -5), (4, 5), (5, -5)):
                data[slot] = translate(data[slot], offset)
    return data
if __name__ == '__main__':
    # Run the clustering experiment on two dataset pairs: cubic crystal
    # structures (bcc vs fcc) and carbon allotropes (dia vs ths3).
    for i in range(2):
        # seed/data are initialized here but reassigned before use below.
        seed = 0
        data = []
        np.random.seed(0)
        if i == 0:
            data1 = np.genfromtxt('data/cubic_structures/bcc/bcc.csv', delimiter=',')
            data2 = np.genfromtxt('data/cubic_structures/fcc/fcc.csv', delimiter=',')
            print("\n--- CUBIC STRUCTURES DATA ---\n")
        else:
            data1 = np.genfromtxt('data/carbon_allotropes/dia/dia.csv', delimiter=',')
            data2 = np.genfromtxt('data/carbon_allotropes/ths3/ths3.csv', delimiter=',')
            print("\n--- CARBON ALLOTROPES DATA ---\n")
        # transformations key
        # 0 - reflection
        # 1 - rotation
        # 2 - translation
        transformations = [[], [0], [1], [2]]
        # NOTE(review): this inner loop shadows the outer loop variable ``i``;
        # harmless with Python's for-loop semantics, but confusing to read.
        for i in range(len(transformations)):
            print("Case: " + str(i) + ", Transformations: " + str(transformations[i]))
            data = apply_transformations(data1, data2, transformations[i])
            # file_utils.save_as_d2(data, i)
            diagrams = compute_diagrams(data, k_filt=3, to_return=2)
            diagrams_cluster = clustering.reformat_diagrams(diagrams)
            r, M = clustering.pd_fuzzy(diagrams_cluster, 2, verbose=False, max_iter=5)
            # rearrange r to match data order for interpretation
            r[[0, 3], :] = r[[3, 0], :]
            if i == 0:
                t = 'no transformation'
            elif i == 1:
                t = 'reflection'
            elif i == 2:
                t = 'rotation'
            elif i == 3:
                t = 'translation'
            else:
                t = ''
            print("Membership values for " + t + ":")
            print(r)
            # Success = rows 0-2 belong to one cluster and rows 3-5 to the
            # other (membership > 0.5), in either cluster order.
            c1 = r[0][0] > 0.5 and r[1][0] > 0.5 and r[2][0] > 0.5 and r[3][1] > 0.5 and r[4][1] > 0.5 and r[5][1] > 0.5
            c2 = r[0][1] > 0.5 and r[1][1] > 0.5 and r[2][1] > 0.5 and r[3][0] > 0.5 and r[4][0] > 0.5 and r[5][0] > 0.5
            if c1 or c2:
                print("Clustered Succesfully\n")
            else:
                print("Clustered Unsuccesfully\n")
| import clustering
import numpy as np
import matplotlib.pyplot as plt
import dionysus as dion
import file_utils
def plot_diagram(dgm, ax=False, show=False, labels=False, line_style=None, pt_style=None, lims=False):
# taken from Dionysus2 package
line_kwargs = {}
pt_kwargs = {}
if pt_style is not None:
pt_kwargs.update(pt_style)
if line_style is not None:
line_kwargs.update(line_style)
inf = float('inf')
if ax==False:
ax = plt.axes()
ax.set_aspect('equal', 'datalim')
if lims==False:
min_birth = min(p.birth for p in dgm if p.birth != inf)
max_birth = max(p.birth for p in dgm if p.birth != inf)
min_death = min(p.death for p in dgm if p.death != inf)
max_death = max(p.death for p in dgm if p.death != inf)
else:
min_birth = lims[0]
max_birth = lims[1]
min_death = lims[2]
max_death = lims[3]
ax.set_aspect('equal', 'datalim')
min_diag = min(min_birth, min_death)
max_diag = max(max_birth, max_death)
ax.scatter([p.birth for p in dgm], [p.death for p in dgm], **pt_kwargs)
ax.plot([min_diag, max_diag], [min_diag, max_diag], **line_kwargs)
ax.set_xlabel('birth')
ax.set_ylabel('death')
def plot_all(data, diagrams):
fig = plt.figure(figsize=(20, 10))
for i in range(len(data)):
num = 241 + i
ax = plt.subplot(num)
plt.scatter(data[i][:, 0], data[i][:, 1])
ax = plt.subplot(num + 4)
plot_diagram(diagrams[i], ax, lims=[0, 1.5, 0, 1.75])
fig.suptitle("Datasets with corresponding persistence diagrams")
plt.show()
def compute_diagrams(data, k_filt=3, to_return=2):
diagrams = []
for i in range(len(data)):
# print("Processing data: " + str(i) + "\n")
filtration = dion.fill_rips(data[i], k_filt, 3.0)
homology = dion.homology_persistence(filtration)
diagram = dion.init_diagrams(homology, filtration)
diagrams.append(diagram[to_return])
return diagrams
def plot_clusters(M):
plt.scatter(M[0].T[0], M[0].T[1], c='r', label='Rings')
plt.scatter(M[1].T[0], M[1].T[1], c='b', label='Noise')
plt.xlim([0, 1.5])
plt.ylim([0, 1.75])
plt.plot([0.1, 1.2], [0.1, 1.2])
plt.legend()
plt.title("Persistence Diagram Cluster Centres")
plt.show()
def add_noise(data, noise):
for i in range(len(data)):
for j in range(len(data[i])):
data[i][j][0] += noise * np.random.normal()
data[i][j][1] += noise * np.random.normal()
data[i][j][2] += noise * np.random.normal()
return data
def reflect(data, axis=0):
datax = data.T[0]
datay = data.T[1]
dataz = data.T[2]
if axis == 0:
data = np.array([datay, datax, dataz]).T
elif axis == 1:
data = np.array([datax, dataz, datay]).T
return data
def translate(data, amount):
return data + amount
def rotate(data, degrees, axis=0):
theta = np.radians(degrees)
c, s = np.cos(theta), np.sin(theta)
if axis == 0:
R = np.array(((1, 0, 0), (0, c, -s), (0, s, c)))
elif axis == 1:
R = np.array(((c, 0, s), (0, 1, 0), (-s, 0, c)))
else:
R = np.array(((c, -s, 0), (s, c, 0), (0, 0, 1)))
return np.dot(data, R.T)
def apply_transformations(data1, data2, transforms):
data = np.array([data2, data1, data1, data1, data2, data2])
for i in range(len(transforms)):
if transforms[i] == 0:
data[1] = reflect(data[1], axis=0)
data[2] = reflect(data[2], axis=1)
data[4] = reflect(data[4], axis=0)
data[5] = reflect(data[5], axis=1)
if transforms[i] == 1:
data[1] = rotate(data[1], degrees=180, axis=0)
data[2] = rotate(data[2], degrees=180, axis=1)
data[4] = rotate(data[4], degrees=180, axis=0)
data[5] = rotate(data[5], degrees=180, axis=1)
if transforms[i] == 2:
data[1] = translate(data[1], 5)
data[2] = translate(data[2], -5)
data[4] = translate(data[4], 5)
data[5] = translate(data[5], -5)
return data
if __name__ == '__main__':
for i in range(2):
seed = 0
data = []
np.random.seed(0)
if i == 0:
data1 = np.genfromtxt('data/cubic_structures/bcc/bcc.csv', delimiter=',')
data2 = np.genfromtxt('data/cubic_structures/fcc/fcc.csv', delimiter=',')
print("\n--- CUBIC STRUCTURES DATA ---\n")
else:
data1 = np.genfromtxt('data/carbon_allotropes/dia/dia.csv', delimiter=',')
data2 = np.genfromtxt('data/carbon_allotropes/ths3/ths3.csv', delimiter=',')
print("\n--- CARBON ALLOTROPES DATA ---\n")
# transformations key
# 0 - reflection
# 1 - rotation
# 2 - translation
transformations = [[], [0], [1], [2]]
for i in range(len(transformations)):
print("Case: " + str(i) + ", Transformations: " + str(transformations[i]))
data = apply_transformations(data1, data2, transformations[i])
# file_utils.save_as_d2(data, i)
diagrams = compute_diagrams(data, k_filt=3, to_return=2)
diagrams_cluster = clustering.reformat_diagrams(diagrams)
r, M = clustering.pd_fuzzy(diagrams_cluster, 2, verbose=False, max_iter=5)
# rearrange r to match data order for interpretation
r[[0, 3], :] = r[[3, 0], :]
if i == 0:
t = 'no transformation'
elif i == 1:
t = 'reflection'
elif i == 2:
t = 'rotation'
elif i == 3:
t = 'translation'
else:
t = ''
print("Membership values for " + t + ":")
print(r)
c1 = r[0][0] > 0.5 and r[1][0] > 0.5 and r[2][0] > 0.5 and r[3][1] > 0.5 and r[4][1] > 0.5 and r[5][1] > 0.5
c2 = r[0][1] > 0.5 and r[1][1] > 0.5 and r[2][1] > 0.5 and r[3][0] > 0.5 and r[4][0] > 0.5 and r[5][0] > 0.5
if c1 or c2:
print("Clustered Succesfully\n")
else:
print("Clustered Unsuccesfully\n") | en | 0.455513 | # taken from Dionysus2 package # print("Processing data: " + str(i) + "\n") # transformations key # 0 - reflection # 1 - rotation # 2 - translation # file_utils.save_as_d2(data, i) # rearrange r to match data order for interpretation | 2.736181 | 3 |
brainfrick.py | leomwilson/brainfrick | 0 | 6616977 | <reponame>leomwilson/brainfrick
#/bin/env python
import fileinput
colours = {
'white': '\033[0m',
'red': '\033[31m',
'green': '\033[32m',
'orange': '\033[33m',
'blue': '\033[34m',
'purple': '\033[35m'
}
c = []
l = []
arr = [0]
pointer: int = 0
out = ''
for ln in fileinput.input(): # get a list of the executeable characters in the file
for ch in ln:
if ch in "+-<>[],.":
c.append(ch)
print(colours['green'] + 'Compressed code: ' + colours['blue'] + ''.join(c) + colours['white'])
stop_all = input(colours['green'] + 'Stop for input/output only [y/N]?' + colours['white']).lower() != 'y'
i = 0 # the for loop might not look python-y, but this is neccessary for loops
while i < len(c): # interpret the file, step by step
ch = c[i]
print(colours['green'] + 'Running char: ' + colours['blue'] + ch + colours['green'] + ' (' + colours['orange'] + str(i) + colours['green'] + ') [' + colours['orange'] + str(len(l)) + colours['green'] + ']' +colours['white'])
if ch == '>':
pointer += 1
if pointer == len(arr): # prevent IndexError
arr.append(0)
elif ch == '<':
pointer -= 1
if pointer < 0:
print(colours['red'] + 'Invalid syntax: pointer cannot be negative')
elif ch == '+':
arr[pointer] += 1
elif ch == '-':
arr[pointer] -= 1
elif ch == '[':
l.append(i + 1)
elif ch == ']':
if arr[pointer] <= 0:
del l[-1]
else:
i = l[-1] - 1 # -1 makes it +0 when i += 1
elif ch == ',':
val = input(colours['green'] + 'Input (#XXX for a decimal number): ' + colours['white'])
if(val[0] == '#' and len(val) > 1): # handle numbers
arr[pointer] += int(val[1:])
else: # ascii value of char
arr[pointer] += ord(val[0])
elif ch == '.':
out += chr(arr[pointer])
print(colours['green'] + 'Output: ' + colours['white'] + chr(arr[pointer]) + colours['orange'] + ' (' + str(arr[pointer]) + ')' + colours['white'])
print(colours['green'] + 'Values (anything not listed is zero):' + colours['white'])
r = ''
for p, v in enumerate(arr):
if v != 0 or p == pointer:
r += colours['green'] + ('' if r == '' else ' | ') + (colours['purple'] + '*' + colours['green'] if p == pointer else '') + 'arr[' + colours['orange'] + str(p) + colours['green'] + '] = ' + colours['orange'] + str(v)
print(r + colours['white'])
if stop_all or ch == '.':
input(colours['purple'] + 'Press enter to continue.' + colours['white'])
i += 1
print(colours['green'] + 'Program done. Output: ' + colours['white'] + out)
input(colours['purple'] + 'Press enter to continue.' + colours['white']) | #/bin/env python
import fileinput
# Interactive, step-by-step brainfuck-style interpreter: reads a program via
# fileinput, then executes it one instruction at a time with coloured tracing.
# ANSI colour escape codes used to decorate all interactive output.
colours = {
    'white': '\033[0m',
    'red': '\033[31m',
    'green': '\033[32m',
    'orange': '\033[33m',
    'blue': '\033[34m',
    'purple': '\033[35m'
}
# c: instruction stream; l: stack of loop-start positions; arr: the tape;
# pointer: current cell index; out: accumulated program output.
c = []
l = []
arr = [0]
pointer: int = 0
out = ''
for ln in fileinput.input(): # get a list of the executeable characters in the file
    for ch in ln:
        if ch in "+-<>[],.":
            c.append(ch)
print(colours['green'] + 'Compressed code: ' + colours['blue'] + ''.join(c) + colours['white'])
stop_all = input(colours['green'] + 'Stop for input/output only [y/N]?' + colours['white']).lower() != 'y'
i = 0 # the for loop might not look python-y, but this is neccessary for loops
while i < len(c): # interpret the file, step by step
    ch = c[i]
    print(colours['green'] + 'Running char: ' + colours['blue'] + ch + colours['green'] + ' (' + colours['orange'] + str(i) + colours['green'] + ') [' + colours['orange'] + str(len(l)) + colours['green'] + ']' +colours['white'])
    if ch == '>':
        pointer += 1
        if pointer == len(arr): # prevent IndexError
            arr.append(0)
    elif ch == '<':
        pointer -= 1
        if pointer < 0:
            print(colours['red'] + 'Invalid syntax: pointer cannot be negative')
    elif ch == '+':
        arr[pointer] += 1
    elif ch == '-':
        arr[pointer] -= 1
    elif ch == '[':
        # NOTE(review): standard brainfuck skips past the matching ']' when the
        # current cell is zero; this dialect always executes the body once.
        l.append(i + 1)
    elif ch == ']':
        # NOTE(review): negative cell values also end the loop here; standard
        # brainfuck loops while the cell is non-zero.
        if arr[pointer] <= 0:
            del l[-1]
        else:
            i = l[-1] - 1 # -1 makes it +0 when i += 1
    elif ch == ',':
        val = input(colours['green'] + 'Input (#XXX for a decimal number): ' + colours['white'])
        if(val[0] == '#' and len(val) > 1): # handle numbers
            arr[pointer] += int(val[1:])
        else: # ascii value of char
            arr[pointer] += ord(val[0])
    elif ch == '.':
        out += chr(arr[pointer])
        print(colours['green'] + 'Output: ' + colours['white'] + chr(arr[pointer]) + colours['orange'] + ' (' + str(arr[pointer]) + ')' + colours['white'])
    # Dump every non-zero cell (plus the current cell) after each step.
    print(colours['green'] + 'Values (anything not listed is zero):' + colours['white'])
    r = ''
    for p, v in enumerate(arr):
        if v != 0 or p == pointer:
            r += colours['green'] + ('' if r == '' else ' | ') + (colours['purple'] + '*' + colours['green'] if p == pointer else '') + 'arr[' + colours['orange'] + str(p) + colours['green'] + '] = ' + colours['orange'] + str(v)
    print(r + colours['white'])
    if stop_all or ch == '.':
        input(colours['purple'] + 'Press enter to continue.' + colours['white'])
    i += 1
print(colours['green'] + 'Program done. Output: ' + colours['white'] + out)
input(colours['purple'] + 'Press enter to continue.' + colours['white'])
newhope/__init__.py | kpdemetriou/newhope-cffi | 1 | 6616978 | <filename>newhope/__init__.py
import newhope.cca as cca
import newhope.cpa as cpa
from .params import *
from .cca import generate_keypair, encrypt, decrypt
| <filename>newhope/__init__.py
import newhope.cca as cca
import newhope.cpa as cpa
from .params import *
from .cca import generate_keypair, encrypt, decrypt
| none | 1 | 1.172178 | 1 | |
tests/test_bam_util.py | zztin/SingleCellMultiOmics | 17 | 6616979 | <reponame>zztin/SingleCellMultiOmics
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
import singlecellmultiomics.molecule
import singlecellmultiomics.fragment
import pysam
import pysamiterators.iterators
from singlecellmultiomics.bamProcessing import sorted_bam_file,write_program_tag,verify_and_fix_bam
from singlecellmultiomics.bamProcessing.bamExtractSamples import extract_samples
import os
import sys
from shutil import copyfile,rmtree
from singlecellmultiomics.bamProcessing import get_contigs_with_reads
from singlecellmultiomics.utils.sequtils import pick_best_base_call
class TestFunctions(unittest.TestCase):
    # Checks for the get_contigs_with_reads helper against the bundled BAMs.
    def test_get_contigs_with_reads_1(self):
        # mini_nla_test.bam is expected to hold reads on chr1 only.
        cwr = list(get_contigs_with_reads('./data/mini_nla_test.bam'))
        self.assertEqual(len(cwr), 1)
        self.assertIn('chr1', cwr)
    def test_get_contigs_with_reads_2(self):
        # chic_test_region.bam is expected to hold reads on contig '8' only.
        cwr = list(get_contigs_with_reads('./data/chic_test_region.bam'))
        self.assertEqual(len(cwr), 1)
        self.assertIn('8', cwr)
class TestSorted(unittest.TestCase):
    # End-to-end checks for sorted_bam_file / verify_and_fix_bam /
    # extract_samples.  Each test writes into ./data and cleans up after
    # itself (best effort: removal failures are deliberately ignored).
    def test_verify_and_fix_bam_autoindex(self):
        # verify_and_fix_bam should (re)create a missing .bai index.
        file_path_without_index = './data/temp_without_index.bam'
        copyfile('./data/mini_nla_test.bam',file_path_without_index)
        try:
            os.remove(file_path_without_index+'.bai')
        except Exception as e:
            pass
        verify_and_fix_bam(file_path_without_index)
        self.assertTrue( os.path.exists(file_path_without_index+'.bai') )
        with pysam.AlignmentFile(file_path_without_index) as f:
            i =0
            # Test if the file has reads.
            for read in f.fetch(contig='chr1'):
                if read.is_read1:
                    i+=1
            self.assertEqual(i, 293)
        try:
            os.remove(file_path_without_index)
        except Exception as e:
            pass
        try:
            os.remove(file_path_without_index+'.bai')
        except Exception as e:
            pass
    def test_write_to_sorted(self):
        # Molecules written through sorted_bam_file end up in a sorted BAM.
        write_path = './data/write_test.bam'
        with pysam.AlignmentFile('./data/mini_nla_test.bam') as f:
            with sorted_bam_file(write_path, origin_bam=f) as out:
                for molecule in singlecellmultiomics.molecule.MoleculeIterator(
                        alignments=f,
                        molecule_class=singlecellmultiomics.molecule.NlaIIIMolecule,
                        fragment_class=singlecellmultiomics.fragment.NlaIIIFragment,
                        fragment_class_args={'umi_hamming_distance':0},
                        pooling_method=0,
                        yield_invalid=True
                ):
                    molecule.write_pysam(out)
        self.assertTrue(os.path.exists(write_path))
        try:
            os.remove(write_path)
            os.remove(write_path+'.bai')
        except Exception as e:
            pass
    def test_write_to_sorted_custom_compression(self):
        # Same as above, but with the fast (lower) compression setting.
        write_path = './data/write_test.bam'
        with pysam.AlignmentFile('./data/mini_nla_test.bam') as f:
            with sorted_bam_file(write_path, origin_bam=f,fast_compression=True) as out:
                for molecule in singlecellmultiomics.molecule.MoleculeIterator(
                        alignments=f,
                        molecule_class=singlecellmultiomics.molecule.NlaIIIMolecule,
                        fragment_class=singlecellmultiomics.fragment.NlaIIIFragment,
                        fragment_class_args={'umi_hamming_distance':0},
                        pooling_method=0,
                        yield_invalid=True
                ):
                    molecule.write_pysam(out)
        self.assertTrue(os.path.exists(write_path))
        try:
            os.remove(write_path)
            os.remove(write_path+'.bai')
        except Exception as e:
            pass
    def test_write_to_sorted_non_existing_folder(self):
        # sorted_bam_file should create missing parent directories.
        write_folder = './data/non_yet_existing_folder/'
        write_path = write_folder + 'write_test.bam'
        if os.path.exists(write_path):
            os.remove(write_path)
        rmtree(write_folder, ignore_errors=True)
        with pysam.AlignmentFile('./data/mini_nla_test.bam') as f:
            with sorted_bam_file(write_path, origin_bam=f) as out:
                for molecule in singlecellmultiomics.molecule.MoleculeIterator(
                        alignments=f,
                        molecule_class=singlecellmultiomics.molecule.NlaIIIMolecule,
                        fragment_class=singlecellmultiomics.fragment.NlaIIIFragment,
                        fragment_class_args={'umi_hamming_distance':0},
                        pooling_method=0,
                        yield_invalid=True
                ):
                    molecule.write_pysam(out)
        self.assertTrue(os.path.exists(write_path))
        with pysam.AlignmentFile(write_path) as f:
            i =0
            # Test if the file has reads.
            for read in f:
                if read.is_read1:
                    i+=1
            self.assertEqual(i, 293)
        try:
            os.remove(write_path)
            os.remove(write_path+'.bai')
        except Exception as e:
            pass
        rmtree(write_folder, ignore_errors=True)
    def test_write_to_read_grouped_sorted(self):
        # Program tags added via write_program_tag and collected read groups
        # must survive the round trip through sorted_bam_file.
        write_path = './data/write_test_rg.bam'
        read_groups = set()
        with pysam.AlignmentFile('./data/mini_nla_test.bam') as f:
            input_header = f.header.as_dict()
            write_program_tag(input_header,
                program_name='test_bam_util_test1',
                command_line = " ".join(sys.argv),
                version = singlecellmultiomics.__version__,
                description = f'a description'
            )
            write_program_tag(input_header,
                program_name='test_bam_util_test2',
                command_line = " ".join(sys.argv),
                version = singlecellmultiomics.__version__,
                description = f'a description'
            )
            #print([x for x in input_header['PG'] if not 'bwa mem' in x.get('CL','')])
            with sorted_bam_file(write_path, header=input_header,read_groups=read_groups) as out:
                for molecule in singlecellmultiomics.molecule.MoleculeIterator(
                        alignments=f,
                        molecule_class=singlecellmultiomics.molecule.NlaIIIMolecule,
                        fragment_class=singlecellmultiomics.fragment.NlaIIIFragment,
                        fragment_class_args={'umi_hamming_distance':0},
                        pooling_method=0,
                        yield_invalid=True
                ):
                    molecule.write_pysam(out)
                    for frag in molecule:
                        read_groups.add( frag.get_read_group() )
        self.assertTrue(os.path.exists(write_path))
        # Now test if the program tag is there...
        with pysam.AlignmentFile(write_path) as f:
            self.assertTrue( 1==len([x for x in f.header['PG'] if 'test_bam_util_test1' in x.get('PN','')]) )
            self.assertTrue( 1==len([x for x in f.header['PG'] if 'test_bam_util_test2' in x.get('PN','')]) )
            i =0
            # Test if the file has reads.
            for read in f:
                if read.is_read1:
                    i+=1
            self.assertEqual(i, 293)
        try:
            os.remove(write_path)
        except Exception as e:
            pass
        try:
            os.remove(write_path+'.bai')
        except Exception as e:
            pass
    def test_sample_extraction(self):
        # extract_samples splits one BAM into per-group outputs; read counts
        # per group are known for the bundled test BAM.
        output_path= './data/write_test_extract.bam'
        final_output_paths= ['./data/write_test_extract0.bam', './data/write_test_extract1.bam']
        capture_samples = {'0':['APKS1P25-NLAP2L1_30','APKS1P25-NLAP2L1_31'],
                           # 4 reads expected
                           '1':['APKS1P25-NLAP2L2_86']}
        #samtools view ./data/mini_nla_test.bam | grep 'APKS1P25-NLAP2L2_86' | wc -l -> 8
        extract_samples( './data/mini_nla_test.bam', output_path, capture_samples )
        with pysam.AlignmentFile(final_output_paths[0]) as f:
            for i,_ in enumerate(f):
                pass
            self.assertEqual(i, 4-1)
        with pysam.AlignmentFile(final_output_paths[1]) as f:
            for i,_ in enumerate(f):
                pass
            self.assertEqual(i, 8-1)
        for p in final_output_paths:
            try:
                os.remove(p)
                os.remove(f'{p}.bai')
            except Exception as e:
                pass
class TestBaseCalling(unittest.TestCase):
    """Checks for consensus base-call selection."""
    def test_pick_best(self):
        # The higher-quality call wins regardless of argument order; a
        # quality tie between different bases yields the uninformative
        # ('N', 0) call.
        cases = [
            ((('A', 32), ('C', 22)), ('A', 32)),
            ((('C', 22), ('A', 32)), ('A', 32)),
            ((('C', 32), ('A', 32)), ('N', 0)),
        ]
        for (first, second), expected in cases:
            self.assertEqual(pick_best_base_call(first, second), expected)
# Allow running the whole suite directly: python test_bam_util.py
if __name__ == '__main__':
    unittest.main()
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
import singlecellmultiomics.molecule
import singlecellmultiomics.fragment
import pysam
import pysamiterators.iterators
from singlecellmultiomics.bamProcessing import sorted_bam_file,write_program_tag,verify_and_fix_bam
from singlecellmultiomics.bamProcessing.bamExtractSamples import extract_samples
import os
import sys
from shutil import copyfile,rmtree
from singlecellmultiomics.bamProcessing import get_contigs_with_reads
from singlecellmultiomics.utils.sequtils import pick_best_base_call
class TestFunctions(unittest.TestCase):
def test_get_contigs_with_reads_1(self):
cwr = list(get_contigs_with_reads('./data/mini_nla_test.bam'))
self.assertEqual(len(cwr), 1)
self.assertIn('chr1', cwr)
def test_get_contigs_with_reads_2(self):
cwr = list(get_contigs_with_reads('./data/chic_test_region.bam'))
self.assertEqual(len(cwr), 1)
self.assertIn('8', cwr)
class TestSorted(unittest.TestCase):
def test_verify_and_fix_bam_autoindex(self):
file_path_without_index = './data/temp_without_index.bam'
copyfile('./data/mini_nla_test.bam',file_path_without_index)
try:
os.remove(file_path_without_index+'.bai')
except Exception as e:
pass
verify_and_fix_bam(file_path_without_index)
self.assertTrue( os.path.exists(file_path_without_index+'.bai') )
with pysam.AlignmentFile(file_path_without_index) as f:
i =0
# Test if the file has reads.
for read in f.fetch(contig='chr1'):
if read.is_read1:
i+=1
self.assertEqual(i, 293)
try:
os.remove(file_path_without_index)
except Exception as e:
pass
try:
os.remove(file_path_without_index+'.bai')
except Exception as e:
pass
def test_write_to_sorted(self):
write_path = './data/write_test.bam'
with pysam.AlignmentFile('./data/mini_nla_test.bam') as f:
with sorted_bam_file(write_path, origin_bam=f) as out:
for molecule in singlecellmultiomics.molecule.MoleculeIterator(
alignments=f,
molecule_class=singlecellmultiomics.molecule.NlaIIIMolecule,
fragment_class=singlecellmultiomics.fragment.NlaIIIFragment,
fragment_class_args={'umi_hamming_distance':0},
pooling_method=0,
yield_invalid=True
):
molecule.write_pysam(out)
self.assertTrue(os.path.exists(write_path))
try:
os.remove(write_path)
os.remove(write_path+'.bai')
except Exception as e:
pass
def test_write_to_sorted_custom_compression(self):
write_path = './data/write_test.bam'
with pysam.AlignmentFile('./data/mini_nla_test.bam') as f:
with sorted_bam_file(write_path, origin_bam=f,fast_compression=True) as out:
for molecule in singlecellmultiomics.molecule.MoleculeIterator(
alignments=f,
molecule_class=singlecellmultiomics.molecule.NlaIIIMolecule,
fragment_class=singlecellmultiomics.fragment.NlaIIIFragment,
fragment_class_args={'umi_hamming_distance':0},
pooling_method=0,
yield_invalid=True
):
molecule.write_pysam(out)
self.assertTrue(os.path.exists(write_path))
try:
os.remove(write_path)
os.remove(write_path+'.bai')
except Exception as e:
pass
def test_write_to_sorted_non_existing_folder(self):
write_folder = './data/non_yet_existing_folder/'
write_path = write_folder + 'write_test.bam'
if os.path.exists(write_path):
os.remove(write_path)
rmtree(write_folder, ignore_errors=True)
with pysam.AlignmentFile('./data/mini_nla_test.bam') as f:
with sorted_bam_file(write_path, origin_bam=f) as out:
for molecule in singlecellmultiomics.molecule.MoleculeIterator(
alignments=f,
molecule_class=singlecellmultiomics.molecule.NlaIIIMolecule,
fragment_class=singlecellmultiomics.fragment.NlaIIIFragment,
fragment_class_args={'umi_hamming_distance':0},
pooling_method=0,
yield_invalid=True
):
molecule.write_pysam(out)
self.assertTrue(os.path.exists(write_path))
with pysam.AlignmentFile(write_path) as f:
i =0
# Test if the file has reads.
for read in f:
if read.is_read1:
i+=1
self.assertEqual(i, 293)
try:
os.remove(write_path)
os.remove(write_path+'.bai')
except Exception as e:
pass
rmtree(write_folder, ignore_errors=True)
def test_write_to_read_grouped_sorted(self):
write_path = './data/write_test_rg.bam'
read_groups = set()
with pysam.AlignmentFile('./data/mini_nla_test.bam') as f:
input_header = f.header.as_dict()
write_program_tag(input_header,
program_name='test_bam_util_test1',
command_line = " ".join(sys.argv),
version = singlecellmultiomics.__version__,
description = f'a description'
)
write_program_tag(input_header,
program_name='test_bam_util_test2',
command_line = " ".join(sys.argv),
version = singlecellmultiomics.__version__,
description = f'a description'
)
#print([x for x in input_header['PG'] if not 'bwa mem' in x.get('CL','')])
with sorted_bam_file(write_path, header=input_header,read_groups=read_groups) as out:
for molecule in singlecellmultiomics.molecule.MoleculeIterator(
alignments=f,
molecule_class=singlecellmultiomics.molecule.NlaIIIMolecule,
fragment_class=singlecellmultiomics.fragment.NlaIIIFragment,
fragment_class_args={'umi_hamming_distance':0},
pooling_method=0,
yield_invalid=True
):
molecule.write_pysam(out)
for frag in molecule:
read_groups.add( frag.get_read_group() )
self.assertTrue(os.path.exists(write_path))
# Now test if the program tag is there...
with pysam.AlignmentFile(write_path) as f:
self.assertTrue( 1==len([x for x in f.header['PG'] if 'test_bam_util_test1' in x.get('PN','')]) )
self.assertTrue( 1==len([x for x in f.header['PG'] if 'test_bam_util_test2' in x.get('PN','')]) )
i =0
# Test if the file has reads.
for read in f:
if read.is_read1:
i+=1
self.assertEqual(i, 293)
try:
os.remove(write_path)
except Exception as e:
pass
try:
os.remove(write_path+'.bai')
except Exception as e:
pass
def test_sample_extraction(self):
output_path= './data/write_test_extract.bam'
final_output_paths= ['./data/write_test_extract0.bam', './data/write_test_extract1.bam']
capture_samples = {'0':['APKS1P25-NLAP2L1_30','APKS1P25-NLAP2L1_31'],
# 4 reads expected
'1':['APKS1P25-NLAP2L2_86']}
#samtools view ./data/mini_nla_test.bam | grep 'APKS1P25-NLAP2L2_86' | wc -l -> 8
extract_samples( './data/mini_nla_test.bam', output_path, capture_samples )
with pysam.AlignmentFile(final_output_paths[0]) as f:
for i,_ in enumerate(f):
pass
self.assertEqual(i, 4-1)
with pysam.AlignmentFile(final_output_paths[1]) as f:
for i,_ in enumerate(f):
pass
self.assertEqual(i, 8-1)
for p in final_output_paths:
try:
os.remove(p)
os.remove(f'{p}.bai')
except Exception as e:
pass
class TestBaseCalling(unittest.TestCase):
def test_pick_best(self):
self.assertTrue( pick_best_base_call( ('A',32), ('C',22) ) == ('A', 32) )
self.assertTrue( pick_best_base_call( ('C',22), ('A',32) ) == ('A', 32))
self.assertTrue( pick_best_base_call( ('C',32), ('A',32) ) == ('N',0) )
if __name__ == '__main__':
unittest.main() | en | 0.791678 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Test if the file has reads. # Test if the file has reads. #print([x for x in input_header['PG'] if not 'bwa mem' in x.get('CL','')]) # Now test if the program tag is there... # Test if the file has reads. # 4 reads expected #samtools view ./data/mini_nla_test.bam | grep 'APKS1P25-NLAP2L2_86' | wc -l -> 8 | 2.073689 | 2 |
arch.py | canplane/SCALE-Sim-PREMA | 0 | 6616980 | import os
import datetime
import configparser as cp
def _t_str():
dt = datetime.datetime.now(datetime.timezone.utc)
return dt.astimezone().strftime("%Y%m%d_%H%M%S")
class Architecture:
def __init__(self, cfg_path=None):
self._load_from_cfg(cfg_path)
if not os.path.exists("./outputs/"):
os.mkdir("./outputs")
####
os.system("rm -rf ./outputs/*")
####
self.out_dir = f"./outputs/{self.name}-{_t_str()}"
os.mkdir(self.out_dir)
#
def _load_from_cfg(self, path):
cfg = cp.ConfigParser()
cfg.read(path)
## Read the run_name
section = 'general'
self.name = cfg.get(section, 'run_name').strip().strip("'\"")
## Read the architecture_presets
section = 'architecture_presets'
self.array = {
'h': int(cfg.get(section, 'ArrayHeight')),
'w': int(cfg.get(section, 'ArrayWidth'))
}
self.sram_sz = {
'ifmap': 1024 * int(cfg.get(section, 'IfmapSramSzkB')),
'filt': 1024 * int(cfg.get(section, 'FilterSramSzkB')),
'ofmap': 1024 * int(cfg.get(section, 'OfmapSramSzkB'))
}
self.base_addr = {
'ifmap': int(cfg.get(section, 'IfmapOffset')),
'filt': int(cfg.get(section, 'FilterOffset')),
'ofmap': int(cfg.get(section, 'OfmapOffset'))
}
self.dataflow = cfg.get(section, 'Dataflow')
#
#
| import os
import datetime
import configparser as cp
def _t_str():
dt = datetime.datetime.now(datetime.timezone.utc)
return dt.astimezone().strftime("%Y%m%d_%H%M%S")
class Architecture:
def __init__(self, cfg_path=None):
self._load_from_cfg(cfg_path)
if not os.path.exists("./outputs/"):
os.mkdir("./outputs")
####
os.system("rm -rf ./outputs/*")
####
self.out_dir = f"./outputs/{self.name}-{_t_str()}"
os.mkdir(self.out_dir)
#
def _load_from_cfg(self, path):
cfg = cp.ConfigParser()
cfg.read(path)
## Read the run_name
section = 'general'
self.name = cfg.get(section, 'run_name').strip().strip("'\"")
## Read the architecture_presets
section = 'architecture_presets'
self.array = {
'h': int(cfg.get(section, 'ArrayHeight')),
'w': int(cfg.get(section, 'ArrayWidth'))
}
self.sram_sz = {
'ifmap': 1024 * int(cfg.get(section, 'IfmapSramSzkB')),
'filt': 1024 * int(cfg.get(section, 'FilterSramSzkB')),
'ofmap': 1024 * int(cfg.get(section, 'OfmapSramSzkB'))
}
self.base_addr = {
'ifmap': int(cfg.get(section, 'IfmapOffset')),
'filt': int(cfg.get(section, 'FilterOffset')),
'ofmap': int(cfg.get(section, 'OfmapOffset'))
}
self.dataflow = cfg.get(section, 'Dataflow')
#
#
| en | 0.456822 | #### #### # ## Read the run_name ## Read the architecture_presets # # | 2.545564 | 3 |
module.py | MistiikDev/PythonStatsModule | 0 | 6616981 | from math import *
## Returns the mean for values of a dictionary
class Mean():
def __init__(self) -> None:
pass
def get(self, a: dict) -> int:
mean = 0
eff = 0
for i in a:
mean += i * a[i]
eff += a[i]
mean /= eff
return mean
## Returns the variance equation and typicalGap for a dictionary
class Variance():
def __init__(self) -> None:
pass
def get(self, a: dict) -> int:
mean = Mean()
var = 0
total = 0
aMean = mean.get(a)
for i in a:
v = a[i]
var += (v*((i-aMean)**2))
total += v
var /= total
return var
def typicalGap(self, a: int) -> int:
return sqrt(a);
## Returns a medianne of a dictionary values
class Medianne():
def __init__(self) -> None:
pass
def get(self, a: dict) -> int:
total = 0
med = 0
b = []
for i in a:
for c in range(int(a[i])):
b.append(i)
b = sorted(b)
total = len(b)
if total % 2 != 0:
med = b[round(total/2)]
else:
med = (b[(round(total/2))] + b[round((total/2)+1)]) / 2
return med
## Checks if a value of a list is inside an intervall defined respectively by x(+-)2o
def CheckListOnI(x, o, l: list) -> int:
start = x - (2*o)
end = x + (2*o)
inList = []
total = 0
for i in l:
if (l[i] >= start and l[i] <= end):
inList.append(l[i])
total += 1
return round(total/len(l) * 100, 3)
| from math import *
## Returns the mean for values of a dictionary
class Mean():
def __init__(self) -> None:
pass
def get(self, a: dict) -> int:
mean = 0
eff = 0
for i in a:
mean += i * a[i]
eff += a[i]
mean /= eff
return mean
## Returns the variance equation and typicalGap for a dictionary
class Variance():
def __init__(self) -> None:
pass
def get(self, a: dict) -> int:
mean = Mean()
var = 0
total = 0
aMean = mean.get(a)
for i in a:
v = a[i]
var += (v*((i-aMean)**2))
total += v
var /= total
return var
def typicalGap(self, a: int) -> int:
return sqrt(a);
## Returns a medianne of a dictionary values
class Medianne():
def __init__(self) -> None:
pass
def get(self, a: dict) -> int:
total = 0
med = 0
b = []
for i in a:
for c in range(int(a[i])):
b.append(i)
b = sorted(b)
total = len(b)
if total % 2 != 0:
med = b[round(total/2)]
else:
med = (b[(round(total/2))] + b[round((total/2)+1)]) / 2
return med
## Checks if a value of a list is inside an intervall defined respectively by x(+-)2o
def CheckListOnI(x, o, l: list) -> int:
start = x - (2*o)
end = x + (2*o)
inList = []
total = 0
for i in l:
if (l[i] >= start and l[i] <= end):
inList.append(l[i])
total += 1
return round(total/len(l) * 100, 3)
| en | 0.516931 | ## Returns the mean for values of a dictionary ## Returns the variance equation and typicalGap for a dictionary ## Returns a medianne of a dictionary values ## Checks if a value of a list is inside an intervall defined respectively by x(+-)2o | 3.48834 | 3 |
third_party/utils/eval_helper.py | jiangwei221/image-matching-benchmark | 271 | 6616982 | import math
import cv2
import numpy as np
def align_model(model, rot, trans, scale):
return (np.matmul(rot, model) + trans) * scale
def align(model, data):
'''
Source: https://vision.in.tum.de/data/datasets/rgbd-dataset/tools
#absolute_trajectory_error_ate
Align two trajectories using the method of Horn (closed-form).
Input:
model -- first trajectory (3xn)
data -- second trajectory (3xn)
Output:
rot -- rotation matrix (3x3)
trans -- translation vector (3x1)
trans_error -- translational error per point (1xn)
'''
if model.shape[1] < 3:
print('Need at least 3 points for ATE: {}'.format(model))
return np.identity(3), np.zeros((3, 1)), 1
# Get zero centered point cloud
model_zerocentered = model - model.mean(1, keepdims=True)
data_zerocentered = data - data.mean(1, keepdims=True)
# constructed covariance matrix
W = np.zeros((3, 3))
for column in range(model.shape[1]):
W += np.outer(model_zerocentered[:, column],
data_zerocentered[:, column])
# SVD
U, d, Vh = np.linalg.linalg.svd(W.transpose())
S = np.identity(3)
if (np.linalg.det(U) * np.linalg.det(Vh) < 0):
S[2, 2] = -1
rot = np.matmul(np.matmul(U, S), Vh)
trans = data.mean(1, keepdims=True) - np.matmul(
rot, model.mean(1, keepdims=True))
# apply rot and trans to point cloud
model_aligned = align_model(model, rot, trans, 1.0)
model_aligned_zerocentered = model_aligned - model_aligned.mean(
1, keepdims=True)
# calc scale based on distance to point cloud center
data_dist = np.sqrt((data_zerocentered * data_zerocentered).sum(axis=0))
model_aligned_dist = np.sqrt(
(model_aligned_zerocentered * model_aligned_zerocentered).sum(axis=0))
scale_array = data_dist / model_aligned_dist
scale = np.median(scale_array)
return rot, trans, scale
def quaternion_matrix(quaternion):
'''Return homogeneous rotation matrix from quaternion.
>>> M = quaternion_matrix([0.99810947, 0.06146124, 0, 0])
>>> numpy.allclose(M, rotation_matrix(0.123, [1, 0, 0]))
True
>>> M = quaternion_matrix([1, 0, 0, 0])
>>> numpy.allclose(M, numpy.identity(4))
True
>>> M = quaternion_matrix([0, 1, 0, 0])
>>> numpy.allclose(M, numpy.diag([1, -1, -1, 1]))
True
'''
q = np.array(quaternion, dtype=np.float64, copy=True)
n = np.dot(q, q)
if n < _EPS:
return np.identity(4)
q *= math.sqrt(2.0 / n)
q = np.outer(q, q)
return np.array(
[[1.0 - q[2, 2] - q[3, 3], q[1, 2] - q[3, 0], q[1, 3] + q[2, 0], 0.0],
[q[1, 2] + q[3, 0], 1.0 - q[1, 1] - q[3, 3], q[2, 3] - q[1, 0], 0.0],
[q[1, 3] - q[2, 0], q[2, 3] + q[1, 0], 1.0 - q[1, 1] - q[2, 2], 0.0],
[0.0, 0.0, 0.0, 1.0]])
def quaternion_from_matrix(matrix, isprecise=False):
'''Return quaternion from rotation matrix.
If isprecise is True, the input matrix is assumed to be a precise rotation
matrix and a faster algorithm is used.
>>> q = quaternion_from_matrix(numpy.identity(4), True)
>>> numpy.allclose(q, [1, 0, 0, 0])
True
>>> q = quaternion_from_matrix(numpy.diag([1, -1, -1, 1]))
>>> numpy.allclose(q, [0, 1, 0, 0]) or numpy.allclose(q, [0, -1, 0, 0])
True
>>> R = rotation_matrix(0.123, (1, 2, 3))
>>> q = quaternion_from_matrix(R, True)
>>> numpy.allclose(q, [0.9981095, 0.0164262, 0.0328524, 0.0492786])
True
>>> R = [[-0.545, 0.797, 0.260, 0], [0.733, 0.603, -0.313, 0],
... [-0.407, 0.021, -0.913, 0], [0, 0, 0, 1]]
>>> q = quaternion_from_matrix(R)
>>> numpy.allclose(q, [0.19069, 0.43736, 0.87485, -0.083611])
True
>>> R = [[0.395, 0.362, 0.843, 0], [-0.626, 0.796, -0.056, 0],
... [-0.677, -0.498, 0.529, 0], [0, 0, 0, 1]]
>>> q = quaternion_from_matrix(R)
>>> numpy.allclose(q, [0.82336615, -0.13610694, 0.46344705, -0.29792603])
True
>>> R = random_rotation_matrix()
>>> q = quaternion_from_matrix(R)
>>> is_same_transform(R, quaternion_matrix(q))
True
>>> R = euler_matrix(0.0, 0.0, numpy.pi/2.0)
>>> numpy.allclose(quaternion_from_matrix(R, isprecise=False),
... quaternion_from_matrix(R, isprecise=True))
True
'''
M = np.array(matrix, dtype=np.float64, copy=False)[:4, :4]
if isprecise:
q = np.empty((4, ))
t = np.trace(M)
if t > M[3, 3]:
q[0] = t
q[3] = M[1, 0] - M[0, 1]
q[2] = M[0, 2] - M[2, 0]
q[1] = M[2, 1] - M[1, 2]
else:
i, j, k = 1, 2, 3
if M[1, 1] > M[0, 0]:
i, j, k = 2, 3, 1
if M[2, 2] > M[i, i]:
i, j, k = 3, 1, 2
t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
q[i] = t
q[j] = M[i, j] + M[j, i]
q[k] = M[k, i] + M[i, k]
q[3] = M[k, j] - M[j, k]
q *= 0.5 / math.sqrt(t * M[3, 3])
else:
m00 = M[0, 0]
m01 = M[0, 1]
m02 = M[0, 2]
m10 = M[1, 0]
m11 = M[1, 1]
m12 = M[1, 2]
m20 = M[2, 0]
m21 = M[2, 1]
m22 = M[2, 2]
# symmetric matrix K
K = np.array([[m00 - m11 - m22, 0.0, 0.0, 0.0],
[m01 + m10, m11 - m00 - m22, 0.0, 0.0],
[m02 + m20, m12 + m21, m22 - m00 - m11, 0.0],
[m21 - m12, m02 - m20, m10 - m01, m00 + m11 + m22]])
K /= 3.0
# quaternion is eigenvector of K that corresponds to largest eigenvalue
w, V = np.linalg.eigh(K)
q = V[[3, 0, 1, 2], np.argmax(w)]
if q[0] < 0.0:
np.negative(q, q)
return q
| import math
import cv2
import numpy as np
def align_model(model, rot, trans, scale):
return (np.matmul(rot, model) + trans) * scale
def align(model, data):
'''
Source: https://vision.in.tum.de/data/datasets/rgbd-dataset/tools
#absolute_trajectory_error_ate
Align two trajectories using the method of Horn (closed-form).
Input:
model -- first trajectory (3xn)
data -- second trajectory (3xn)
Output:
rot -- rotation matrix (3x3)
trans -- translation vector (3x1)
trans_error -- translational error per point (1xn)
'''
if model.shape[1] < 3:
print('Need at least 3 points for ATE: {}'.format(model))
return np.identity(3), np.zeros((3, 1)), 1
# Get zero centered point cloud
model_zerocentered = model - model.mean(1, keepdims=True)
data_zerocentered = data - data.mean(1, keepdims=True)
# constructed covariance matrix
W = np.zeros((3, 3))
for column in range(model.shape[1]):
W += np.outer(model_zerocentered[:, column],
data_zerocentered[:, column])
# SVD
U, d, Vh = np.linalg.linalg.svd(W.transpose())
S = np.identity(3)
if (np.linalg.det(U) * np.linalg.det(Vh) < 0):
S[2, 2] = -1
rot = np.matmul(np.matmul(U, S), Vh)
trans = data.mean(1, keepdims=True) - np.matmul(
rot, model.mean(1, keepdims=True))
# apply rot and trans to point cloud
model_aligned = align_model(model, rot, trans, 1.0)
model_aligned_zerocentered = model_aligned - model_aligned.mean(
1, keepdims=True)
# calc scale based on distance to point cloud center
data_dist = np.sqrt((data_zerocentered * data_zerocentered).sum(axis=0))
model_aligned_dist = np.sqrt(
(model_aligned_zerocentered * model_aligned_zerocentered).sum(axis=0))
scale_array = data_dist / model_aligned_dist
scale = np.median(scale_array)
return rot, trans, scale
def quaternion_matrix(quaternion):
'''Return homogeneous rotation matrix from quaternion.
>>> M = quaternion_matrix([0.99810947, 0.06146124, 0, 0])
>>> numpy.allclose(M, rotation_matrix(0.123, [1, 0, 0]))
True
>>> M = quaternion_matrix([1, 0, 0, 0])
>>> numpy.allclose(M, numpy.identity(4))
True
>>> M = quaternion_matrix([0, 1, 0, 0])
>>> numpy.allclose(M, numpy.diag([1, -1, -1, 1]))
True
'''
q = np.array(quaternion, dtype=np.float64, copy=True)
n = np.dot(q, q)
if n < _EPS:
return np.identity(4)
q *= math.sqrt(2.0 / n)
q = np.outer(q, q)
return np.array(
[[1.0 - q[2, 2] - q[3, 3], q[1, 2] - q[3, 0], q[1, 3] + q[2, 0], 0.0],
[q[1, 2] + q[3, 0], 1.0 - q[1, 1] - q[3, 3], q[2, 3] - q[1, 0], 0.0],
[q[1, 3] - q[2, 0], q[2, 3] + q[1, 0], 1.0 - q[1, 1] - q[2, 2], 0.0],
[0.0, 0.0, 0.0, 1.0]])
def quaternion_from_matrix(matrix, isprecise=False):
'''Return quaternion from rotation matrix.
If isprecise is True, the input matrix is assumed to be a precise rotation
matrix and a faster algorithm is used.
>>> q = quaternion_from_matrix(numpy.identity(4), True)
>>> numpy.allclose(q, [1, 0, 0, 0])
True
>>> q = quaternion_from_matrix(numpy.diag([1, -1, -1, 1]))
>>> numpy.allclose(q, [0, 1, 0, 0]) or numpy.allclose(q, [0, -1, 0, 0])
True
>>> R = rotation_matrix(0.123, (1, 2, 3))
>>> q = quaternion_from_matrix(R, True)
>>> numpy.allclose(q, [0.9981095, 0.0164262, 0.0328524, 0.0492786])
True
>>> R = [[-0.545, 0.797, 0.260, 0], [0.733, 0.603, -0.313, 0],
... [-0.407, 0.021, -0.913, 0], [0, 0, 0, 1]]
>>> q = quaternion_from_matrix(R)
>>> numpy.allclose(q, [0.19069, 0.43736, 0.87485, -0.083611])
True
>>> R = [[0.395, 0.362, 0.843, 0], [-0.626, 0.796, -0.056, 0],
... [-0.677, -0.498, 0.529, 0], [0, 0, 0, 1]]
>>> q = quaternion_from_matrix(R)
>>> numpy.allclose(q, [0.82336615, -0.13610694, 0.46344705, -0.29792603])
True
>>> R = random_rotation_matrix()
>>> q = quaternion_from_matrix(R)
>>> is_same_transform(R, quaternion_matrix(q))
True
>>> R = euler_matrix(0.0, 0.0, numpy.pi/2.0)
>>> numpy.allclose(quaternion_from_matrix(R, isprecise=False),
... quaternion_from_matrix(R, isprecise=True))
True
'''
M = np.array(matrix, dtype=np.float64, copy=False)[:4, :4]
if isprecise:
q = np.empty((4, ))
t = np.trace(M)
if t > M[3, 3]:
q[0] = t
q[3] = M[1, 0] - M[0, 1]
q[2] = M[0, 2] - M[2, 0]
q[1] = M[2, 1] - M[1, 2]
else:
i, j, k = 1, 2, 3
if M[1, 1] > M[0, 0]:
i, j, k = 2, 3, 1
if M[2, 2] > M[i, i]:
i, j, k = 3, 1, 2
t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
q[i] = t
q[j] = M[i, j] + M[j, i]
q[k] = M[k, i] + M[i, k]
q[3] = M[k, j] - M[j, k]
q *= 0.5 / math.sqrt(t * M[3, 3])
else:
m00 = M[0, 0]
m01 = M[0, 1]
m02 = M[0, 2]
m10 = M[1, 0]
m11 = M[1, 1]
m12 = M[1, 2]
m20 = M[2, 0]
m21 = M[2, 1]
m22 = M[2, 2]
# symmetric matrix K
K = np.array([[m00 - m11 - m22, 0.0, 0.0, 0.0],
[m01 + m10, m11 - m00 - m22, 0.0, 0.0],
[m02 + m20, m12 + m21, m22 - m00 - m11, 0.0],
[m21 - m12, m02 - m20, m10 - m01, m00 + m11 + m22]])
K /= 3.0
# quaternion is eigenvector of K that corresponds to largest eigenvalue
w, V = np.linalg.eigh(K)
q = V[[3, 0, 1, 2], np.argmax(w)]
if q[0] < 0.0:
np.negative(q, q)
return q
| en | 0.496767 | Source: https://vision.in.tum.de/data/datasets/rgbd-dataset/tools #absolute_trajectory_error_ate Align two trajectories using the method of Horn (closed-form). Input: model -- first trajectory (3xn) data -- second trajectory (3xn) Output: rot -- rotation matrix (3x3) trans -- translation vector (3x1) trans_error -- translational error per point (1xn) # Get zero centered point cloud # constructed covariance matrix # SVD # apply rot and trans to point cloud # calc scale based on distance to point cloud center Return homogeneous rotation matrix from quaternion. >>> M = quaternion_matrix([0.99810947, 0.06146124, 0, 0]) >>> numpy.allclose(M, rotation_matrix(0.123, [1, 0, 0])) True >>> M = quaternion_matrix([1, 0, 0, 0]) >>> numpy.allclose(M, numpy.identity(4)) True >>> M = quaternion_matrix([0, 1, 0, 0]) >>> numpy.allclose(M, numpy.diag([1, -1, -1, 1])) True Return quaternion from rotation matrix. If isprecise is True, the input matrix is assumed to be a precise rotation matrix and a faster algorithm is used. >>> q = quaternion_from_matrix(numpy.identity(4), True) >>> numpy.allclose(q, [1, 0, 0, 0]) True >>> q = quaternion_from_matrix(numpy.diag([1, -1, -1, 1])) >>> numpy.allclose(q, [0, 1, 0, 0]) or numpy.allclose(q, [0, -1, 0, 0]) True >>> R = rotation_matrix(0.123, (1, 2, 3)) >>> q = quaternion_from_matrix(R, True) >>> numpy.allclose(q, [0.9981095, 0.0164262, 0.0328524, 0.0492786]) True >>> R = [[-0.545, 0.797, 0.260, 0], [0.733, 0.603, -0.313, 0], ... [-0.407, 0.021, -0.913, 0], [0, 0, 0, 1]] >>> q = quaternion_from_matrix(R) >>> numpy.allclose(q, [0.19069, 0.43736, 0.87485, -0.083611]) True >>> R = [[0.395, 0.362, 0.843, 0], [-0.626, 0.796, -0.056, 0], ... 
[-0.677, -0.498, 0.529, 0], [0, 0, 0, 1]] >>> q = quaternion_from_matrix(R) >>> numpy.allclose(q, [0.82336615, -0.13610694, 0.46344705, -0.29792603]) True >>> R = random_rotation_matrix() >>> q = quaternion_from_matrix(R) >>> is_same_transform(R, quaternion_matrix(q)) True >>> R = euler_matrix(0.0, 0.0, numpy.pi/2.0) >>> numpy.allclose(quaternion_from_matrix(R, isprecise=False), ... quaternion_from_matrix(R, isprecise=True)) True # symmetric matrix K # quaternion is eigenvector of K that corresponds to largest eigenvalue | 3.315517 | 3 |
experiments/predict_simple/utils.py | Tobias-Fischer/dreyeve | 0 | 6616983 | import numpy as np
import cv2
import os.path as path
# cityscapes dataset palette
palette = np.array([[128, 64, 128],
[244, 35, 232],
[70, 70, 70],
[102, 102, 156],
[190, 153, 153],
[153, 153, 153],
[250, 170, 30],
[220, 220, 0],
[107, 142, 35],
[152, 251, 152],
[70, 130, 180],
[220, 20, 60],
[255, 0, 0],
[0, 0, 142],
[0, 0, 70],
[0, 60, 100],
[0, 80, 100],
[0, 0, 230],
[119, 11, 32]], dtype='uint8')
def seg_to_colormap(seg, channels_first):
"""
Function to turn segmentation PREDICTION (not probabilities) to colormap.
:param seg: the prediction image, having shape (h,w)
:param channels_first: if true, returns (c,h,w) rather than (h,w,c)
:return: the colormap image, having shape (h,w,3)
"""
h, w = seg.shape
color_image = palette[seg.ravel()].reshape(h, w, 3)
if channels_first:
color_image = color_image.transpose(2, 0, 1)
return color_image
def read_lines_from_file(filename):
"""
Function to read lines from file
:param filename: The text file to be read.
:return: content: A list of strings
"""
with open(filename) as f:
content = f.readlines()
content = [x.strip() for x in content]
return content
def get_branch_from_experiment_id(experiment_id):
"""
Function to return model branch name given experiment_id.
:param experiment_id: experiment id
:return: a string among ['all','image','optical_flow','semseg']
"""
assert isinstance(experiment_id, basestring), "Experiment ID must be a string."
branch = None
if experiment_id.lower().startswith('dreyeve'):
branch = "all"
elif experiment_id.lower().startswith('color'):
branch = "image"
elif experiment_id.lower().startswith('flow'):
branch = "optical_flow"
elif experiment_id.lower().startswith('segm'):
branch = "semseg"
return branch
def read_image(img_path, channels_first, color=True, color_mode='BGR', dtype=np.float32, resize_dim=None):
"""
Reads and returns an image as a numpy array
Parameters
----------
img_path : string
Path of the input image
channels_first: bool
If True, channel dimension is moved in first position
color: bool, optional
If True, image is loaded in color: grayscale otherwise
color_mode: "RGB", "BGR", optional
Whether to load the color image in RGB or BGR format
dtype: dtype, optional
Array is casted to this data type before being returned
resize_dim: tuple, optional
Resize size following convention (new_h, new_w) - interpolation is linear
Returns
-------
image : np.array
Loaded Image as numpy array of type dtype
"""
if not path.exists(img_path):
raise ValueError('Provided path "{}" does NOT exist.'.format(img_path))
image = cv2.imread(img_path, cv2.IMREAD_COLOR if color else cv2.IMREAD_GRAYSCALE)
if color and color_mode == 'RGB':
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
if resize_dim is not None:
image = cv2.resize(image, dsize=resize_dim[::-1], interpolation=cv2.INTER_LINEAR)
if color and channels_first:
image = np.transpose(image, (2, 0, 1))
return image.astype(dtype)
def normalize(img):
"""
Normalizes an image between 0 and 255 and returns it as uint8.
Parameters
----------
img : ndarray
Image that has to be normalized
Returns
----------
img : ndarray
The normalized image
"""
img = img.astype(np.float32)
img -= img.min()
img /= img.max()
img *= 255
img = img.astype(np.uint8)
return img
def resize_tensor(tensor, new_shape):
"""
Resize a numeric input 3D tensor with opencv. Each channel is resized independently from the others.
Parameters
----------
tensor: ndarray
Numeric 3D tensor of shape (channels, h, w)
new_shape: tuple
Tuple (new_h, new_w)
Returns
-------
new_tensor: ndarray
Resized tensor having size (channels, new_h, new_w)
"""
channels = tensor.shape[0]
new_tensor = np.zeros(shape=(channels,) + new_shape)
for i in range(0, channels):
new_tensor[i] = cv2.resize(tensor[i], dsize=new_shape[::-1])
return new_tensor
def stitch_together(input_images, layout, resize_dim=None, off_x=None, off_y=None, bg_color=(0, 0, 0)):
"""
Stitch together N input images into a bigger frame, using a grid layout.
Input images can be either color or grayscale, but must all have the same size.
Parameters
----------
input_images : list
List of input images
layout : tuple
Grid layout of the stitch expressed as (rows, cols)
resize_dim : couple
If not None, stitch is resized to this size
off_x : int
Offset between stitched images along x axis
off_y : int
Offset between stitched images along y axis
bg_color : tuple
Color used for background
Returns
-------
stitch : ndarray
Stitch of input images
"""
if len(set([img.shape for img in input_images])) > 1:
raise ValueError('All images must have the same shape')
if len(set([img.dtype for img in input_images])) > 1:
raise ValueError('All images must have the same data type')
# determine if input images are color (3 channels) or grayscale (single channel)
if len(input_images[0].shape) == 2:
mode = 'grayscale'
img_h, img_w = input_images[0].shape
elif len(input_images[0].shape) == 3:
mode = 'color'
img_h, img_w, img_c = input_images[0].shape
else:
raise ValueError('Unknown shape for input images')
# if no offset is provided, set to 10% of image size
if off_x is None:
off_x = img_w // 10
if off_y is None:
off_y = img_h // 10
# create stitch mask
rows, cols = layout
stitch_h = rows * img_h + (rows + 1) * off_y
stitch_w = cols * img_w + (cols + 1) * off_x
if mode == 'color':
bg_color = np.array(bg_color)[None, None, :] # cast to ndarray add singleton dimensions
stitch = np.uint8(np.repeat(np.repeat(bg_color, stitch_h, axis=0), stitch_w, axis=1))
elif mode == 'grayscale':
stitch = np.zeros(shape=(stitch_h, stitch_w), dtype=np.uint8)
for r in range(0, rows):
for c in range(0, cols):
list_idx = r * cols + c
if list_idx < len(input_images):
if mode == 'color':
stitch[r * (off_y + img_h) + off_y: r*(off_y+img_h) + off_y + img_h,
c * (off_x + img_w) + off_x: c * (off_x + img_w) + off_x + img_w,
:] = input_images[list_idx]
elif mode == 'grayscale':
stitch[r * (off_y + img_h) + off_y: r*(off_y+img_h) + off_y + img_h,
c * (off_x + img_w) + off_x: c * (off_x + img_w) + off_x + img_w]\
= input_images[list_idx]
if resize_dim:
stitch = cv2.resize(stitch, dsize=(resize_dim[::-1]))
return stitch
| import numpy as np
import cv2
import os.path as path
# cityscapes dataset palette
palette = np.array([[128, 64, 128],
[244, 35, 232],
[70, 70, 70],
[102, 102, 156],
[190, 153, 153],
[153, 153, 153],
[250, 170, 30],
[220, 220, 0],
[107, 142, 35],
[152, 251, 152],
[70, 130, 180],
[220, 20, 60],
[255, 0, 0],
[0, 0, 142],
[0, 0, 70],
[0, 60, 100],
[0, 80, 100],
[0, 0, 230],
[119, 11, 32]], dtype='uint8')
def seg_to_colormap(seg, channels_first):
"""
Function to turn segmentation PREDICTION (not probabilities) to colormap.
:param seg: the prediction image, having shape (h,w)
:param channels_first: if true, returns (c,h,w) rather than (h,w,c)
:return: the colormap image, having shape (h,w,3)
"""
h, w = seg.shape
color_image = palette[seg.ravel()].reshape(h, w, 3)
if channels_first:
color_image = color_image.transpose(2, 0, 1)
return color_image
def read_lines_from_file(filename):
"""
Function to read lines from file
:param filename: The text file to be read.
:return: content: A list of strings
"""
with open(filename) as f:
content = f.readlines()
content = [x.strip() for x in content]
return content
def get_branch_from_experiment_id(experiment_id):
"""
Function to return model branch name given experiment_id.
:param experiment_id: experiment id
:return: a string among ['all','image','optical_flow','semseg']
"""
assert isinstance(experiment_id, basestring), "Experiment ID must be a string."
branch = None
if experiment_id.lower().startswith('dreyeve'):
branch = "all"
elif experiment_id.lower().startswith('color'):
branch = "image"
elif experiment_id.lower().startswith('flow'):
branch = "optical_flow"
elif experiment_id.lower().startswith('segm'):
branch = "semseg"
return branch
def read_image(img_path, channels_first, color=True, color_mode='BGR', dtype=np.float32, resize_dim=None):
"""
Reads and returns an image as a numpy array
Parameters
----------
img_path : string
Path of the input image
channels_first: bool
If True, channel dimension is moved in first position
color: bool, optional
If True, image is loaded in color: grayscale otherwise
color_mode: "RGB", "BGR", optional
Whether to load the color image in RGB or BGR format
dtype: dtype, optional
Array is casted to this data type before being returned
resize_dim: tuple, optional
Resize size following convention (new_h, new_w) - interpolation is linear
Returns
-------
image : np.array
Loaded Image as numpy array of type dtype
"""
if not path.exists(img_path):
raise ValueError('Provided path "{}" does NOT exist.'.format(img_path))
image = cv2.imread(img_path, cv2.IMREAD_COLOR if color else cv2.IMREAD_GRAYSCALE)
if color and color_mode == 'RGB':
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
if resize_dim is not None:
image = cv2.resize(image, dsize=resize_dim[::-1], interpolation=cv2.INTER_LINEAR)
if color and channels_first:
image = np.transpose(image, (2, 0, 1))
return image.astype(dtype)
def normalize(img):
"""
Normalizes an image between 0 and 255 and returns it as uint8.
Parameters
----------
img : ndarray
Image that has to be normalized
Returns
----------
img : ndarray
The normalized image
"""
img = img.astype(np.float32)
img -= img.min()
img /= img.max()
img *= 255
img = img.astype(np.uint8)
return img
def resize_tensor(tensor, new_shape):
"""
Resize a numeric input 3D tensor with opencv. Each channel is resized independently from the others.
Parameters
----------
tensor: ndarray
Numeric 3D tensor of shape (channels, h, w)
new_shape: tuple
Tuple (new_h, new_w)
Returns
-------
new_tensor: ndarray
Resized tensor having size (channels, new_h, new_w)
"""
channels = tensor.shape[0]
new_tensor = np.zeros(shape=(channels,) + new_shape)
for i in range(0, channels):
new_tensor[i] = cv2.resize(tensor[i], dsize=new_shape[::-1])
return new_tensor
def stitch_together(input_images, layout, resize_dim=None, off_x=None, off_y=None, bg_color=(0, 0, 0)):
"""
Stitch together N input images into a bigger frame, using a grid layout.
Input images can be either color or grayscale, but must all have the same size.
Parameters
----------
input_images : list
List of input images
layout : tuple
Grid layout of the stitch expressed as (rows, cols)
resize_dim : couple
If not None, stitch is resized to this size
off_x : int
Offset between stitched images along x axis
off_y : int
Offset between stitched images along y axis
bg_color : tuple
Color used for background
Returns
-------
stitch : ndarray
Stitch of input images
"""
if len(set([img.shape for img in input_images])) > 1:
raise ValueError('All images must have the same shape')
if len(set([img.dtype for img in input_images])) > 1:
raise ValueError('All images must have the same data type')
# determine if input images are color (3 channels) or grayscale (single channel)
if len(input_images[0].shape) == 2:
mode = 'grayscale'
img_h, img_w = input_images[0].shape
elif len(input_images[0].shape) == 3:
mode = 'color'
img_h, img_w, img_c = input_images[0].shape
else:
raise ValueError('Unknown shape for input images')
# if no offset is provided, set to 10% of image size
if off_x is None:
off_x = img_w // 10
if off_y is None:
off_y = img_h // 10
# create stitch mask
rows, cols = layout
stitch_h = rows * img_h + (rows + 1) * off_y
stitch_w = cols * img_w + (cols + 1) * off_x
if mode == 'color':
bg_color = np.array(bg_color)[None, None, :] # cast to ndarray add singleton dimensions
stitch = np.uint8(np.repeat(np.repeat(bg_color, stitch_h, axis=0), stitch_w, axis=1))
elif mode == 'grayscale':
stitch = np.zeros(shape=(stitch_h, stitch_w), dtype=np.uint8)
for r in range(0, rows):
for c in range(0, cols):
list_idx = r * cols + c
if list_idx < len(input_images):
if mode == 'color':
stitch[r * (off_y + img_h) + off_y: r*(off_y+img_h) + off_y + img_h,
c * (off_x + img_w) + off_x: c * (off_x + img_w) + off_x + img_w,
:] = input_images[list_idx]
elif mode == 'grayscale':
stitch[r * (off_y + img_h) + off_y: r*(off_y+img_h) + off_y + img_h,
c * (off_x + img_w) + off_x: c * (off_x + img_w) + off_x + img_w]\
= input_images[list_idx]
if resize_dim:
stitch = cv2.resize(stitch, dsize=(resize_dim[::-1]))
return stitch
| en | 0.714236 | # cityscapes dataset palette Function to turn segmentation PREDICTION (not probabilities) to colormap. :param seg: the prediction image, having shape (h,w) :param channels_first: if true, returns (c,h,w) rather than (h,w,c) :return: the colormap image, having shape (h,w,3) Function to read lines from file :param filename: The text file to be read. :return: content: A list of strings Function to return model branch name given experiment_id. :param experiment_id: experiment id :return: a string among ['all','image','optical_flow','semseg'] Reads and returns an image as a numpy array Parameters ---------- img_path : string Path of the input image channels_first: bool If True, channel dimension is moved in first position color: bool, optional If True, image is loaded in color: grayscale otherwise color_mode: "RGB", "BGR", optional Whether to load the color image in RGB or BGR format dtype: dtype, optional Array is casted to this data type before being returned resize_dim: tuple, optional Resize size following convention (new_h, new_w) - interpolation is linear Returns ------- image : np.array Loaded Image as numpy array of type dtype Normalizes an image between 0 and 255 and returns it as uint8. Parameters ---------- img : ndarray Image that has to be normalized Returns ---------- img : ndarray The normalized image Resize a numeric input 3D tensor with opencv. Each channel is resized independently from the others. Parameters ---------- tensor: ndarray Numeric 3D tensor of shape (channels, h, w) new_shape: tuple Tuple (new_h, new_w) Returns ------- new_tensor: ndarray Resized tensor having size (channels, new_h, new_w) Stitch together N input images into a bigger frame, using a grid layout. Input images can be either color or grayscale, but must all have the same size. 
Parameters ---------- input_images : list List of input images layout : tuple Grid layout of the stitch expressed as (rows, cols) resize_dim : couple If not None, stitch is resized to this size off_x : int Offset between stitched images along x axis off_y : int Offset between stitched images along y axis bg_color : tuple Color used for background Returns ------- stitch : ndarray Stitch of input images # determine if input images are color (3 channels) or grayscale (single channel) # if no offset is provided, set to 10% of image size # create stitch mask # cast to ndarray add singleton dimensions | 2.882848 | 3 |
scripts/benchmarks/benchmark_super_mario.py | SamuelNLP/nes-ai | 0 | 6616984 | <filename>scripts/benchmarks/benchmark_super_mario.py
"""
Super Mario session
"""
import logging
import random
from datetime import datetime
from neats.genetic import Genetic, NetworkShape
from neats.network import Network
from nes_py.wrappers import JoypadSpace
from nes_ai.input import MOVEMENT, Button, Joypad, neat_result_to_buttons
from nes_ai.mario.env import SuperMario
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
# constants
BUTTONS_MAP = {
0: Button.LEFT,
1: Button.RIGHT,
2: Button.A,
3: Button.B,
4: Button.DOWN,
5: Button.NONE,
}
THRESHOLD_FRAME = 5
TIMEOUT = 100
BUTTON_THRESHOLD = 0.5
INITIAL_THRESHOLD = 100
RENDER = False
# neats constants
NUMBER_INDIVIDUALS = 10
NETWORK_INPUTS = 169
NETWORK_OUTPUTS = 6
@profile  # noqa  # NOTE(review): `profile` is injected by kernprof (`kernprof -l`); running this module directly would raise NameError — confirm intended usage
def individual_run(individual: Network) -> Network:
    """
    Play one Super Mario episode with ``individual`` as the controller and
    return it with its ``fitness`` attribute set.

    The network is evaluated once per frame; its outputs are mapped to
    joypad buttons via BUTTONS_MAP.  The run ends when Mario is dying, or
    when no rightward progress has been made for TIMEOUT frames (after an
    initial grace period of TIMEOUT frames).
    """
    random.seed(datetime.now())
    mario = SuperMario()
    player = Joypad(JoypadSpace(mario, MOVEMENT))
    # Keep pressing START until RAM byte 0x0009 passes the threshold
    # (presumably a frame/state counter marking that gameplay began — TODO confirm).
    while mario.ram[0x0009] < INITIAL_THRESHOLD:
        player.press((Button.START,), delay=INITIAL_THRESHOLD)
    frame_count = 0
    fitness = 0
    timeout_ = TIMEOUT        # frames left before aborting for lack of progress
    rightmost_mario = 0       # furthest x position reached so far
    while True:
        if RENDER:
            mario.render()
        # Terminate on death, or when the progress timeout has expired
        # (the frame_count check prevents an immediate abort at episode start).
        if mario.is_dying or (timeout_ < 0 and frame_count > TIMEOUT):
            individual.fitness = fitness
            mario.close()
            return individual
        # play: evaluate the network on the current frame and press the
        # buttons whose activations exceed BUTTON_THRESHOLD
        features = mario.get_input_array()
        instances = individual.evaluate(features)
        button_result = neat_result_to_buttons(
            instances, BUTTONS_MAP, BUTTON_THRESHOLD  # noqa
        )
        x_mario, _ = mario.get_mario()
        # New rightmost position counts as progress and resets the timeout.
        if x_mario > rightmost_mario:
            timeout_ = TIMEOUT
            rightmost_mario = x_mario
        if button_result:
            player.press(button_result, delay=THRESHOLD_FRAME, replay=True)
        else:
            player.press((Button.NONE,), delay=0)
        # fitness: horizontal progress penalised by elapsed frames
        fitness = int(x_mario - frame_count / 4)
        frame_count += 1
        timeout_ -= 1
@profile  # noqa  # NOTE(review): `profile` exists only under kernprof; direct execution raises NameError
def main():
    """
    Run the NEAT benchmark: evaluate every individual of one generation in
    Super Mario, log the iteration, and evolve the population.

    The loop bound of 1 keeps the benchmark to a single generation; raise it
    to run a longer evolution.
    """
    network_shape = NetworkShape(n_inputs=NETWORK_INPUTS, n_outputs=NETWORK_OUTPUTS)
    genetic = Genetic(
        number_individuals=NUMBER_INDIVIDUALS, network_shape=network_shape
    )
    for index in range(1):
        # Evaluate each network in its own emulator episode.
        genetic.population = [individual_run(x) for x in genetic.population]
        logger.info("#----------#")
        # Lazy %-style arguments avoid string formatting when the level is disabled.
        logger.info("Iteration: %d", index)
        logger.info("#----------#\n")
        genetic = genetic.evolve()
if __name__ == "__main__":
main()
| <filename>scripts/benchmarks/benchmark_super_mario.py
"""
Super Mario session
"""
import logging
import random
from datetime import datetime
from neats.genetic import Genetic, NetworkShape
from neats.network import Network
from nes_py.wrappers import JoypadSpace
from nes_ai.input import MOVEMENT, Button, Joypad, neat_result_to_buttons
from nes_ai.mario.env import SuperMario
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
# constants
BUTTONS_MAP = {
0: Button.LEFT,
1: Button.RIGHT,
2: Button.A,
3: Button.B,
4: Button.DOWN,
5: Button.NONE,
}
THRESHOLD_FRAME = 5
TIMEOUT = 100
BUTTON_THRESHOLD = 0.5
INITIAL_THRESHOLD = 100
RENDER = False
# neats constants
NUMBER_INDIVIDUALS = 10
NETWORK_INPUTS = 169
NETWORK_OUTPUTS = 6
@profile # noqa
def individual_run(individual: Network) -> Network:
"""
An individual run
"""
random.seed(datetime.now())
mario = SuperMario()
player = Joypad(JoypadSpace(mario, MOVEMENT))
while mario.ram[0x0009] < INITIAL_THRESHOLD:
player.press((Button.START,), delay=INITIAL_THRESHOLD)
frame_count = 0
fitness = 0
timeout_ = TIMEOUT
rightmost_mario = 0
while True:
if RENDER:
mario.render()
if mario.is_dying or (timeout_ < 0 and frame_count > TIMEOUT):
individual.fitness = fitness
mario.close()
return individual
# play
features = mario.get_input_array()
instances = individual.evaluate(features)
button_result = neat_result_to_buttons(
instances, BUTTONS_MAP, BUTTON_THRESHOLD # noqa
)
x_mario, _ = mario.get_mario()
if x_mario > rightmost_mario:
timeout_ = TIMEOUT
rightmost_mario = x_mario
if button_result:
player.press(button_result, delay=THRESHOLD_FRAME, replay=True)
else:
player.press((Button.NONE,), delay=0)
# fitness
fitness = int(x_mario - frame_count / 4)
frame_count += 1
timeout_ -= 1
@profile # noqa
def main():
network_shape = NetworkShape(n_inputs=NETWORK_INPUTS, n_outputs=NETWORK_OUTPUTS)
genetic = Genetic(
number_individuals=NUMBER_INDIVIDUALS, network_shape=network_shape
)
for index in range(1):
population = [individual_run(x) for x in genetic.population]
genetic.population = population
logger.info("#----------#")
logger.info(f"Iteration: {index}")
logger.info("#----------#\n")
genetic = genetic.evolve()
if __name__ == "__main__":
main()
| en | 0.442422 | Super Mario session # constants # neats constants # noqa An individual run # play # noqa # fitness # noqa #") #\n") | 2.491122 | 2 |
src/personalize/configuration/appearance.py | kishorekolli/deep_racer_guru | 9 | 6616985 | #
# DeepRacer Guru
#
# Version 3.0 onwards
#
# Copyright (c) 2021 dmh23
#
# See: https://matplotlib.org/stable/tutorials/colors/colormaps.html
# Matplotlib colormap names for the two heatmap styles.
COLORMAP_A = "viridis"
COLORMAP_B = "inferno"

# See: tkcolors
# Tk colour names for discretised plots (presumably ordered worst -> best — confirm).
DISCRETE_THREE_COLOURS = ["Red", "Yellow", "Green"]
DISCRETE_FIVE_COLOURS = ["Red", "Yellow", "Green", "Blue", "White"]

# Highlight colours for selected events and the true-heading marker.
EVENT_HIGHLIGHT_COLOUR = "Orange"
TRUE_HEADING_HIGHLIGHT_COLOUR = "Purple"

# Per-race line colours when comparing up to ten races.
RACE_COLOURS = ["red", "orange", "yellow", "green", "blue", "gray", "cornflowerblue", "white", "cyan", "magenta"]
| #
# DeepRacer Guru
#
# Version 3.0 onwards
#
# Copyright (c) 2021 dmh23
#
# See: https://matplotlib.org/stable/tutorials/colors/colormaps.html
COLORMAP_A = "viridis"
COLORMAP_B = "inferno"
# See: tkcolors
DISCRETE_THREE_COLOURS = ["Red", "Yellow", "Green"]
DISCRETE_FIVE_COLOURS = ["Red", "Yellow", "Green", "Blue", "White"]
EVENT_HIGHLIGHT_COLOUR = "Orange"
TRUE_HEADING_HIGHLIGHT_COLOUR = "Purple"
RACE_COLOURS = ["red", "orange", "yellow", "green", "blue", "gray", "cornflowerblue", "white", "cyan", "magenta"]
| en | 0.602673 | # # DeepRacer Guru # # Version 3.0 onwards # # Copyright (c) 2021 dmh23 # # See: https://matplotlib.org/stable/tutorials/colors/colormaps.html # See: tkcolors | 1.898898 | 2 |
Chapter08/subclass.py | ibiscum/Learning-Concurrency-in-Python | 67 | 6616986 | <reponame>ibiscum/Learning-Concurrency-in-Python
import multiprocessing
import os
class MyProcess(multiprocessing.Process):
    """A worker process that simply reports its own PID when run.

    The original explicit ``__init__`` only delegated to the base class, so
    it was redundant and has been removed; the inherited constructor is used.
    """

    def run(self):
        """Entry point executed in the (child) process: print its PID."""
        print("Child Process PID: {}".format(multiprocessing.current_process().pid))
def main():
    """Spawn one demo process, then one worker per CPU core, and wait for all."""
    print("Main Process PID: {}".format(multiprocessing.current_process().pid))
    # A single process run start-to-finish first.
    demo = MyProcess()
    demo.start()
    demo.join()
    # Then one worker per core: start them all, then join them all.
    workers = []
    for _ in range(os.cpu_count()):
        worker = MyProcess()
        workers.append(worker)
        worker.start()
    for worker in workers:
        worker.join()
if __name__ == '__main__':
main() | import multiprocessing
import os
class MyProcess(multiprocessing.Process):
def __init__(self):
super(MyProcess, self).__init__()
def run(self):
print("Child Process PID: {}".format(multiprocessing.current_process().pid))
def main():
print("Main Process PID: {}".format(multiprocessing.current_process().pid))
myProcess = MyProcess()
myProcess.start()
myProcess.join()
processes = []
for i in range(os.cpu_count()):
process = MyProcess()
processes.append(process)
process.start()
for process in processes:
process.join()
if __name__ == '__main__':
main() | none | 1 | 3.190997 | 3 | |
python101/Extract_Sample_attributes_xml.py | geraudazangue/Python_Projects | 0 | 6616987 | <reponame>geraudazangue/Python_Projects
import glob
import os
import os.path
from pathlib import Path
from lxml import etree
import pandas as pd
from datetime import datetime
import logging
import time
# Read the list of wanted genres (one token per whitespace-separated field).
input_file = pd.read_csv("C:/Users/Gael/Desktop/CODE/RAW FILES/input_file.txt", sep=" ", header=None)

# Build the output document skeleton: Root/Head with title and timestamps,
# plus a body element that collects the matching records from every input file.
Root = etree.Element("Root")
head = etree.SubElement(Root, 'Head')
title = etree.SubElement(head, 'title')
title.text = 'CPD_EMEA'
dc = etree.SubElement(head, 'dateCreated')
dc.text = str(datetime.today())
dm = etree.SubElement(head, 'dateModified')
dm.text = str(datetime.today())
body = etree.SubElement(Root, 'body')

# BUG FIX: this list used to be bound to the name `range`, shadowing the builtin.
wanted_genres = ['Computer', 'Fantasy']

mytime = datetime.strftime(datetime.now(), '%Y%m%d_%H%M%S')
# BUG FIX: the original concatenated the `time` *module* here, raising
# TypeError; the timestamp string computed above (`mytime`) was intended.
file_path = "C:/Users/Gael/Desktop/CODE/RAW FILES/{0}".format('Converted_file_' + mytime + '.xml')

for p in Path('C:/Users/Gael/Desktop/CODE/test').glob('**/*.xml'):
    # Stream-parse each XML file, reacting to fully-read ('end') elements.
    context = etree.iterparse(str(p), events=('end', ))
    for event, elem in context:
        # Collect <book> elements whose genre appears in the input file ...
        if elem.tag == 'book':
            if elem.find('genre').text in input_file.values:
                body.append(elem)
                # BUG FIX: the original printed XML_OUTPUT here, before it was
                # ever assigned, which raised NameError on the first match.
        # ... and <PRODUCT> elements whose Civilté is in the wanted list.
        if elem.tag == 'PRODUCT':
            if elem.find('Civilté').text in wanted_genres:
                body.append(elem)

XML_OUTPUT = etree.tostring(Root, pretty_print=True, encoding='UTF-8', xml_declaration=True)
with open(file_path, "wb") as writer:
    writer.write(XML_OUTPUT)
| import glob
import os
import os.path
from pathlib import Path
from lxml import etree
import pandas as pd
from datetime import datetime
import logging
import time
input_file = pd.read_csv("C:/Users/Gael/Desktop/CODE/RAW FILES/input_file.txt", sep =" ",header=None)
Root = etree.Element("Root")
head = etree.SubElement(Root, 'Head')
title = etree.SubElement(head, 'title')
title.text = 'CPD_EMEA'
dc = etree.SubElement(head, 'dateCreated')
dc.text = str(datetime.today())
dm = etree.SubElement(head, 'dateModified')
dm.text = str(datetime.today())
body = etree.SubElement(Root, 'body')
range = ['Computer','Fantasy']
#USE MY TIME TO NOT ERASEN TIME MODULE
mytime = datetime.strftime(datetime.now(), '%Y%m%d_%H%M%S')
file_path = "C:/Users/Gael/Desktop/CODE/RAW FILES/{0}".format('Converted_file_' + time +'.xml')
for p in Path('C:/Users/Gael/Desktop/CODE/test').glob('**/*.xml'):
context = etree.iterparse(str(p), events=('end', ))
for event, elem in context:
#Thisnis the event launcher
if elem.tag == 'book':
if elem.find('genre').text in input_file.values:
body.append(elem)
print(XML_OUTPUT)
if (elem.tag == 'PRODUCT') :
if (elem.find('Civilté').text in range):
body.append(elem)
XML_OUTPUT = etree.tostring(Root, pretty_print=True, encoding='UTF-8', xml_declaration=True)
with open(file_path, "wb") as writter:
writter.write(XML_OUTPUT)
#attention point etree istead of element tree is used. | en | 0.644817 | #USE MY TIME TO NOT ERASEN TIME MODULE #Thisnis the event launcher #attention point etree istead of element tree is used. | 2.706462 | 3 |
vectors.py | beninato8/math | 0 | 6616988 | import numpy as np
import math
class Vector():
    """A point/vector in n-dimensional space, stored as a list of coordinates."""

    def __init__(self, *args):
        coordinates = list(args)
        self.pts = coordinates
        self.dim = len(coordinates)

    def __repr__(self):
        """Show the coordinate list, e.g. ``[1, 2, 3]``."""
        return str(self.pts)
class Plane():
    """A plane in 3-space given by the equation ax + by + cz + d = 0."""

    def __init__(self, a, b, c, d):
        self.pts = [a, b, c, d]

    def __repr__(self):
        """Render the plane equation, e.g. ``1x + 2y + 3z + 4 = 0``."""
        return '{}x + {}y + {}z + {} = 0'.format(*self.pts)
def distVecPlane(vec, plane):
    """
    Distance from the 3-D point *vec* to *plane*: |ax+by+cz+d| / sqrt(a²+b²+c²).

    Returns the distance as a display string ``'top/bottom'``, keeping an exact
    ``sqrt(n)`` denominator when it is irrational, and also prints the LaTeX
    form as a side effect.  Returns None for vectors that are not 3-D
    (behaviour preserved from the original).
    """
    if vec.dim != 3:
        return None  # silently ignore non-3D input, as before
    x, y, z = vec.pts
    a, b, c, d = plane.pts
    top = abs(a*x + b*y + c*z + d)
    if int(top) == top:
        top = int(top)
    radicand = a**2 + b**2 + c**2
    bottom = math.sqrt(radicand)
    if int(bottom) != bottom:
        # Irrational denominator: keep the exact sqrt form.
        print(f'\\frac{{{top}}}{{\\sqrt{{{radicand}}}}}')
        bottom = f'sqrt({radicand})'
    else:
        # BUG FIX: an integral denominator used to stay a float (e.g. '18/7.0');
        # coerce it to int, mirroring the treatment of the numerator.
        bottom = int(bottom)
        print(f'\\frac{{{top}}}{{{bottom}}}')
    return f'{top}/{bottom}'
def distPlanePlane(plane1, plane2):
    """
    Distance between two parallel planes.

    If the planes are not parallel (coefficient ratios differ) a message is
    printed and None is returned.  Otherwise the distance from a point on
    *plane1* (its x-intercept, y = z = 0) to *plane2* is returned.
    NOTE: a zero leading coefficient in *plane2* would raise ZeroDivisionError.
    """
    coeffs1 = plane1.pts
    coeffs2 = plane2.pts
    ratio = coeffs1[0] / coeffs2[0]
    if coeffs1[1] / coeffs2[1] != ratio or coeffs1[2] / coeffs2[2] != ratio:
        print(f'{plane1} is not parallel to\n{plane2}')
        return
    # Point on plane1 with y = z = 0: x = -d/a.
    point = Vector(-coeffs1[3] / coeffs1[0], 0, 0)
    return distVecPlane(point, plane2)
def dist(a, b):
    """
    Dispatch to the right distance routine based on the argument types.

    Accepts a Vector/Plane pair (in either order) or two Planes; any other
    combination falls through and returns None.
    """
    a_is_vec = isinstance(a, Vector)
    b_is_vec = isinstance(b, Vector)
    a_is_plane = isinstance(a, Plane)
    b_is_plane = isinstance(b, Plane)
    if a_is_vec and b_is_plane:
        return distVecPlane(a, b)
    if b_is_vec and a_is_plane:
        return distVecPlane(b, a)
    if a_is_plane and b_is_plane:
        return distPlanePlane(a, b)
# Worked examples: one point and three planes.  Planes c and d are parallel
# (all coefficient ratios equal 0.5), so dist(c, d) takes the plane-plane path.
a = Vector(1,-8,6)
b = Plane(3, 2, 6, -5)
c = Plane(4, -3, 1, -8)
d = Plane(8, -6, 2, -3)
# print(dist(a,b))
print(dist(c,d))
import math
class Vector():
def __init__(self, *args):
self.pts = list(args)
self.dim = len(args)
def __repr__(self):
return str(self.pts)
class Plane():
"""ax + bx + cz + d = 0"""
def __init__(self, a, b, c, d):
self.pts = [a,b,c,d]
def __repr__(self):
a,b,c,d = self.pts
return f'{a}x + {b}y + {c}z + {d} = 0'
def distVecPlane(vec, plane):
if vec.dim == 3:
x, y, z = vec.pts
a,b,c,d = plane.pts
top = abs(a*x + b*y + c*z + d)
if int(top) == top:
top = int(top)
bottom = math.sqrt(a**2 + b**2 + c**2)
if int(bottom) != bottom:
print(f'\\frac{{{top}}}{{\\sqrt{{{a**2 + b**2 + c**2}}}}}')
bottom = f'sqrt({a**2 + b**2 + c**2})'
else:
print(f'\\frac{{{top}}}{{{bottom}}}')
return f'{top}/{bottom}'
def distPlanePlane(plane1, plane2):
p1 = plane1.pts
p2 = plane2.pts
if any(p1[0]/p2[0] != x for x in [p1[1]/p2[1], p1[2]/p2[2]]):
print(f'{plane1} is not parallel to\n{plane2}')
return
a = p1[0]
x = -p1[3]/a
vec = Vector(x,0,0)
return distVecPlane(vec, plane2)
def dist(a, b):
if isinstance(a, Vector) and isinstance(b, Plane):
return distVecPlane(a, b)
if isinstance(b, Vector) and isinstance(a, Plane):
return distVecPlane(b, a)
if isinstance(a, Plane) and isinstance(b, Plane):
return distPlanePlane(a, b)
a = Vector(1,-8,6)
b = Plane(3, 2, 6, -5)
c = Plane(4, -3, 1, -8)
d = Plane(8, -6, 2, -3)
# print(dist(a,b))
print(dist(c,d)) | en | 0.18768 | ax + bx + cz + d = 0 # print(dist(a,b)) | 3.490391 | 3 |
lesson 6/results/ebra/GUIOmnivor.py | gtpedrosa/Python4WindEnergy | 48 | 6616989 | <reponame>gtpedrosa/Python4WindEnergy<gh_stars>10-100
from __future__ import print_function
import sys
sys.path.append("../py4we") # append py4we package to path to access WEFileIO
from we_file_io import WEFileIO
# from ofield_file_io import OmnivorFieldFile
import numpy as np
import numpy as np
from QtGuiLoader import QtWidgetLoader
import MyUI.OmnivorPlotControlUI
class OmnivorPlotControlWidget(QtWidgetLoader):
    """Qt widget exposing the plot controls (x/y expressions, colour, line
    width, y-limits) and triggering a replot of the attached file reader on
    every change."""

    def __init__(self, we_file_io):
        try:self.ui = MyUI.OmnivorPlotControlUI.Ui_Form() # Enables autocompletion (if you are lucky...)
        except: pass
        QtWidgetLoader.__init__(self, ui_module=MyUI.OmnivorPlotControlUI)
        self.we_file_io = we_file_io
        #Connect widget signals to actionUpdatePlot
        self.ui.xLineEdit.editingFinished.connect(self.actionUpdatePlot)
        self.ui.yLineEdit.editingFinished.connect(self.actionUpdatePlot)
        self.ui.colorComboBox.currentIndexChanged.connect(self.actionUpdatePlot)
        self.ui.horizontalSlider.valueChanged.connect(self.actionUpdatePlot)
        self.ui.spinBox.valueChanged.connect(self.actionUpdatePlot)
        self.ui.doubleSpinBox.valueChanged.connect(self.actionUpdatePlot)

    # x_str = property(lambda self : str(self.ui.xLineEdit.text()))
    # y_str = property(lambda self : str(self.ui.yLineEdit.text()))
    # Read-only views of the current UI state.
    color = property(lambda self : str(self.ui.colorComboBox.currentText()))
    width = property(lambda self : self.ui.horizontalSlider.value())
    ylim = property(lambda self : (self.ui.spinBox.value(), self.ui.doubleSpinBox.value()))

    def actionUpdatePlot(self):
        """Slot: redraw the plot of the attached file reader."""
        self.we_file_io.plot()
from we_file_io import WEFileIO, TestWEFileIO
import unittest
import numpy as np
import os.path as path
# To read fortran binary files
from fortran_file import FortranFile
import tempfile
import os
import sys
class OmnivorFieldFile(WEFileIO):
    """Reader/writer/plotter for Omnivor binary velocity-field files (*.dat).

    Parsed content is kept in ``self.data`` (a dict) and is also exposed
    through dictionary-style indexing on the instance itself.
    """

    # File-type ids: 1 = gridded velocity field, 0 = flat (unstructured) field.
    ID_GRID_FIELD =1
    ID_FLAT_FIELD =0

    def __init__(self, mpl_widget):
        # Draw into the figure of the supplied matplotlib widget and attach
        # the Qt control panel that triggers replots.
        WEFileIO.__init__(self, file_type_name = "Exercise2file", file_extension = ".dat")
        self.figure = mpl_widget.figure
        self.ui_control = OmnivorPlotControlWidget(self)
        # self.plot()

    def _write(self):
        """ Write a file (overrided)
        """
        # NOTE(review): the FortranFile handle is never explicitly closed.
        f = FortranFile(self.filename,mode='w')
        # Default omnivor binary header
        f.writeInts ( self.data['MK'] , 'i' )
        f.writeInts ( self.data['itime'] , 'i' )
        f.writeString ( self.data['version'] )
        f.writeInts ( self.data['file_id'] , 'i' )
        f.writeString ( self.data['sversion'] )
        # Velocity field
        f.writeString ( self.data['stype'] )
        f.writeInts ( self.data['is_grid'] , 'i' )
        f.writeInts ( self.data['nCPs'] , 'i' )
        # MK is the real kind in bytes: 8 -> double precision ('d'), else single ('f').
        if self.data['MK'] == 8:
            real_char='d'
        else:
            real_char='f'
        if self.data['is_grid']:
            # Grid header: dimensions, straightness flag and the three grid vectors.
            f.writeInts ( self.data['n1'] , 'i' )
            f.writeInts ( self.data['n2'] , 'i' )
            f.writeInts ( self.data['n3'] , 'i' )
            f.writeInts ( self.data['is_straight'] , 'i' )
            f.writeReals ( self.data['v1'] , real_char )
            f.writeReals ( self.data['v2'] , real_char )
            f.writeReals ( self.data['v3'] , real_char )
        # Control points and velocities are written flattened in Fortran order.
        CPs = self.data['CPs'].flatten(order = 'F')
        Utot = self.data['Utot'].flatten(order = 'F')
        f.writeReals(CPs,real_char)
        f.writeReals(Utot,real_char)

    def _read(self):
        """ Read the file (overrided)
        """
        with open(self.filename, 'r') as f:
            # self.title = f.read()
            # initializng data dictionary
            print('Opening: %s'%self.filename)
            self.data={}
            # NOTE(review): the text handle opened above is immediately shadowed
            # by the FortranFile below; the ``with`` only proves the file exists.
            f = FortranFile(self.filename)
            # Default omnivor binary header
            self.data['MK'] = f.readInts('i')
            self.data['itime'] = f.readInts('i')
            self.data['version'] = f.readString()
            self.data['file_id'] = f.readInts('i')
            self.data['sversion'] = f.readString()
            # Velocity field
            self.data['stype'] = f.readString()
            self.data['is_grid'] = f.readInts('i')
            nCPs = f.readInts('i')
            self.data['nCPs'] = nCPs
            # MK is the real kind in bytes: 8 -> double precision ('d'), else single ('f').
            if self.data['MK'] == 8:
                real_char='d'
            else:
                real_char='f'
            if self.data['is_grid']:
                #print('File is a velocity grid file')
                n1 = f.readInts('i')
                n2 = f.readInts('i')
                n3 = f.readInts('i')
                self.data['n1'] = n1
                self.data['n2'] = n2
                self.data['n3'] = n3
                self.data['is_straight'] = f.readInts('i')
                self.data['v1'] = f.readReals(real_char)
                self.data['v2'] = f.readReals(real_char)
                self.data['v3'] = f.readReals(real_char)
            # Flattened (3, nCPs) arrays of control points and velocities.
            CPs_raw = f.readReals(real_char)
            Utot_raw = f.readReals(real_char)
            CPs = np.reshape(CPs_raw,(3,nCPs),order = 'F')
            Utot = np.reshape(Utot_raw,(3,nCPs),order = 'F')
            acc=-1
            # NOTE(review): n1/n2/n3 are only bound in the is_grid branch, so a
            # non-grid file would raise NameError here — confirm that flat
            # files are not expected to reach this reader.
            CPsTab = np.zeros((3, n1,n2,n3))
            UtotTab = np.zeros((3, n1,n2,n3))
            # Reshaping the nasty way (this is natural order).
            for i in range(0,n1):
                for j in range(0,n2):
                    for k in range(0,n3):
                        acc=acc+1
                        CPsTab[0:3,i,j,k] = CPs[0:3,acc]
                        UtotTab[0:3,i,j,k] = Utot[0:3,acc]
            self.data['CPs'] = CPs
            self.data['CPsTab'] = CPsTab
            self.data['Utot'] = Utot
            self.data['UtotTab'] = UtotTab
        self.plot()

    def _plot(self,fig):
        """Draw an animated contour of one velocity component into ``fig``,
        animating the slice index along the chosen normal axis."""
        from mpl_toolkits.mplot3d import Axes3D
        import matplotlib.pyplot as plt
        from matplotlib import animation
        # Control points and velocity
        CPs=self.data['CPsTab']
        Utot=self.data['UtotTab']
        # dimensions and grid vectors
        nd,nx,ny,nz=np.shape(CPs)
        n={'x':nx ,'y':ny,'z':nz}
        xi=CPs[0,:,0,0]
        yi=CPs[1,0,:,0]
        zi=CPs[2,0,0,:]
        #
        # plt.contour(xi, zi, uiy, 15, linewidths = 0.5, colors = 'k')
        # plt.pcolormesh(xi, zi, uiy, cmap = plt.get_cmap('rainbow'))
        ## try an unsteady contour
        # Figure handle and plotting function
        # fig = plt.figure()
        axes = fig.axes[0]
        # print(self.ui_control.x_str)
        # print(self.ui_control.y_str)
        # axes.set_ylim(self.ui_control.ylim)
        # axes.set_title(self.title)
        # fig.canvas.draw()
        # fig=plt.figure()
        ax=fig.gca()
        # ax = fig.axes[0]
        def animate(i,normal,component,ax,fig):
            # One animation frame: contourf of slice i perpendicular to `normal`.
            c2i={'x':0 ,'y':1,'z':2}
            icomp=c2i[component]
            Umean=np.mean(Utot[icomp,:,:,:])
            Ustd=np.std(Utot[icomp,:,:,:])
            if normal=='y':
                x=xi
                y=zi
                ui=Utot[icomp,:,i,:]
                ui=ui.transpose()
            elif normal=='z':
                x=xi
                y=yi
                ui=Utot[icomp,:,:,i]
                ui=ui.transpose()
            else:
                x=yi
                y=zi
                ui=Utot[icomp,i,:,:]
                ui=ui.transpose()
            print(str(i)+' / '+str(n[normal]))
            ax.cla()
            # ax.set_xlabel('Streamwise direction [m]')
            # ax.set_ylabel('Lateral direction [m]')
            ax.set_title('Velocity along %s [m/s]'%component)
            im=ax.contourf(x,y,ui,levels=np.arange(Umean-5*Ustd,Umean+5*Ustd,Ustd/30),cmap=plt.get_cmap('hsv'),linewidth=0.1)
            # plt.axis('equal')
            # if i==1:
            #     plt.colorbar(im)
            # if i==(n[normal]-1):
            #     plt.close()
            return im
        # Component selection and normal for plotting
        component='y'
        normal='y'
        ani = animation.FuncAnimation(fig, animate,frames=n[normal], fargs=(normal,component,ax,fig))
        fig.canvas.draw()
        # plt.show()
        # ani.save('basic_animation.mp4', writer=animation.FFMpegWriter(fps=20,bitrate=10000,codec='libx264'))
        # fig = plt.figure()
        # i=1
        # ax.scatter(CPs[0,i,:,:], CPs[1,i,:,:],CPs[2,i,:,:])
        # ax.scatter(CPs[0,:,i,:], CPs[1,:,i,:],CPs[2,:,i,:])
        # ax = fig.add_subplot(111, projection='3d')
        # ax.set_xlabel('X Label')
        # ax.set_ylabel('Y Label')
        # ax.set_zlabel('Z Label')

    #
    def __getitem__(self, key):
        """Dictionary-style read access to a parsed field."""
        return self.data[key]

    def __setitem__(self, key, value):
        """Dictionary-style write access to a parsed field."""
        self.data[key] = value
#
## Do Some testing -------------------------------------------------------
class TestOmnivorField(TestWEFileIO):
    """ Test class for OmnivorField class """
    # test_file = './test/omnivor/ugrid_8584.dat'
    # test_file = './test/omnivor/uservel_8584.dat'
    # InputFile=OmnivorFieldFile(test_file)
    # InputFile.plot()
    # # InputFile.write(test_file+'_new')
    #
    def test_duplication(self):
        """Round-trip test: read a field file, write it back out, and check
        that the control points and velocities survive unchanged."""
        import os
        original_file, new_file = self._duplicate(OmnivorFieldFile, self.test_file)
        # self.assertEqual(original_file.data[], new_file.data)
        # BUG FIX: np.array_equal returns a bool; the previous code discarded
        # it, so this test could never fail on differing arrays.  Assert it
        # (self.assert* is available, cf. the commented assertEqual above).
        self.assertTrue(np.array_equal(original_file.data['CPs'], new_file.data['CPs']))
        self.assertTrue(np.array_equal(original_file.data['Utot'], new_file.data['Utot']))
        os.remove(new_file.filename)
# --------------------------------------------------------------------------------
# ---
# --------------------------------------------------------------------------------
# class MyPlotFileIO(WEFileIO):
# title = "No title"
# def __init__(self, mpl_widget):
# WEFileIO.__init__(self, file_type_name = "Exercise2file", file_extension = ".title")
# self.figure = mpl_widget.figure
# self.ui_control = MyPlotControlWidget(self)
# self.plot()
#
# def _read(self):
# with open(self.filename, 'r') as f:
# self.title = f.read()
# self.plot()
#
# def _plot(self, fig):
# axes = fig.axes[0]
# print(self.ui_control.x_str)
# print(self.ui_control.y_str)
# x = eval(self.ui_control.x_str)
# y = eval(self.ui_control.y_str)
# axes.plot(x,y, self.ui_control.color, linewidth=self.ui_control.width)
# axes.set_ylim(self.ui_control.ylim)
# axes.set_title(self.title)
# fig.canvas.draw()
# --------------------------------------------------------------------------------
# ---
# --------------------------------------------------------------------------------
import MyUI.MyPlotMainWindowUI
from QtGuiLoader import QtMainWindowLoader
from matplotlibwidget import MatplotlibWidget
from PyQt4 import QtGui
class MyPlotMainWindow(QtMainWindowLoader):
    """Main window: embeds a matplotlib canvas plus the Omnivor plot controls
    and wires the Open action to the file reader."""

    def __init__(self):
        module = MyUI.MyPlotMainWindowUI
        try:self.ui = module.Ui_Form() # Enables autocompletion (if you are lucky...)
        except: pass
        QtMainWindowLoader.__init__(self, module)
        mpl = MatplotlibWidget()
        self.ui.gridLayoutPlot.addWidget(mpl)
        # The reader draws into the embedded canvas; its control panel is
        # placed next to the plot.
        self.fileio = OmnivorFieldFile(mpl)
        self.ui.gridLayoutControl.addWidget(self.fileio.ui_control)

    def actionOpen(self):
        """Slot for the Open action: ask for a *.dat file and load it."""
        filename = str(QtGui.QFileDialog.getOpenFileName(self, "Open...", ".", "*%s" % self.fileio.file_extension))
        if filename == "": return #cancel
        self.fileio.read(filename)

# Launch the GUI immediately at import/run time.
MyPlotMainWindow().start()
| from __future__ import print_function
import sys
sys.path.append("../py4we") # append py4we package to path to access WEFileIO
from we_file_io import WEFileIO
# from ofield_file_io import OmnivorFieldFile
import numpy as np
import numpy as np
from QtGuiLoader import QtWidgetLoader
import MyUI.OmnivorPlotControlUI
class OmnivorPlotControlWidget(QtWidgetLoader):
def __init__(self, we_file_io):
try:self.ui = MyUI.OmnivorPlotControlUI.Ui_Form() # Enables autocompletion (if you are lucky...)
except: pass
QtWidgetLoader.__init__(self, ui_module=MyUI.OmnivorPlotControlUI)
self.we_file_io = we_file_io
#Connect widget signals to actionUpdatePlot
self.ui.xLineEdit.editingFinished.connect(self.actionUpdatePlot)
self.ui.yLineEdit.editingFinished.connect(self.actionUpdatePlot)
self.ui.colorComboBox.currentIndexChanged.connect(self.actionUpdatePlot)
self.ui.horizontalSlider.valueChanged.connect(self.actionUpdatePlot)
self.ui.spinBox.valueChanged.connect(self.actionUpdatePlot)
self.ui.doubleSpinBox.valueChanged.connect(self.actionUpdatePlot)
# x_str = property(lambda self : str(self.ui.xLineEdit.text()))
# y_str = property(lambda self : str(self.ui.yLineEdit.text()))
color = property(lambda self : str(self.ui.colorComboBox.currentText()))
width = property(lambda self : self.ui.horizontalSlider.value())
ylim = property(lambda self : (self.ui.spinBox.value(), self.ui.doubleSpinBox.value()))
def actionUpdatePlot(self):
self.we_file_io.plot()
from we_file_io import WEFileIO, TestWEFileIO
import unittest
import numpy as np
import os.path as path
# To read fortran binary files
from fortran_file import FortranFile
import tempfile
import os
import sys
class OmnivorFieldFile(WEFileIO):
ID_GRID_FIELD =1
ID_FLAT_FIELD =0
def __init__(self, mpl_widget):
WEFileIO.__init__(self, file_type_name = "Exercise2file", file_extension = ".dat")
self.figure = mpl_widget.figure
self.ui_control = OmnivorPlotControlWidget(self)
# self.plot()
def _write(self):
""" Write a file (overrided)
"""
f = FortranFile(self.filename,mode='w')
# Default omnivor binary header
f.writeInts ( self.data['MK'] , 'i' )
f.writeInts ( self.data['itime'] , 'i' )
f.writeString ( self.data['version'] )
f.writeInts ( self.data['file_id'] , 'i' )
f.writeString ( self.data['sversion'] )
# Velocity field
f.writeString ( self.data['stype'] )
f.writeInts ( self.data['is_grid'] , 'i' )
f.writeInts ( self.data['nCPs'] , 'i' )
if self.data['MK'] == 8:
real_char='d'
else:
real_char='f'
if self.data['is_grid']:
f.writeInts ( self.data['n1'] , 'i' )
f.writeInts ( self.data['n2'] , 'i' )
f.writeInts ( self.data['n3'] , 'i' )
f.writeInts ( self.data['is_straight'] , 'i' )
f.writeReals ( self.data['v1'] , real_char )
f.writeReals ( self.data['v2'] , real_char )
f.writeReals ( self.data['v3'] , real_char )
CPs = self.data['CPs'].flatten(order = 'F')
Utot = self.data['Utot'].flatten(order = 'F')
f.writeReals(CPs,real_char)
f.writeReals(Utot,real_char)
def _read(self):
""" Read the file (overrided)
"""
with open(self.filename, 'r') as f:
# self.title = f.read()
# initializng data dictionary
print('Opening: %s'%self.filename)
self.data={}
f = FortranFile(self.filename)
# Default omnivor binary header
self.data['MK'] = f.readInts('i')
self.data['itime'] = f.readInts('i')
self.data['version'] = f.readString()
self.data['file_id'] = f.readInts('i')
self.data['sversion'] = f.readString()
# Velocity field
self.data['stype'] = f.readString()
self.data['is_grid'] = f.readInts('i')
nCPs = f.readInts('i')
self.data['nCPs'] = nCPs
if self.data['MK'] == 8:
real_char='d'
else:
real_char='f'
if self.data['is_grid']:
#print('File is a velocity grid file')
n1 = f.readInts('i')
n2 = f.readInts('i')
n3 = f.readInts('i')
self.data['n1'] = n1
self.data['n2'] = n2
self.data['n3'] = n3
self.data['is_straight'] = f.readInts('i')
self.data['v1'] = f.readReals(real_char)
self.data['v2'] = f.readReals(real_char)
self.data['v3'] = f.readReals(real_char)
CPs_raw = f.readReals(real_char)
Utot_raw = f.readReals(real_char)
CPs = np.reshape(CPs_raw,(3,nCPs),order = 'F')
Utot = np.reshape(Utot_raw,(3,nCPs),order = 'F')
acc=-1
CPsTab = np.zeros((3, n1,n2,n3))
UtotTab = np.zeros((3, n1,n2,n3))
# Reshaping the nasty way (this is natural order).
for i in range(0,n1):
for j in range(0,n2):
for k in range(0,n3):
acc=acc+1
CPsTab[0:3,i,j,k] = CPs[0:3,acc]
UtotTab[0:3,i,j,k] = Utot[0:3,acc]
self.data['CPs'] = CPs
self.data['CPsTab'] = CPsTab
self.data['Utot'] = Utot
self.data['UtotTab'] = UtotTab
self.plot()
def _plot(self,fig):
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import animation
# Control points and velocity
CPs=self.data['CPsTab']
Utot=self.data['UtotTab']
# dimensions and grid vectors
nd,nx,ny,nz=np.shape(CPs)
n={'x':nx ,'y':ny,'z':nz}
xi=CPs[0,:,0,0]
yi=CPs[1,0,:,0]
zi=CPs[2,0,0,:]
#
# plt.contour(xi, zi, uiy, 15, linewidths = 0.5, colors = 'k')
# plt.pcolormesh(xi, zi, uiy, cmap = plt.get_cmap('rainbow'))
## try an unsteady contour
# Figure handle and plotting function
# fig = plt.figure()
axes = fig.axes[0]
# print(self.ui_control.x_str)
# print(self.ui_control.y_str)
# axes.set_ylim(self.ui_control.ylim)
# axes.set_title(self.title)
# fig.canvas.draw()
# fig=plt.figure()
ax=fig.gca()
# ax = fig.axes[0]
def animate(i,normal,component,ax,fig):
c2i={'x':0 ,'y':1,'z':2}
icomp=c2i[component]
Umean=np.mean(Utot[icomp,:,:,:])
Ustd=np.std(Utot[icomp,:,:,:])
if normal=='y':
x=xi
y=zi
ui=Utot[icomp,:,i,:]
ui=ui.transpose()
elif normal=='z':
x=xi
y=yi
ui=Utot[icomp,:,:,i]
ui=ui.transpose()
else:
x=yi
y=zi
ui=Utot[icomp,i,:,:]
ui=ui.transpose()
print(str(i)+' / '+str(n[normal]))
ax.cla()
# ax.set_xlabel('Streamwise direction [m]')
# ax.set_ylabel('Lateral direction [m]')
ax.set_title('Velocity along %s [m/s]'%component)
im=ax.contourf(x,y,ui,levels=np.arange(Umean-5*Ustd,Umean+5*Ustd,Ustd/30),cmap=plt.get_cmap('hsv'),linewidth=0.1)
# plt.axis('equal')
# if i==1:
# plt.colorbar(im)
# if i==(n[normal]-1):
# plt.close()
return im
# Component selection and normal for plotting
component='y'
normal='y'
ani = animation.FuncAnimation(fig, animate,frames=n[normal], fargs=(normal,component,ax,fig))
fig.canvas.draw()
# plt.show()
# ani.save('basic_animation.mp4', writer=animation.FFMpegWriter(fps=20,bitrate=10000,codec='libx264'))
# fig = plt.figure()
# i=1
# ax.scatter(CPs[0,i,:,:], CPs[1,i,:,:],CPs[2,i,:,:])
# ax.scatter(CPs[0,:,i,:], CPs[1,:,i,:],CPs[2,:,i,:])
# ax = fig.add_subplot(111, projection='3d')
# ax.set_xlabel('X Label')
# ax.set_ylabel('Y Label')
# ax.set_zlabel('Z Label')
#
def __getitem__(self, key):
""" Transform the class instance into a dictionary."""
return self.data[key]
def __setitem__(self, key, value):
""" Transform the class instance into a dictionary."""
self.data[key] = value
#
## Do Some testing -------------------------------------------------------
class TestOmnivorField(TestWEFileIO):
""" Test class for OmnivorField class """
# test_file = './test/omnivor/ugrid_8584.dat'
# test_file = './test/omnivor/uservel_8584.dat'
# InputFile=OmnivorFieldFile(test_file)
# InputFile.plot()
# # InputFile.write(test_file+'_new')
#
def test_duplication(self):
import os
original_file, new_file = self._duplicate(OmnivorFieldFile, self.test_file)
# self.assertEqual(original_file.data[], new_file.data)
np.array_equal(original_file.data['CPs'], new_file.data['CPs'])
np.array_equal(original_file.data['Utot'], new_file.data['Utot'])
os.remove(new_file.filename)
# --------------------------------------------------------------------------------
# ---
# --------------------------------------------------------------------------------
# class MyPlotFileIO(WEFileIO):
# title = "No title"
# def __init__(self, mpl_widget):
# WEFileIO.__init__(self, file_type_name = "Exercise2file", file_extension = ".title")
# self.figure = mpl_widget.figure
# self.ui_control = MyPlotControlWidget(self)
# self.plot()
#
# def _read(self):
# with open(self.filename, 'r') as f:
# self.title = f.read()
# self.plot()
#
# def _plot(self, fig):
# axes = fig.axes[0]
# print(self.ui_control.x_str)
# print(self.ui_control.y_str)
# x = eval(self.ui_control.x_str)
# y = eval(self.ui_control.y_str)
# axes.plot(x,y, self.ui_control.color, linewidth=self.ui_control.width)
# axes.set_ylim(self.ui_control.ylim)
# axes.set_title(self.title)
# fig.canvas.draw()
# --------------------------------------------------------------------------------
# ---
# --------------------------------------------------------------------------------
import MyUI.MyPlotMainWindowUI
from QtGuiLoader import QtMainWindowLoader
from matplotlibwidget import MatplotlibWidget
from PyQt4 import QtGui
class MyPlotMainWindow(QtMainWindowLoader):
def __init__(self):
module = MyUI.MyPlotMainWindowUI
try:self.ui = module.Ui_Form() # Enables autocompletion (if you are lucky...)
except: pass
QtMainWindowLoader.__init__(self, module)
mpl = MatplotlibWidget()
self.ui.gridLayoutPlot.addWidget(mpl)
self.fileio = OmnivorFieldFile(mpl)
self.ui.gridLayoutControl.addWidget(self.fileio.ui_control)
def actionOpen(self):
filename = str(QtGui.QFileDialog.getOpenFileName(self, "Open...", ".", "*%s" % self.fileio.file_extension))
if filename == "": return #cancel
self.fileio.read(filename)
MyPlotMainWindow().start() | en | 0.343965 | # append py4we package to path to access WEFileIO # from ofield_file_io import OmnivorFieldFile # Enables autocompletion (if you are lucky...) #Connect widget signals to actionUpdatePlot # x_str = property(lambda self : str(self.ui.xLineEdit.text())) # y_str = property(lambda self : str(self.ui.yLineEdit.text())) # To read fortran binary files # self.plot() Write a file (overrided) # Default omnivor binary header # Velocity field Read the file (overrided) # self.title = f.read() # initializng data dictionary # Default omnivor binary header # Velocity field #print('File is a velocity grid file') # Reshaping the nasty way (this is natural order). # Control points and velocity # dimensions and grid vectors # # plt.contour(xi, zi, uiy, 15, linewidths = 0.5, colors = 'k') # plt.pcolormesh(xi, zi, uiy, cmap = plt.get_cmap('rainbow')) ## try an unsteady contour # Figure handle and plotting function # fig = plt.figure() # print(self.ui_control.x_str) # print(self.ui_control.y_str) # axes.set_ylim(self.ui_control.ylim) # axes.set_title(self.title) # fig.canvas.draw() # fig=plt.figure() # ax = fig.axes[0] # ax.set_xlabel('Streamwise direction [m]') # ax.set_ylabel('Lateral direction [m]') # plt.axis('equal') # if i==1: # plt.colorbar(im) # if i==(n[normal]-1): # plt.close() # Component selection and normal for plotting # plt.show() # ani.save('basic_animation.mp4', writer=animation.FFMpegWriter(fps=20,bitrate=10000,codec='libx264')) # fig = plt.figure() # i=1 # ax.scatter(CPs[0,i,:,:], CPs[1,i,:,:],CPs[2,i,:,:]) # ax.scatter(CPs[0,:,i,:], CPs[1,:,i,:],CPs[2,:,i,:]) # ax = fig.add_subplot(111, projection='3d') # ax.set_xlabel('X Label') # ax.set_ylabel('Y Label') # ax.set_zlabel('Z Label') # Transform the class instance into a dictionary. Transform the class instance into a dictionary. 
# ## Do Some testing ------------------------------------------------------- Test class for OmnivorField class # test_file = './test/omnivor/ugrid_8584.dat' # test_file = './test/omnivor/uservel_8584.dat' # InputFile=OmnivorFieldFile(test_file) # InputFile.plot() # # InputFile.write(test_file+'_new') # # self.assertEqual(original_file.data[], new_file.data) # -------------------------------------------------------------------------------- # --- # -------------------------------------------------------------------------------- # class MyPlotFileIO(WEFileIO): # title = "No title" # def __init__(self, mpl_widget): # WEFileIO.__init__(self, file_type_name = "Exercise2file", file_extension = ".title") # self.figure = mpl_widget.figure # self.ui_control = MyPlotControlWidget(self) # self.plot() # # def _read(self): # with open(self.filename, 'r') as f: # self.title = f.read() # self.plot() # # def _plot(self, fig): # axes = fig.axes[0] # print(self.ui_control.x_str) # print(self.ui_control.y_str) # x = eval(self.ui_control.x_str) # y = eval(self.ui_control.y_str) # axes.plot(x,y, self.ui_control.color, linewidth=self.ui_control.width) # axes.set_ylim(self.ui_control.ylim) # axes.set_title(self.title) # fig.canvas.draw() # -------------------------------------------------------------------------------- # --- # -------------------------------------------------------------------------------- # Enables autocompletion (if you are lucky...) #cancel | 2.360039 | 2 |
mspypeline/file_reader/MQReader.py | anoburn/mspypeline | 0 | 6616990 | <gh_stars>0
import os
import pandas as pd
from pandas.api.types import is_numeric_dtype
import logging
from mspypeline.helpers import dict_depth, get_analysis_design
from mspypeline.file_reader import BaseReader, MissingFilesException
from mspypeline.core import MaxQuantPlotter
class MQReader(BaseReader):
proteins_txt = "proteinGroups.txt"
peptides_txt = "peptides.txt"
mapping_txt = "sample_mapping.txt"
summary_txt = "summary.txt"
parameters_txt = "parameters.txt"
msms_scans_txt = "msmsScans.txt"
ms_scans_txt = "msScans.txt"
evidence_txt = "evidence.txt"
required_files = [proteins_txt]
name = "mqreader"
plotter = MaxQuantPlotter
def __init__(self, start_dir: str,
reader_config: dict,
index_col: str = "Gene name",
duplicate_handling: str = "sum",
loglevel=logging.DEBUG):
super().__init__(start_dir, reader_config, loglevel=loglevel)
# TODO connect this to the configs of the initializer
self.data_dir = os.path.join(self.start_dir, "txt") # TODO only add this if is not there
self.index_col = index_col
self.duplicate_handling = duplicate_handling
# read a sample of all required files. If any required file is missing exit
# but we need only one file from the max quant results
try:
file_dir = os.path.join(self.data_dir, self.proteins_txt)
df = pd.read_csv(file_dir, sep="\t", nrows=5)
self.proteins_txt_columns = df.columns
except FileNotFoundError:
raise MissingFilesException("Could find all of: " + ", ".join(MQReader.required_files))
# read the sample name mapping file
try:
self.mapping_txt = pd.read_csv(os.path.join(self.start_dir, MQReader.mapping_txt),
sep="\t", header=0, index_col=None)
if self.mapping_txt.shape[1] != 2:
raise ValueError(f"{MQReader.mapping_txt} should have two columns")
duplicated_old = self.mapping_txt.iloc[:, 0].duplicated(keep=False)
duplicated_new = self.mapping_txt.iloc[:, 1].duplicated(keep=False)
if any(duplicated_new) or any(duplicated_old):
raise ValueError(f"{MQReader.mapping_txt} should only contain unique rows, "
f"{self.mapping_txt.iloc[:, 0][duplicated_old]}, "
f"{self.mapping_txt.iloc[:, 1][duplicated_new]}")
self.logger.info("Successfully loaded %s", MQReader.mapping_txt)
except FileNotFoundError:
self.mapping_txt = None
# rename all columns based on the mapping
self.new_proteins_txt_columns = self.proteins_txt_columns
if self.mapping_txt is not None:
self.new_proteins_txt_columns = self.rename_df_columns(self.new_proteins_txt_columns)
# get columns that should be dropped
to_drop = self.reader_config.get("drop_columns", [])
if to_drop:
if not isinstance(to_drop, list):
to_drop = [to_drop]
# subset on all columns that start with intensity
self.intensity_column_names = sorted([x.replace("Intensity ", "") for x in self.new_proteins_txt_columns
if x.startswith('Intensity ')], key=len, reverse=True)
self.intensity_column_names = [x for x in self.intensity_column_names if x not in to_drop]
if not self.reader_config.get("all_replicates", False):
self.reader_config["all_replicates"] = self.intensity_column_names
# check the naming convention
self.naming_convention = self.check_naming_convention()
# determine the grouping
self.analysis_design = self.determine_groupings()
if not self.reader_config.get("analysis_design", False):
self.reader_config["analysis_design"] = self.analysis_design
self.reader_config["levels"] = dict_depth(self.analysis_design)
self.reader_config["level_names"] = [x for x in range(self.reader_config["levels"])]
def rename_df_columns(self, col_names: list) -> list:
if self.mapping_txt is None:
return col_names
mapping_dict = {old_name: new_name for old_name, new_name
in zip(self.mapping_txt.iloc[:, 0], self.mapping_txt.iloc[:, 1])}
def find_key_match(col):
matches = []
for key in mapping_dict:
if key in col:
matches.append(key)
matches = sorted(matches, key=len)
if len(matches) == 0:
return ""
else:
return matches[-1]
match_list = [find_key_match(col) for col in col_names]
# add a replace value for the default string
mapping_dict[""] = ""
return [col_names[i].replace(match_list[i], mapping_dict[match_list[i]]) for i in range(len(col_names))]
def check_naming_convention(self) -> bool:
# does the name follow the convention
first_rep_split = len(self.intensity_column_names[0].split("_"))
naming_convention = all(len(rep.split("_")) == first_rep_split for rep in self.intensity_column_names)
# create a mapping template if the convention is not followed and the file doesn't exist
if not naming_convention:
mapping_template_name = os.path.join(self.start_dir, MQReader.mapping_txt.replace("ing", "ing_template"))
self.logger.warning("Naming of experiments does not follow naming convention, "
"please consider using a %s file", MQReader.mapping_txt)
if os.path.isfile(mapping_template_name):
self.logger.warning("Currently unused %s file in the directory", mapping_template_name)
else:
self.logger.warning("Creating %s file. Please provide a mapping that follows "
"the indicated naming convention", mapping_template_name)
old_sample_names = sorted([x.replace('Intensity ', '') for x in self.proteins_txt_columns
if x.startswith('Intensity ')], key=len, reverse=True)
# dump the names that still correspond to the names in the txt files
mapping = pd.DataFrame({"old name": old_sample_names,
"new name": ["groupname_experimentname_techrepname"] * len(old_sample_names)})
mapping.to_csv(mapping_template_name, header=True, index=False, sep="\t")
return naming_convention
def determine_groupings(self):
# extract the analysis design from the file
if not self.reader_config.get("analysis_design", False):
# try to automatically determine experimental setup
# if the naming convention is followed it is quite easy
if self.naming_convention:
analysis_design = get_analysis_design(self.intensity_column_names)
# otherwise we can just guess grouping
else:
analysis_design = self.guess_analysis_design(self.intensity_column_names)
else:
analysis_design = self.reader_config.get("analysis_design")
return analysis_design
def guess_analysis_design(self, all_reps):
raise NotImplementedError("This is not implemented at the moment. Please stick to the naming convention")
# TODO update the attempted matching mechanism
def preprocess_proteinGroups(self):
file_dir = os.path.join(self.data_dir, MQReader.proteins_txt)
df_protein_groups = pd.read_csv(file_dir, sep="\t")
df_protein_groups.columns = self.rename_df_columns(df_protein_groups.columns)
not_contaminants = (df_protein_groups[
["Only identified by site", "Reverse", "Potential contaminant"]] == "+"
).sum(axis=1) == 0
self.logger.debug("Removing %s rows from %s because they are marked as contaminant",
(~not_contaminants).sum(), MQReader.proteins_txt)
df_protein_groups = df_protein_groups[not_contaminants]
if any(df_protein_groups["Fasta headers"].isna()):
self.logger.warning("Missing fasta headers using default columns for information")
gene_name = df_protein_groups["Gene names"]
sep_ind = gene_name.str.contains(";").fillna(False)
if sep_ind.sum() > 0:
gene_name[sep_ind] = gene_name[sep_ind].str.split(";", expand=True)[0]
concat_df = pd.DataFrame({
"protein id": df_protein_groups["Protein names"],
"Gene name": gene_name.str.upper(),
"Protein name": ["Missing"] * gene_name.shape[0],
})
else:
# split the fasta headers
colon_start = df_protein_groups["Fasta headers"].str.startswith(";")
df_protein_groups.loc[colon_start, "Fasta headers"] = df_protein_groups.loc[
colon_start, "Fasta headers"].str.lstrip(";")
# first split all fasta headers that contain multiple entries
sep_ind = df_protein_groups["Fasta headers"].str.contains(";").fillna(False)
# replace all fasta headers with multiple entries with only the first one
df_protein_groups.loc[sep_ind, "Fasta headers"] = \
df_protein_groups.loc[sep_ind, "Fasta headers"].str.split(";", expand=True)[0]
# split the fasta headers with the pipe symbol
fasta_col = df_protein_groups["Fasta headers"].str.split("|", n=2).apply(pd.Series)
fasta_col.columns = ["trash", "protein id", "description"]
# extract the gene name from the description eg: "GN=abcd"
gene_names_fasta = fasta_col["description"].str.extract(r"(GN=(.*?)(\s|$))")[1]
# added upper() function to avoid that non-human gene names are not recognized
concat_df = pd.DataFrame({
"protein id": fasta_col["protein id"],
"Gene name": gene_names_fasta.str.upper(),
"Protein name": fasta_col["description"].str.split("_", expand=True)[0]
})
# concat all important columns with the original dataframe
df_protein_groups = pd.concat([df_protein_groups, concat_df], axis=1)
# remove all rows where the column used for indexing is missing
mask = ~pd.isna(df_protein_groups[self.index_col])
df_protein_groups = df_protein_groups.loc[mask]
if ~mask.sum() > 0:
self.logger.warning("Removing %s rows because the index col information from: %s is missing",
~mask.sum(), self.index_col)
# set index
self.logger.info("Setting index of %s to %s", MQReader.proteins_txt, self.index_col)
df_protein_groups = df_protein_groups.set_index(df_protein_groups[self.index_col], drop=False)
# convert all non numeric intensities
for col in [col for col in df_protein_groups.columns if "Intensity " in col or "LFQ " in col or "iBAQ " in col]:
if not is_numeric_dtype(df_protein_groups[col]):
df_protein_groups[col] = df_protein_groups[col].apply(lambda x: x.replace(",", ".")).fillna(0)
df_protein_groups[col] = df_protein_groups[col].astype("int64")
# handle all rows with duplicated index column
duplicates = df_protein_groups.duplicated(subset=self.index_col, keep=False)
if any(duplicates):
self.logger.warning("Found duplicates in %s column. Duplicate names: %s",
self.index_col, ", ".join(df_protein_groups[duplicates].loc[:, self.index_col]))
if self.duplicate_handling == "drop":
self.logger.warning("Dropping all %s duplicates.", duplicates.sum())
df_protein_groups = df_protein_groups.drop_duplicates(subset=self.index_col, keep=False)
elif self.duplicate_handling == "sum":
def group_sum(x):
x.iloc[0].loc[x.select_dtypes("number").columns] = x.sum(axis=0, numeric_only=True)
return x.iloc[0]
new_index = df_protein_groups.index.drop_duplicates(keep=False)
duplicate_index = df_protein_groups.index.difference(new_index)
df_dup = df_protein_groups.loc[duplicate_index, :]
self.logger.warning("Merging %s rows into %s by summing numerical columns. "
"Some information might be incorrect", df_dup.shape[0], duplicate_index.shape[0])
df_dup = df_dup.groupby(df_dup.index).apply(group_sum)
df_protein_groups = pd.concat([df_protein_groups.loc[new_index, :], df_dup], axis=0)
self.logger.debug("%s shape after preprocessing: %s", MQReader.proteins_txt, df_protein_groups.shape)
return df_protein_groups
def preprocess_peptides(self):
file_dir = os.path.join(self.data_dir, MQReader.peptides_txt)
df_peptides = pd.read_csv(file_dir, sep="\t")
df_peptides.columns = self.rename_df_columns(df_peptides.columns)
not_contaminants = (df_peptides[
["Reverse", "Potential contaminant"]] == "+"
).sum(axis=1) == 0
df_peptides = df_peptides[not_contaminants]
self.logger.debug("Removing %s rows from %s because they are marked as contaminant",
(~not_contaminants).sum(), MQReader.peptides_txt)
return df_peptides
def preprocess_summary(self):
file_dir = os.path.join(self.data_dir, MQReader.summary_txt)
df_summary = pd.read_csv(file_dir, sep="\t")
df_summary.columns = self.rename_df_columns(df_summary.columns)
df_summary = df_summary[df_summary["Enzyme"].notna()]
df_summary["Experiment"] = self.rename_df_columns(df_summary["Experiment"])
return df_summary
def preprocess_parameters(self):
file_dir = os.path.join(self.data_dir, MQReader.parameters_txt)
df_parameters = pd.read_csv(file_dir, sep="\t", index_col=[0], squeeze=True)
return df_parameters
def preprocess_evidence(self):
file_dir = os.path.join(self.data_dir, MQReader.evidence_txt)
df_evidence = pd.read_csv(file_dir, sep="\t")
not_contaminants = (df_evidence[["Reverse", "Potential contaminant"]] == "+").sum(axis=1) == 0
df_evidence = df_evidence[not_contaminants]
df_evidence.columns = self.rename_df_columns(df_evidence.columns)
df_evidence["Experiment"] = self.rename_df_columns(df_evidence["Experiment"].tolist())
return df_evidence
def preprocess_msScans(self):
file_dir = os.path.join(self.data_dir, MQReader.ms_scans_txt)
df_msscans = pd.read_csv(file_dir, sep="\t", index_col=[0],
usecols=["Raw file", "Total ion current", "Retention time"])
return df_msscans
def preprocess_msmsScans(self):
file_dir = os.path.join(self.data_dir, MQReader.msms_scans_txt)
df_msmsscans = pd.read_csv(file_dir, sep="\t", index_col=[0],
usecols=["Raw file", "Total ion current", "Retention time"])
return df_msmsscans
| import os
import pandas as pd
from pandas.api.types import is_numeric_dtype
import logging
from mspypeline.helpers import dict_depth, get_analysis_design
from mspypeline.file_reader import BaseReader, MissingFilesException
from mspypeline.core import MaxQuantPlotter
class MQReader(BaseReader):
proteins_txt = "proteinGroups.txt"
peptides_txt = "peptides.txt"
mapping_txt = "sample_mapping.txt"
summary_txt = "summary.txt"
parameters_txt = "parameters.txt"
msms_scans_txt = "msmsScans.txt"
ms_scans_txt = "msScans.txt"
evidence_txt = "evidence.txt"
required_files = [proteins_txt]
name = "mqreader"
plotter = MaxQuantPlotter
def __init__(self, start_dir: str,
reader_config: dict,
index_col: str = "Gene name",
duplicate_handling: str = "sum",
loglevel=logging.DEBUG):
super().__init__(start_dir, reader_config, loglevel=loglevel)
# TODO connect this to the configs of the initializer
self.data_dir = os.path.join(self.start_dir, "txt") # TODO only add this if is not there
self.index_col = index_col
self.duplicate_handling = duplicate_handling
# read a sample of all required files. If any required file is missing exit
# but we need only one file from the max quant results
try:
file_dir = os.path.join(self.data_dir, self.proteins_txt)
df = pd.read_csv(file_dir, sep="\t", nrows=5)
self.proteins_txt_columns = df.columns
except FileNotFoundError:
raise MissingFilesException("Could find all of: " + ", ".join(MQReader.required_files))
# read the sample name mapping file
try:
self.mapping_txt = pd.read_csv(os.path.join(self.start_dir, MQReader.mapping_txt),
sep="\t", header=0, index_col=None)
if self.mapping_txt.shape[1] != 2:
raise ValueError(f"{MQReader.mapping_txt} should have two columns")
duplicated_old = self.mapping_txt.iloc[:, 0].duplicated(keep=False)
duplicated_new = self.mapping_txt.iloc[:, 1].duplicated(keep=False)
if any(duplicated_new) or any(duplicated_old):
raise ValueError(f"{MQReader.mapping_txt} should only contain unique rows, "
f"{self.mapping_txt.iloc[:, 0][duplicated_old]}, "
f"{self.mapping_txt.iloc[:, 1][duplicated_new]}")
self.logger.info("Successfully loaded %s", MQReader.mapping_txt)
except FileNotFoundError:
self.mapping_txt = None
# rename all columns based on the mapping
self.new_proteins_txt_columns = self.proteins_txt_columns
if self.mapping_txt is not None:
self.new_proteins_txt_columns = self.rename_df_columns(self.new_proteins_txt_columns)
# get columns that should be dropped
to_drop = self.reader_config.get("drop_columns", [])
if to_drop:
if not isinstance(to_drop, list):
to_drop = [to_drop]
# subset on all columns that start with intensity
self.intensity_column_names = sorted([x.replace("Intensity ", "") for x in self.new_proteins_txt_columns
if x.startswith('Intensity ')], key=len, reverse=True)
self.intensity_column_names = [x for x in self.intensity_column_names if x not in to_drop]
if not self.reader_config.get("all_replicates", False):
self.reader_config["all_replicates"] = self.intensity_column_names
# check the naming convention
self.naming_convention = self.check_naming_convention()
# determine the grouping
self.analysis_design = self.determine_groupings()
if not self.reader_config.get("analysis_design", False):
self.reader_config["analysis_design"] = self.analysis_design
self.reader_config["levels"] = dict_depth(self.analysis_design)
self.reader_config["level_names"] = [x for x in range(self.reader_config["levels"])]
def rename_df_columns(self, col_names: list) -> list:
if self.mapping_txt is None:
return col_names
mapping_dict = {old_name: new_name for old_name, new_name
in zip(self.mapping_txt.iloc[:, 0], self.mapping_txt.iloc[:, 1])}
def find_key_match(col):
matches = []
for key in mapping_dict:
if key in col:
matches.append(key)
matches = sorted(matches, key=len)
if len(matches) == 0:
return ""
else:
return matches[-1]
match_list = [find_key_match(col) for col in col_names]
# add a replace value for the default string
mapping_dict[""] = ""
return [col_names[i].replace(match_list[i], mapping_dict[match_list[i]]) for i in range(len(col_names))]
def check_naming_convention(self) -> bool:
# does the name follow the convention
first_rep_split = len(self.intensity_column_names[0].split("_"))
naming_convention = all(len(rep.split("_")) == first_rep_split for rep in self.intensity_column_names)
# create a mapping template if the convention is not followed and the file doesn't exist
if not naming_convention:
mapping_template_name = os.path.join(self.start_dir, MQReader.mapping_txt.replace("ing", "ing_template"))
self.logger.warning("Naming of experiments does not follow naming convention, "
"please consider using a %s file", MQReader.mapping_txt)
if os.path.isfile(mapping_template_name):
self.logger.warning("Currently unused %s file in the directory", mapping_template_name)
else:
self.logger.warning("Creating %s file. Please provide a mapping that follows "
"the indicated naming convention", mapping_template_name)
old_sample_names = sorted([x.replace('Intensity ', '') for x in self.proteins_txt_columns
if x.startswith('Intensity ')], key=len, reverse=True)
# dump the names that still correspond to the names in the txt files
mapping = pd.DataFrame({"old name": old_sample_names,
"new name": ["groupname_experimentname_techrepname"] * len(old_sample_names)})
mapping.to_csv(mapping_template_name, header=True, index=False, sep="\t")
return naming_convention
def determine_groupings(self):
# extract the analysis design from the file
if not self.reader_config.get("analysis_design", False):
# try to automatically determine experimental setup
# if the naming convention is followed it is quite easy
if self.naming_convention:
analysis_design = get_analysis_design(self.intensity_column_names)
# otherwise we can just guess grouping
else:
analysis_design = self.guess_analysis_design(self.intensity_column_names)
else:
analysis_design = self.reader_config.get("analysis_design")
return analysis_design
def guess_analysis_design(self, all_reps):
raise NotImplementedError("This is not implemented at the moment. Please stick to the naming convention")
# TODO update the attempted matching mechanism
def preprocess_proteinGroups(self):
file_dir = os.path.join(self.data_dir, MQReader.proteins_txt)
df_protein_groups = pd.read_csv(file_dir, sep="\t")
df_protein_groups.columns = self.rename_df_columns(df_protein_groups.columns)
not_contaminants = (df_protein_groups[
["Only identified by site", "Reverse", "Potential contaminant"]] == "+"
).sum(axis=1) == 0
self.logger.debug("Removing %s rows from %s because they are marked as contaminant",
(~not_contaminants).sum(), MQReader.proteins_txt)
df_protein_groups = df_protein_groups[not_contaminants]
if any(df_protein_groups["Fasta headers"].isna()):
self.logger.warning("Missing fasta headers using default columns for information")
gene_name = df_protein_groups["Gene names"]
sep_ind = gene_name.str.contains(";").fillna(False)
if sep_ind.sum() > 0:
gene_name[sep_ind] = gene_name[sep_ind].str.split(";", expand=True)[0]
concat_df = pd.DataFrame({
"protein id": df_protein_groups["Protein names"],
"Gene name": gene_name.str.upper(),
"Protein name": ["Missing"] * gene_name.shape[0],
})
else:
# split the fasta headers
colon_start = df_protein_groups["Fasta headers"].str.startswith(";")
df_protein_groups.loc[colon_start, "Fasta headers"] = df_protein_groups.loc[
colon_start, "Fasta headers"].str.lstrip(";")
# first split all fasta headers that contain multiple entries
sep_ind = df_protein_groups["Fasta headers"].str.contains(";").fillna(False)
# replace all fasta headers with multiple entries with only the first one
df_protein_groups.loc[sep_ind, "Fasta headers"] = \
df_protein_groups.loc[sep_ind, "Fasta headers"].str.split(";", expand=True)[0]
# split the fasta headers with the pipe symbol
fasta_col = df_protein_groups["Fasta headers"].str.split("|", n=2).apply(pd.Series)
fasta_col.columns = ["trash", "protein id", "description"]
# extract the gene name from the description eg: "GN=abcd"
gene_names_fasta = fasta_col["description"].str.extract(r"(GN=(.*?)(\s|$))")[1]
# added upper() function to avoid that non-human gene names are not recognized
concat_df = pd.DataFrame({
"protein id": fasta_col["protein id"],
"Gene name": gene_names_fasta.str.upper(),
"Protein name": fasta_col["description"].str.split("_", expand=True)[0]
})
# concat all important columns with the original dataframe
df_protein_groups = pd.concat([df_protein_groups, concat_df], axis=1)
# remove all rows where the column used for indexing is missing
mask = ~pd.isna(df_protein_groups[self.index_col])
df_protein_groups = df_protein_groups.loc[mask]
if ~mask.sum() > 0:
self.logger.warning("Removing %s rows because the index col information from: %s is missing",
~mask.sum(), self.index_col)
# set index
self.logger.info("Setting index of %s to %s", MQReader.proteins_txt, self.index_col)
df_protein_groups = df_protein_groups.set_index(df_protein_groups[self.index_col], drop=False)
# convert all non numeric intensities
for col in [col for col in df_protein_groups.columns if "Intensity " in col or "LFQ " in col or "iBAQ " in col]:
if not is_numeric_dtype(df_protein_groups[col]):
df_protein_groups[col] = df_protein_groups[col].apply(lambda x: x.replace(",", ".")).fillna(0)
df_protein_groups[col] = df_protein_groups[col].astype("int64")
# handle all rows with duplicated index column
duplicates = df_protein_groups.duplicated(subset=self.index_col, keep=False)
if any(duplicates):
self.logger.warning("Found duplicates in %s column. Duplicate names: %s",
self.index_col, ", ".join(df_protein_groups[duplicates].loc[:, self.index_col]))
if self.duplicate_handling == "drop":
self.logger.warning("Dropping all %s duplicates.", duplicates.sum())
df_protein_groups = df_protein_groups.drop_duplicates(subset=self.index_col, keep=False)
elif self.duplicate_handling == "sum":
def group_sum(x):
x.iloc[0].loc[x.select_dtypes("number").columns] = x.sum(axis=0, numeric_only=True)
return x.iloc[0]
new_index = df_protein_groups.index.drop_duplicates(keep=False)
duplicate_index = df_protein_groups.index.difference(new_index)
df_dup = df_protein_groups.loc[duplicate_index, :]
self.logger.warning("Merging %s rows into %s by summing numerical columns. "
"Some information might be incorrect", df_dup.shape[0], duplicate_index.shape[0])
df_dup = df_dup.groupby(df_dup.index).apply(group_sum)
df_protein_groups = pd.concat([df_protein_groups.loc[new_index, :], df_dup], axis=0)
self.logger.debug("%s shape after preprocessing: %s", MQReader.proteins_txt, df_protein_groups.shape)
return df_protein_groups
def preprocess_peptides(self):
file_dir = os.path.join(self.data_dir, MQReader.peptides_txt)
df_peptides = pd.read_csv(file_dir, sep="\t")
df_peptides.columns = self.rename_df_columns(df_peptides.columns)
not_contaminants = (df_peptides[
["Reverse", "Potential contaminant"]] == "+"
).sum(axis=1) == 0
df_peptides = df_peptides[not_contaminants]
self.logger.debug("Removing %s rows from %s because they are marked as contaminant",
(~not_contaminants).sum(), MQReader.peptides_txt)
return df_peptides
def preprocess_summary(self):
file_dir = os.path.join(self.data_dir, MQReader.summary_txt)
df_summary = pd.read_csv(file_dir, sep="\t")
df_summary.columns = self.rename_df_columns(df_summary.columns)
df_summary = df_summary[df_summary["Enzyme"].notna()]
df_summary["Experiment"] = self.rename_df_columns(df_summary["Experiment"])
return df_summary
def preprocess_parameters(self):
file_dir = os.path.join(self.data_dir, MQReader.parameters_txt)
df_parameters = pd.read_csv(file_dir, sep="\t", index_col=[0], squeeze=True)
return df_parameters
def preprocess_evidence(self):
file_dir = os.path.join(self.data_dir, MQReader.evidence_txt)
df_evidence = pd.read_csv(file_dir, sep="\t")
not_contaminants = (df_evidence[["Reverse", "Potential contaminant"]] == "+").sum(axis=1) == 0
df_evidence = df_evidence[not_contaminants]
df_evidence.columns = self.rename_df_columns(df_evidence.columns)
df_evidence["Experiment"] = self.rename_df_columns(df_evidence["Experiment"].tolist())
return df_evidence
def preprocess_msScans(self):
file_dir = os.path.join(self.data_dir, MQReader.ms_scans_txt)
df_msscans = pd.read_csv(file_dir, sep="\t", index_col=[0],
usecols=["Raw file", "Total ion current", "Retention time"])
return df_msscans
def preprocess_msmsScans(self):
file_dir = os.path.join(self.data_dir, MQReader.msms_scans_txt)
df_msmsscans = pd.read_csv(file_dir, sep="\t", index_col=[0],
usecols=["Raw file", "Total ion current", "Retention time"])
return df_msmsscans | en | 0.802204 | # TODO connect this to the configs of the initializer # TODO only add this if is not there # read a sample of all required files. If any required file is missing exit # but we need only one file from the max quant results # read the sample name mapping file # rename all columns based on the mapping # get columns that should be dropped # subset on all columns that start with intensity # check the naming convention # determine the grouping # add a replace value for the default string # does the name follow the convention # create a mapping template if the convention is not followed and the file doesn't exist # dump the names that still correspond to the names in the txt files # extract the analysis design from the file # try to automatically determine experimental setup # if the naming convention is followed it is quite easy # otherwise we can just guess grouping # TODO update the attempted matching mechanism # split the fasta headers # first split all fasta headers that contain multiple entries # replace all fasta headers with multiple entries with only the first one # split the fasta headers with the pipe symbol # extract the gene name from the description eg: "GN=abcd" # added upper() function to avoid that non-human gene names are not recognized # concat all important columns with the original dataframe # remove all rows where the column used for indexing is missing # set index # convert all non numeric intensities # handle all rows with duplicated index column | 2.676783 | 3 |
api.py | mohsalsaleem/RPY | 0 | 6616991 | <filename>api.py<gh_stars>0
import json
import urllib2
import requests
from pprint import pprint
with open("newsFile.json") as jsonFile:
data = json.load(jsonFile)
pprint(data)
responses = []
for item in data["NewsItem"]:
title = item["HeadLine"]
content = item["Caption"]
r = requests.post("https://kceg.herokuapp.com/messages.json?message[name]="+title+"&message[message]="+content)
responses.append(r.status_code)
print responses
#r = requests.post("https://kceg.herokuapp.com/messages.json?message[name]=saleem&message[phone]=123456789")
#print r.status_code
| <filename>api.py<gh_stars>0
import json
import urllib2
import requests
from pprint import pprint
with open("newsFile.json") as jsonFile:
data = json.load(jsonFile)
pprint(data)
responses = []
for item in data["NewsItem"]:
title = item["HeadLine"]
content = item["Caption"]
r = requests.post("https://kceg.herokuapp.com/messages.json?message[name]="+title+"&message[message]="+content)
responses.append(r.status_code)
print responses
#r = requests.post("https://kceg.herokuapp.com/messages.json?message[name]=saleem&message[phone]=123456789")
#print r.status_code
| en | 0.18544 | #r = requests.post("https://kceg.herokuapp.com/messages.json?message[name]=saleem&message[phone]=123456789") #print r.status_code | 3.448455 | 3 |
tests/ble_gatts_test.py | lagerdata/demo-nrf52-hrs | 0 | 6616992 | from lager import lager
from lager.ble import Central
# Standard Bluetooth SIG 128-bit UUIDs: the 16-bit assigned numbers expanded
# with the Bluetooth Base UUID (0000xxxx-0000-1000-8000-00805f9b34fb).
# Heart Rate service (0x180D) and its characteristics.
HRM_SERVICE = "0000180d-0000-1000-8000-00805f9b34fb"
HRM_BODY_SENSOR_LOCATION_CHAR = "00002a38-0000-1000-8000-00805f9b34fb"  # Body Sensor Location (0x2A38)
HRM_MEASUREMENT_CHAR = "00002a37-0000-1000-8000-00805f9b34fb"  # Heart Rate Measurement (0x2A37)
# Battery service (0x180F).
BATTERY_SERVICE = "0000180f-0000-1000-8000-00805f9b34fb"
BATTERY_LEVEL_CHAR = "00002a19-0000-1000-8000-00805f9b34fb"  # Battery Level (0x2A19)
# Device Information service (0x180A).
DEVICE_INFORMATION_SERVICE = "0000180a-0000-1000-8000-00805f9b34fb"
MFG_NAME_STRING_CHAR = "00002a29-0000-1000-8000-00805f9b34fb"  # Manufacturer Name String (0x2A29)
def display_gatts_table(central, device):
    """Connect to *device* and print its full GATT table.

    Prints each discovered service, then every characteristic of that
    service together with the characteristic's property list.

    Raises SystemExit when service discovery returns nothing.
    """
    with central.connect(device[0]) as client:
        discovered = client.get_services()
        if not discovered:
            raise SystemExit("No Services Found")
        for svc in discovered:
            print(svc)
            for char in svc.characteristics:
                print(f"\t{char}")
                print(f"\t{char.properties}")
def test_gatts_table_hrm(central, device):
    """Verify the Heart Rate service and its characteristics.

    Confirms the Heart Rate service (0x180D) exposes the Heart Rate
    Measurement characteristic with the 'notify' property and the Body
    Sensor Location characteristic with the 'read' property.

    Raises SystemExit describing the first missing service,
    characteristic, or property.
    """
    with central.connect(device[0]) as client:
        print("Verifying HRM Service and Characteristics")
        services = client.get_services()
        if not services:
            raise SystemExit("No Services Found")
        # Check that the service, and its characteristics, actually exist.
        hrm_service = services.get_service(HRM_SERVICE)
        if not hrm_service:
            raise SystemExit(f"HRM Service not found:{services}")
        if not any(char.uuid == HRM_MEASUREMENT_CHAR for char in hrm_service.characteristics):
            raise SystemExit("Heart Rate Measurement Not Found")
        if not any(char.uuid == HRM_BODY_SENSOR_LOCATION_CHAR for char in hrm_service.characteristics):
            raise SystemExit("Heart Rate Body Sensor Location Not Found")
        # Verify the properties of each characteristic. Membership tests are
        # used instead of indexing because the order of the property list is
        # not guaranteed, and SystemExit (not `assert`, which is stripped
        # under `python -O`) keeps failure reporting consistent with the
        # checks above.
        for char in hrm_service.characteristics:
            if char.uuid == HRM_MEASUREMENT_CHAR and 'notify' not in char.properties:
                raise SystemExit("Heart Rate Measurement missing 'notify' property")
            if char.uuid == HRM_BODY_SENSOR_LOCATION_CHAR and 'read' not in char.properties:
                raise SystemExit("Heart Rate Body Sensor Location missing 'read' property")
def test_gatts_table_battery(central, device):
    """Verify the Battery service and its characteristics.

    Confirms the Battery service (0x180F) exposes the Battery Level
    characteristic with both the 'read' and 'notify' properties.

    Raises SystemExit describing the first missing service,
    characteristic, or property.
    """
    with central.connect(device[0]) as client:
        print("Verifying Battery Service and Characteristics")
        services = client.get_services()
        if not services:
            raise SystemExit("No Services Found")
        batt_service = services.get_service(BATTERY_SERVICE)
        if not batt_service:
            raise SystemExit(f"Battery Service not found:{services}")
        if not any(char.uuid == BATTERY_LEVEL_CHAR for char in batt_service.characteristics):
            raise SystemExit("Battery Level Not Found")
        # Membership tests instead of properties[0]/properties[1]: the order
        # of the property list is not guaranteed. SystemExit (not `assert`,
        # which is stripped under `python -O`) matches the failure style of
        # the checks above.
        for char in batt_service.characteristics:
            if char.uuid == BATTERY_LEVEL_CHAR:
                for prop in ('read', 'notify'):
                    if prop not in char.properties:
                        raise SystemExit(f"Battery Level missing '{prop}' property")
def test_gatts_table_dis(central, device):
    """Verify the Device Information service and its characteristics.

    Confirms the Device Information service (0x180A) exposes the
    Manufacturer Name String characteristic with the 'read' property.

    Raises SystemExit describing the first missing service,
    characteristic, or property.
    """
    with central.connect(device[0]) as client:
        print("Verifying Device Information Service and Characteristics")
        services = client.get_services()
        if not services:
            raise SystemExit("No Services Found")
        di_service = services.get_service(DEVICE_INFORMATION_SERVICE)
        if not di_service:
            raise SystemExit(f"Device Information Service not found:{services}")
        if not any(char.uuid == MFG_NAME_STRING_CHAR for char in di_service.characteristics):
            raise SystemExit("Mfg String Not Found")
        # Membership test instead of properties[0]: the order of the property
        # list is not guaranteed. SystemExit (not `assert`, stripped under
        # `python -O`) keeps failure reporting consistent with the rest of
        # this script.
        for char in di_service.characteristics:
            if char.uuid == MFG_NAME_STRING_CHAR and 'read' not in char.properties:
                raise SystemExit("Mfg Name String missing 'read' property")
def main():
    """Entry point: connect to the nRF52 DUT through the Lager gateway,
    reset it, scan for the 'Nordic_HRM' peripheral over BLE, and run the
    GATT-table checks.

    The debugger connection is always closed, even when a check fails
    with SystemExit, so the gateway is left in a usable state.
    """
    gateway = lager.Lager()
    dut = gateway.connect("nrf52",interface="ftdi",transport="swd",speed=3000)
    print(f"Connected to DUT:{dut}")
    try:
        # Reset so the firmware starts advertising from a known state.
        dut.reset()
        central = Central()
        device = central.scan(name='Nordic_HRM')
        display_gatts_table(central, device)
        test_gatts_table_hrm(central, device)
        test_gatts_table_battery(central, device)
        test_gatts_table_dis(central, device)
        print("Brilliant!")
    finally:
        # Previously dut.close() was skipped whenever any check raised.
        dut.close()


if __name__ == '__main__':
    main()
| from lager import lager
from lager.ble import Central
# Standard Bluetooth SIG 128-bit UUIDs: the 16-bit assigned numbers expanded
# with the Bluetooth Base UUID (0000xxxx-0000-1000-8000-00805f9b34fb).
HRM_SERVICE = "0000180d-0000-1000-8000-00805f9b34fb"  # Heart Rate service (0x180D)
HRM_BODY_SENSOR_LOCATION_CHAR = "00002a38-0000-1000-8000-00805f9b34fb"  # Body Sensor Location (0x2A38)
HRM_MEASUREMENT_CHAR = "00002a37-0000-1000-8000-00805f9b34fb"  # Heart Rate Measurement (0x2A37)
BATTERY_SERVICE = "0000180f-0000-1000-8000-00805f9b34fb"  # Battery service (0x180F)
BATTERY_LEVEL_CHAR = "00002a19-0000-1000-8000-00805f9b34fb"  # Battery Level (0x2A19)
DEVICE_INFORMATION_SERVICE = "0000180a-0000-1000-8000-00805f9b34fb"  # Device Information service (0x180A)
MFG_NAME_STRING_CHAR = "00002a29-0000-1000-8000-00805f9b34fb"  # Manufacturer Name String (0x2A29)
def display_gatts_table(central, device):
    """Dump every discovered service, its characteristics, and their
    property lists to stdout for manual inspection."""
    with central.connect(device[0]) as session:
        discovered = session.get_services()
        if not discovered:
            raise SystemExit("No Services Found")
        for svc in discovered:
            print(svc)
            for char in svc.characteristics:
                print(f"\t{char}")
                print(f"\t{char.properties}")
def test_gatts_table_hrm(central, device):
    """Verify the Heart Rate service GATT table on the scanned device.

    Checks that the HRM service exists, that it exposes the Heart Rate
    Measurement and Body Sensor Location characteristics, and that each
    carries its expected property.

    Raises:
        SystemExit: if a service or characteristic is missing.
        AssertionError: if a characteristic lacks its expected property.
    """
    with central.connect(device[0]) as client:
        # Fixed typo in the status message ("Serivce" -> "Service").
        print("Verifying HRM Service and Characteristics")
        services = client.get_services()
        if not services:
            raise SystemExit("No Services Found")
        # Check that the service, and its characteristics, actually exist.
        hrm_service = services.get_service(HRM_SERVICE)
        if not hrm_service:
            raise SystemExit(f"HRM Service not found:{services}")
        if not any(char.uuid == HRM_MEASUREMENT_CHAR for char in hrm_service.characteristics):
            raise SystemExit("Heart Rate Measurement Not Found")
        if not any(char.uuid == HRM_BODY_SENSOR_LOCATION_CHAR for char in hrm_service.characteristics):
            raise SystemExit("Heart Rate Body Sensor Location Not Found")
        # Verify the properties of each characteristic; membership checks
        # avoid depending on the order the stack reports properties in.
        for char in hrm_service.characteristics:
            if char.uuid == HRM_MEASUREMENT_CHAR:
                assert 'notify' in char.properties
            if char.uuid == HRM_BODY_SENSOR_LOCATION_CHAR:
                assert 'read' in char.properties
def test_gatts_table_battery(central, device):
    """Verify the Battery service GATT table on the scanned device.

    Confirms the Battery service and its Battery Level characteristic
    exist, and that the characteristic is readable and notifiable.

    Raises:
        SystemExit: if the service or characteristic is missing.
        AssertionError: if an expected property is absent.
    """
    with central.connect(device[0]) as client:
        # Fixed typo in the status message ("Serivce" -> "Service").
        print("Verifying Battery Service and Characteristics")
        services = client.get_services()
        if not services:
            raise SystemExit("No Services Found")
        batt_service = services.get_service(BATTERY_SERVICE)
        if not batt_service:
            raise SystemExit(f"Battery Service not found:{services}")
        if not any(char.uuid == BATTERY_LEVEL_CHAR for char in batt_service.characteristics):
            raise SystemExit("Battery Level Not Found")
        for char in batt_service.characteristics:
            if char.uuid == BATTERY_LEVEL_CHAR:
                # Membership checks instead of properties[0]/properties[1]:
                # the reported ordering is a stack detail we must not rely on.
                assert 'read' in char.properties
                assert 'notify' in char.properties
def test_gatts_table_dis(central, device):
    """Verify the Device Information Service (DIS) GATT table.

    Confirms the DIS exists, that it exposes the Manufacturer Name String
    characteristic, and that the characteristic is readable.

    Raises:
        SystemExit: if the service or characteristic is missing.
        AssertionError: if the characteristic lacks the 'read' property.
    """
    with central.connect(device[0]) as client:
        # Fixed typo in the status message ("Serivce" -> "Service").
        print("Verifying Device Information Service and Characteristics")
        services = client.get_services()
        if not services:
            raise SystemExit("No Services Found")
        di_service = services.get_service(DEVICE_INFORMATION_SERVICE)
        if not di_service:
            raise SystemExit(f"Device Information Service not found:{services}")
        if not any(char.uuid == MFG_NAME_STRING_CHAR for char in di_service.characteristics):
            raise SystemExit("Mfg String Not Found")
        for char in di_service.characteristics:
            if char.uuid == MFG_NAME_STRING_CHAR:
                # Membership check: property ordering is not guaranteed.
                assert 'read' in char.properties
def main():
    """Entry point: connect to the nRF52 DUT, reset it, then validate its
    HRM, Battery, and Device Information GATT tables over BLE."""
    gateway = lager.Lager()
    # SWD probe settings for the nRF52 target; speed presumably in kHz
    # per debugger convention — TODO confirm against lager docs.
    dut = gateway.connect("nrf52",interface="ftdi",transport="swd",speed=3000)
    print(f"Connected to DUT:{dut}")
    # Reset the device so the tests run against freshly booted firmware.
    dut.reset()
    central = Central()
    # Scan for the advertised name used by the HRM sample firmware.
    device = central.scan(name='Nordic_HRM')
    display_gatts_table(central, device)
    test_gatts_table_hrm(central, device)
    test_gatts_table_battery(central, device)
    test_gatts_table_dis(central, device)
    print("Brilliant!")
    dut.close()
# Run the full verification suite when executed as a script.
if __name__ == '__main__':
    main()
| en | 0.917963 | #Check that the services, and the charactersitics for those services actually exist #Verify the properties for each characteristics #reset device | 2.691301 | 3 |
doc/_sphinx-ext/myliterate_directive.py | sathyanarayanrao/gimli | 0 | 6616993 | """
An adapted include directive with an optional preprocessor step.
By default, a python script will be converted that it fits into a literate reST
with the matplotlib plot directive using :context:.
All the code between "'''" "'''" and #! will be interpreted as reST, the rest is puted in
the plot directive
Using:
.. literate:: python.py
Options
-------
The ``literate`` directive supports the following options:
no options so far
Configuration options
---------------------
no options so far
"""
import sys, os, glob, shutil, imp, warnings, re, textwrap # , io
import traceback
#import exceptions
from docutils import io, nodes, statemachine, utils # overloads global io imported above!
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.misc import Include as BaseInclude
import sphinx
def preProcessLines(rawtext):
    """Convert raw Python-script lines into literate reST lines.

    Shebang lines (containing ``#!/``) are dropped, ``#!``-prefixed lines
    become plain reST text, triple-quote lines toggle "reST comment" mode,
    and each switch back to code opens a ``.. plot::`` directive.  Code
    lines are indented so they land inside the directive; reST lines are
    emitted verbatim.

    Args:
        rawtext: iterable of source lines (without trailing newlines).

    Returns:
        list[str]: the transformed lines.

    Side effects:
        Writes the result to ``tmp.txt`` in the current directory
        (debug dump, kept for backward compatibility).
    """
    def startCodeBlock(text):
        # Open a matplotlib plot directive; following code lines are
        # indented to fall inside it.
        text.append('')
        text.append('.. plot::')
        text.append(' :context:')
        text.append(' :include-source:')
        text.append('')

    isComment = False
    text = []
    for line in rawtext:
        if '#!/' in line:
            # Drop shebang lines entirely.
            continue
        elif '#!' in line:
            # "#!" marks a single reST line inside code: emit it verbatim
            # and reopen a code block right after it.
            text.append(line.replace("#!", '').lstrip())
            startCodeBlock(text)
            continue
        elif (not isComment) and (("'''" in line) or ('"""' in line)):
            # Opening triple quote: switch to reST (comment) mode.
            isComment = True
            continue
        elif isComment and (("'''" in line) or ('"""' in line)):
            # Closing triple quote: back to code mode, open a new block.
            isComment = False
            startCodeBlock(text)
            continue
        if not isComment:
            text.append(' ' + line)   # indent code into the directive
        else:
            text.append(line)         # reST text stays as-is
    # Debug dump of the generated document; a context manager guarantees
    # the handle is closed even if a write fails (original leaked on error).
    with open('tmp.txt', 'w') as fi:
        for l in text:
            fi.write(l + '\n')
    return text
#def preProcessLines( ... )
class MyLiterateInclude(BaseInclude):
    """
    Like the standard "Include" directive, but interprets absolute paths
    "correctly", i.e., relative to source directory, and pipes the included
    file through ``preProcessLines`` so a plain Python script renders as a
    literate document.
    """

    def run(self):
        """Read the target file, apply option filtering, and insert it.

        Returns a ``literal_block`` node when the ``literal`` option is
        set; otherwise feeds the pre-processed lines back into the reST
        state machine and returns an empty node list.
        """
        env = self.state.document.settings.env
        # "<name>" arguments are docutils' bundled standard includes; let
        # the base class handle their path resolution.
        if self.arguments[0].startswith('<') and \
                self.arguments[0].endswith('>'):
            return BaseInclude.run(self)
        # Resolve the argument relative to the Sphinx source directory.
        rel_filename, filename = env.relfn2path(self.arguments[0])
        self.arguments[0] = filename
        if not self.state.document.settings.file_insertion_enabled:
            raise self.warning('"%s" directive disabled.' % self.name)
        source = self.state_machine.input_lines.source(
            self.lineno - self.state_machine.input_offset - 1)
        source_dir = os.path.dirname(os.path.abspath(source))
        path = directives.path(self.arguments[0])
        if path.startswith('<') and path.endswith('>'):
            path = os.path.join(self.standard_include_path, path[1:-1])
        path = os.path.normpath(os.path.join(source_dir, path))
        path = utils.relative_path(None, path)
        path = nodes.reprunicode(path)
        encoding = self.options.get(
            'encoding', self.state.document.settings.input_encoding)
        tab_width = self.options.get(
            'tab-width', self.state.document.settings.tab_width)
        try:
            self.state.document.settings.record_dependencies.add(path)
            include_file = io.FileInput(
                source_path=path, encoding=encoding,
                error_handler=(self.state.document.settings.
                               input_encoding_error_handler),
                handle_io_errors=None)
        except IOError as error:
            raise self.severe('Problems with "%s" directive path:\n%s.' %
                              (self.name, error))
        startline = self.options.get('start-line', None)
        endline = self.options.get('end-line', None)
        try:
            if startline or (endline is not None):
                lines = include_file.readlines()
                rawtext = ''.join(lines[startline:endline])
            else:
                rawtext = include_file.read()
        except UnicodeError as error:
            # Bug fix: the original referenced an undefined name
            # ``ErrorString`` (NameError on this path); report the error
            # object directly, matching the IOError branch above.
            raise self.severe('Problem with "%s" directive:\n%s' %
                              (self.name, error))
        # start-after/end-before: no restrictions on newlines in match-text,
        # and no restrictions on matching inside lines vs. line boundaries
        after_text = self.options.get('start-after', None)
        if after_text:
            # skip content in rawtext before *and incl.* a matching text
            after_index = rawtext.find(after_text)
            if after_index < 0:
                raise self.severe('Problem with "start-after" option of "%s" '
                                  'directive:\nText not found.' % self.name)
            rawtext = rawtext[after_index + len(after_text):]
        before_text = self.options.get('end-before', None)
        if before_text:
            # skip content in rawtext after *and incl.* a matching text
            before_index = rawtext.find(before_text)
            if before_index < 0:
                raise self.severe('Problem with "end-before" option of "%s" '
                                  'directive:\nText not found.' % self.name)
            rawtext = rawtext[:before_index]
        if 'literal' in self.options:
            # Convert tabs to spaces, if `tab_width` is positive.
            if tab_width >= 0:
                text = rawtext.expandtabs(tab_width)
            else:
                text = rawtext
            literal_block = nodes.literal_block(rawtext, text, source=path)
            literal_block.line = 1
            return [literal_block]
        else:
            include_lines = statemachine.string2lines(
                rawtext, tab_width, convert_whitespace=1)
            # Literate pre-processing turns the raw script into reST.
            include_lines = preProcessLines(include_lines)
            self.state_machine.insert_input(include_lines, path)
            return []
def setup(app):
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
app.add_directive('literate', MyLiterateInclude ) | """
An adapted include directive with an optional preprocessor step.
By default, a python script will be converted that it fits into a literate reST
with the matplotlib plot directive using :context:.
All the code between "'''" "'''" and #! will be interpreted as reST, the rest is puted in
the plot directive
Using:
.. literate:: python.py
Options
-------
The ``literate`` directive supports the following options:
no options so far
Configuration options
---------------------
no options so far
"""
import sys, os, glob, shutil, imp, warnings, re, textwrap # , io
import traceback
#import exceptions
from docutils import io, nodes, statemachine, utils # overloads global io imported above!
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.misc import Include as BaseInclude
import sphinx
def preProcessLines(rawtext):
    """Turn a plain Python script into literate-reST input lines.

    Lines containing ``#!/`` are discarded, ``#!`` lines become bare reST
    text, triple-quoted spans pass through as reST, and everything else is
    indented one space so it sits inside a ``.. plot::`` directive.  The
    result is also dumped to ``tmp.txt`` for debugging.
    """
    directive_header = ['', '.. plot::', ' :context:', ' :include-source:', '']

    out = []
    in_rest = False
    for raw in rawtext:
        if '#!/' in raw:
            continue
        if '#!' in raw:
            out.append(raw.replace("#!", '').lstrip())
            out.extend(directive_header)
            continue
        if ("'''" in raw) or ('"""' in raw):
            if in_rest:
                in_rest = False
                out.extend(directive_header)
            else:
                in_rest = True
            continue
        out.append(raw if in_rest else ' ' + raw)

    dump = open('tmp.txt', 'w')
    for entry in out:
        dump.write(entry + '\n')
    dump.close()

    return out
#def preProcessLines( ... )
class MyLiterateInclude(BaseInclude):
    """
    Like the standard "Include" directive, but interprets absolute paths
    "correctly", i.e., relative to source directory.
    """

    def run(self):
        """Read the target file, apply option filtering, and insert it into
        the document, pre-processing it with ``preProcessLines``."""
        env = self.state.document.settings.env
        # "<name>" arguments are docutils' bundled standard includes; the
        # base class handles their path resolution.
        if self.arguments[0].startswith('<') and \
                self.arguments[0].endswith('>'):
            # docutils "standard" includes, do not do path processing
            return BaseInclude.run(self)
        # Resolve the argument relative to the Sphinx source directory.
        rel_filename, filename = env.relfn2path(self.arguments[0])
        self.arguments[0] = filename
        if not self.state.document.settings.file_insertion_enabled:
            raise self.warning('"%s" directive disabled.' % self.name)
        source = self.state_machine.input_lines.source(
            self.lineno - self.state_machine.input_offset - 1)
        source_dir = os.path.dirname(os.path.abspath(source))
        path = directives.path(self.arguments[0])
        if path.startswith('<') and path.endswith('>'):
            path = os.path.join(self.standard_include_path, path[1:-1])
        path = os.path.normpath(os.path.join(source_dir, path))
        path = utils.relative_path(None, path)
        path = nodes.reprunicode(path)
        # Per-directive overrides fall back to the document-wide settings.
        encoding = self.options.get(
            'encoding', self.state.document.settings.input_encoding)
        tab_width = self.options.get(
            'tab-width', self.state.document.settings.tab_width)
        try:
            self.state.document.settings.record_dependencies.add(path)
            include_file = io.FileInput(
                source_path=path, encoding=encoding,
                error_handler=(self.state.document.settings.\
                               input_encoding_error_handler),
                handle_io_errors=None)
        except IOError as error:
            raise self.severe('Problems with "%s" directive path:\n%s.' %
                              (self.name, error))
        startline = self.options.get('start-line', None)
        endline = self.options.get('end-line', None)
        try:
            if startline or (endline is not None):
                lines = include_file.readlines()
                rawtext = ''.join(lines[startline:endline])
            else:
                rawtext = include_file.read()
        except UnicodeError as error:
            # NOTE(review): ``ErrorString`` is never imported or defined in
            # this module, so this branch would raise NameError — confirm
            # and replace with ``error`` or import docutils' ErrorString.
            raise self.severe('Problem with "%s" directive:\n%s' %
                              (self.name, ErrorString(error)))
        # start-after/end-before: no restrictions on newlines in match-text,
        # and no restrictions on matching inside lines vs. line boundaries
        after_text = self.options.get('start-after', None)
        if after_text:
            # skip content in rawtext before *and incl.* a matching text
            after_index = rawtext.find(after_text)
            if after_index < 0:
                raise self.severe('Problem with "start-after" option of "%s" '
                                  'directive:\nText not found.' % self.name)
            rawtext = rawtext[after_index + len(after_text):]
        before_text = self.options.get('end-before', None)
        if before_text:
            # skip content in rawtext after *and incl.* a matching text
            before_index = rawtext.find(before_text)
            if before_index < 0:
                raise self.severe('Problem with "end-before" option of "%s" '
                                  'directive:\nText not found.' % self.name)
            rawtext = rawtext[:before_index]
        if 'literal' in self.options:
            # Convert tabs to spaces, if `tab_width` is positive.
            if tab_width >= 0:
                text = rawtext.expandtabs(tab_width)
            else:
                text = rawtext
            literal_block = nodes.literal_block(rawtext, text, source=path)
            literal_block.line = 1
            return [literal_block]
        else:
            include_lines = statemachine.string2lines(
                rawtext, tab_width, convert_whitespace=1)
            # Literate pre-processing turns the raw script into reST.
            include_lines = preProcessLines( include_lines )
            self.state_machine.insert_input(include_lines, path)
            return []
def setup(app):
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
app.add_directive('literate', MyLiterateInclude ) | en | 0.638281 | An adapted include directive with an optional preprocessor step. By default, a python script will be converted that it fits into a literate reST with the matplotlib plot directive using :context:. All the code between "'''" "'''" and #! will be interpreted as reST, the rest is puted in the plot directive Using: .. literate:: python.py Options ------- The ``literate`` directive supports the following options: no options so far Configuration options --------------------- no options so far # , io #import exceptions # overloads global io imported above! " in line) or ('"""' in line)): isComment = True continue elif (isComment == True) and ((" ' in line)): isComment = False startCodeBlock( text ) continue if not isComment: text.append(' ' + line ) else: text.append( line ) fi = open( 'tmp.txt', 'w') for l in text: fi.write( l + '\n') fi.close() return text #def preProcessLines( ... ) class MyLiterateInclude(BaseInclude): # docutils "standard" includes, do not do path processing # start-after/end-before: no restrictions on newlines in match-text, # and no restrictions on matching inside lines vs. line boundaries # skip content in rawtext before *and incl.* a matching text # skip content in rawtext after *and incl.* a matching text # Convert tabs to spaces, if `tab_width` is positive. | 2.664814 | 3 |
baza/migrations/0001_initial.py | szymanskirafal/ab | 0 | 6616994 | <reponame>szymanskirafal/ab
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-09 21:44
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial, auto-generated schema for the ``baza`` app (Django 1.9).

    Do not hand-edit the operations once this migration has been applied
    to any database; create a follow-up migration instead.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        # Approvals/legalisations register for a piece of equipment
        # (model names are Polish — descriptions inferred from field names).
        migrations.CreateModel(
            name='DopuszczeniaLegalizacje',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nazwa_urzadzenia', models.CharField(max_length=100)),
                ('nr_urzadzenia', models.CharField(max_length=50)),
                ('opis_czynnosci', models.CharField(max_length=150)),
                ('jednostka_dozorowa', models.CharField(max_length=50)),
                ('data_ostatniej_czynnosci', models.DateField(blank=True, null=True)),
                ('nr_decyzji', models.CharField(max_length=100)),
                ('data_najblizszej_czynnosci', models.DateField()),
                ('osoba_odpowiedzialna_za_nadzor', models.CharField(max_length=100)),
                ('uwagi', models.TextField()),
            ],
        ),
        # Site/location (fuel station, fuel depot, or building).
        migrations.CreateModel(
            name='Miejsce',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('typ', models.CharField(choices=[('stacja', 'Stacja Paliw'), ('magazyn', 'Magazyn Paliw'), ('budynek', 'Budynek')], max_length=50)),
                ('nazwa', models.CharField(max_length=100)),
                ('adres', models.CharField(max_length=150)),
                ('telefon', models.CharField(max_length=30)),
                ('created_by', models.CharField(max_length=30)),
                ('grupa', models.CharField(blank=True, max_length=20, null=True)),
            ],
        ),
        # Facility (fuel station or depot).
        migrations.CreateModel(
            name='Obiekt',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('typ', models.CharField(choices=[('stacja', 'Stacja Paliw'), ('magazyn', 'Magazyn Paliw')], max_length=100)),
                ('nazwa', models.CharField(max_length=100)),
                ('lokalizacja', models.CharField(default=None, max_length=100)),
                ('nr', models.CharField(default=None, max_length=100)),
                ('wytyczne', models.TextField(default=None)),
            ],
        ),
        # Facility variant tied to a Miejsce (site).
        migrations.CreateModel(
            name='ObiektK',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nazwa', models.CharField(max_length=100)),
                ('dane_techniczne', models.TextField()),
                ('miejsce', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='baza.Miejsce')),
            ],
        ),
        # Item/asset; its FK to Urzadzenie is added below via AddField.
        migrations.CreateModel(
            name='Przedmiot',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nazwa', models.CharField(max_length=100)),
                ('lokalizacja', models.TextField()),
                ('nr', models.CharField(max_length=100)),
                ('wytyczne', models.TextField()),
            ],
        ),
        # Technical inspections register, linked to an ObiektK.
        migrations.CreateModel(
            name='PrzegladyTechniczne',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nazwa_urzadzenia', models.CharField(max_length=100)),
                ('nr_urzadzenia', models.CharField(max_length=50)),
                ('opis_czynnosci', models.CharField(max_length=150)),
                ('jednostka_kontrolujaca', models.CharField(max_length=50)),
                ('data_ostatniej_czynnosci', models.DateField(blank=True, null=True)),
                ('nr_protokolu', models.CharField(max_length=100)),
                ('data_najblizszej_czynnosci', models.DateField()),
                ('osoba_odpowiedzialna_za_nadzor', models.CharField(max_length=100)),
                ('uwagi', models.TextField()),
                ('obiektk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='baza.ObiektK')),
            ],
        ),
        # Device/equipment, linked to an Obiekt.
        migrations.CreateModel(
            name='Urzadzenie',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nazwa', models.CharField(max_length=100)),
                ('lokalizacja', models.TextField()),
                ('nr', models.CharField(max_length=100)),
                ('wytyczne', models.TextField()),
                ('obiekt', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='baza.Obiekt')),
            ],
        ),
        # These FKs are added after their target models exist above.
        migrations.AddField(
            model_name='przedmiot',
            name='urzadzenie',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='baza.Urzadzenie'),
        ),
        migrations.AddField(
            model_name='dopuszczenialegalizacje',
            name='obiektk',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='baza.ObiektK'),
        ),
    ]
| # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-09 21:44
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial, auto-generated schema for the ``baza`` app (Django 1.9).

    Do not hand-edit the operations once this migration has been applied
    to any database; create a follow-up migration instead.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        # Approvals/legalisations register for a piece of equipment
        # (model names are Polish — descriptions inferred from field names).
        migrations.CreateModel(
            name='DopuszczeniaLegalizacje',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nazwa_urzadzenia', models.CharField(max_length=100)),
                ('nr_urzadzenia', models.CharField(max_length=50)),
                ('opis_czynnosci', models.CharField(max_length=150)),
                ('jednostka_dozorowa', models.CharField(max_length=50)),
                ('data_ostatniej_czynnosci', models.DateField(blank=True, null=True)),
                ('nr_decyzji', models.CharField(max_length=100)),
                ('data_najblizszej_czynnosci', models.DateField()),
                ('osoba_odpowiedzialna_za_nadzor', models.CharField(max_length=100)),
                ('uwagi', models.TextField()),
            ],
        ),
        # Site/location (fuel station, fuel depot, or building).
        migrations.CreateModel(
            name='Miejsce',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('typ', models.CharField(choices=[('stacja', 'Stacja Paliw'), ('magazyn', 'Magazyn Paliw'), ('budynek', 'Budynek')], max_length=50)),
                ('nazwa', models.CharField(max_length=100)),
                ('adres', models.CharField(max_length=150)),
                ('telefon', models.CharField(max_length=30)),
                ('created_by', models.CharField(max_length=30)),
                ('grupa', models.CharField(blank=True, max_length=20, null=True)),
            ],
        ),
        # Facility (fuel station or depot).
        migrations.CreateModel(
            name='Obiekt',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('typ', models.CharField(choices=[('stacja', 'Stacja Paliw'), ('magazyn', 'Magazyn Paliw')], max_length=100)),
                ('nazwa', models.CharField(max_length=100)),
                ('lokalizacja', models.CharField(default=None, max_length=100)),
                ('nr', models.CharField(default=None, max_length=100)),
                ('wytyczne', models.TextField(default=None)),
            ],
        ),
        # Facility variant tied to a Miejsce (site).
        migrations.CreateModel(
            name='ObiektK',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nazwa', models.CharField(max_length=100)),
                ('dane_techniczne', models.TextField()),
                ('miejsce', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='baza.Miejsce')),
            ],
        ),
        # Item/asset; its FK to Urzadzenie is added below via AddField.
        migrations.CreateModel(
            name='Przedmiot',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nazwa', models.CharField(max_length=100)),
                ('lokalizacja', models.TextField()),
                ('nr', models.CharField(max_length=100)),
                ('wytyczne', models.TextField()),
            ],
        ),
        # Technical inspections register, linked to an ObiektK.
        migrations.CreateModel(
            name='PrzegladyTechniczne',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nazwa_urzadzenia', models.CharField(max_length=100)),
                ('nr_urzadzenia', models.CharField(max_length=50)),
                ('opis_czynnosci', models.CharField(max_length=150)),
                ('jednostka_kontrolujaca', models.CharField(max_length=50)),
                ('data_ostatniej_czynnosci', models.DateField(blank=True, null=True)),
                ('nr_protokolu', models.CharField(max_length=100)),
                ('data_najblizszej_czynnosci', models.DateField()),
                ('osoba_odpowiedzialna_za_nadzor', models.CharField(max_length=100)),
                ('uwagi', models.TextField()),
                ('obiektk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='baza.ObiektK')),
            ],
        ),
        # Device/equipment, linked to an Obiekt.
        migrations.CreateModel(
            name='Urzadzenie',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nazwa', models.CharField(max_length=100)),
                ('lokalizacja', models.TextField()),
                ('nr', models.CharField(max_length=100)),
                ('wytyczne', models.TextField()),
                ('obiekt', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='baza.Obiekt')),
            ],
        ),
        # These FKs are added after their target models exist above.
        migrations.AddField(
            model_name='przedmiot',
            name='urzadzenie',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='baza.Urzadzenie'),
        ),
        migrations.AddField(
            model_name='dopuszczenialegalizacje',
            name='obiektk',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='baza.ObiektK'),
        ),
    ]
] | en | 0.842419 | # -*- coding: utf-8 -*- # Generated by Django 1.9 on 2015-12-09 21:44 | 1.650972 | 2 |
py-ln-gateway/py_ln_gateway/models.py | jtimon/multi-ln-demo | 6 | 6616995 |
from sqlalchemy import Column, DateTime, Integer, Numeric, String, LargeBinary, TypeDecorator
from py_ln_gateway.db import Base
# There is no absolute limit for bolt11. There are practical limits based on QR code sizes.
# There's no maximum to find in the spec, but apparently 2048 for validation and storage is good enough as a guess.
# lnd accepts invoices up to 7089 bytes https://github.com/lightningnetwork/lnd/blob/master/zpay32/invoice.go#L79
MAX_BOLT11 = 2048
MAX_URL_LEN = 200
class ByteHexString(TypeDecorator):
    """Store hex-digit strings as raw bytes and decode back on load.

    Python side: ``str`` of hexadecimal digits (e.g. a payment hash).
    Database side: ``LargeBinary`` of half that length.  Falsy values
    (``None``, ``''``) are persisted as SQL NULL and loaded as ``None``.
    """
    impl = LargeBinary
    # The type is stateless, so results are safe to cache; silences the
    # SQLAlchemy 1.4+ cache warning and is ignored by older versions.
    cache_ok = True

    def process_bind_param(self, value, dialect):
        """Convert an outgoing hex string to bytes (or NULL)."""
        if not value:
            return None
        if not isinstance(value, str):
            raise TypeError("ByteHexString columns support only str values.")
        return bytes.fromhex(value)

    def process_result_value(self, value, dialect):
        """Convert incoming bytes back to a lowercase hex string."""
        return value.hex() if value else None
class Price(Base):
    """Conversion rate between a source chain and a destination chain.

    ``price`` is presumably the src→dest amount multiplier — confirm
    against the gateway's conversion logic.
    """
    __tablename__ = 'prices'
    # The id is composed by the src_chain_id followed by the dest_chain_id
    # (two 32-byte chain ids concatenated, hence 2 * 32 raw bytes).
    src_dest = Column(ByteHexString(2 * 32), primary_key=True)
    # Fixed-point rate with 4 decimal places.
    price = Column(Numeric(10,4), nullable=False)
class PendingRequest(Base):
    """A cross-chain payment that has been quoted but not yet settled.

    Field prefixes: ``src_`` for the source-chain invoice, ``dest_`` for
    the destination-chain invoice, and ``other_gw_`` for an optional leg
    through another gateway (all nullable) — roles inferred from names;
    confirm against the gateway request-handling code.
    """
    __tablename__ = 'pending_requests'
    # --- source-chain invoice ---
    src_payment_hash = Column(ByteHexString(32), primary_key=True)
    src_chain = Column(ByteHexString(32), nullable=False)
    src_bolt11 = Column(String(MAX_BOLT11), nullable=False)
    src_expires_at = Column(DateTime(), nullable=False)
    src_amount = Column(Integer(), nullable=False)
    # --- destination-chain invoice ---
    dest_payment_hash = Column(ByteHexString(32), nullable=False, unique=True)
    dest_chain = Column(ByteHexString(32), nullable=False)
    dest_bolt11 = Column(String(MAX_BOLT11), nullable=False)
    dest_expires_at = Column(DateTime(), nullable=False)
    dest_amount = Column(Integer(), nullable=False)
    # --- optional intermediate-gateway leg (NULL when unused) ---
    other_gw_payment_hash = Column(ByteHexString(32))
    other_gw_url = Column(String(MAX_URL_LEN))
    other_gw_chain = Column(ByteHexString(32))
    other_gw_bolt11 = Column(String(MAX_BOLT11))
    other_gw_expires_at = Column(DateTime())
    other_gw_amount = Column(Integer())
class PaidRequest(Base):
    """A settled cross-chain payment.

    Mirrors ``PendingRequest`` but stores the payment preimages obtained
    on settlement instead of the quoted amounts.
    """
    __tablename__ = 'paid_requests'
    # --- source-chain invoice ---
    src_payment_hash = Column(ByteHexString(32), primary_key=True)
    src_chain = Column(ByteHexString(32), nullable=False)
    src_bolt11 = Column(String(MAX_BOLT11), nullable=False)
    src_expires_at = Column(DateTime(), nullable=False)
    src_payment_preimage = Column(ByteHexString(32), nullable=False)
    # --- destination-chain invoice ---
    dest_payment_hash = Column(ByteHexString(32), nullable=False, unique=True)
    dest_chain = Column(ByteHexString(32), nullable=False)
    dest_bolt11 = Column(String(MAX_BOLT11), nullable=False)
    dest_expires_at = Column(DateTime(), nullable=False)
    dest_payment_preimage = Column(ByteHexString(32), nullable=False)
    # --- optional intermediate-gateway leg (NULL when unused) ---
    other_gw_payment_hash = Column(ByteHexString(32))
    other_gw_url = Column(String(MAX_URL_LEN))
    other_gw_chain = Column(ByteHexString(32))
    other_gw_bolt11 = Column(String(MAX_BOLT11))
    other_gw_expires_at = Column(DateTime())
    other_gw_payment_preimage = Column(ByteHexString(32))
|
from sqlalchemy import Column, DateTime, Integer, Numeric, String, LargeBinary, TypeDecorator
from py_ln_gateway.db import Base
# There is no absolute limit for bolt11. There are practical limits based on QR code sizes.
# There's no maximum to find in the spec, but apparently 2048 for validation and storage is good enough as a guess.
# lnd accepts invoices up to 7089 bytes https://github.com/lightningnetwork/lnd/blob/master/zpay32/invoice.go#L79
MAX_BOLT11 = 2048
MAX_URL_LEN = 200
class ByteHexString(TypeDecorator):
    """Persist hex-digit strings as raw bytes; decode them back on load."""

    impl = LargeBinary

    def process_bind_param(self, value, dialect):
        # Falsy values (None, '') are written as SQL NULL.
        if not value:
            return None
        if not isinstance(value, str):
            raise TypeError("ByteHexString columns support only str values.")
        return bytes.fromhex(value)

    def process_result_value(self, value, dialect):
        # NULL / empty payloads come back as None.
        if not value:
            return None
        return value.hex()
class Price(Base):
    """Conversion rate between a source chain and a destination chain.

    ``price`` is presumably the src→dest amount multiplier — confirm
    against the gateway's conversion logic.
    """
    __tablename__ = 'prices'
    # The id is composed by the src_chain_id followed by the dest_chain_id
    # (two 32-byte chain ids concatenated, hence 2 * 32 raw bytes).
    src_dest = Column(ByteHexString(2 * 32), primary_key=True)
    # Fixed-point rate with 4 decimal places.
    price = Column(Numeric(10,4), nullable=False)
class PendingRequest(Base):
    """A cross-chain payment that has been quoted but not yet settled.

    Field prefixes: ``src_`` for the source-chain invoice, ``dest_`` for
    the destination-chain invoice, and ``other_gw_`` for an optional leg
    through another gateway (all nullable) — roles inferred from names;
    confirm against the gateway request-handling code.
    """
    __tablename__ = 'pending_requests'
    # --- source-chain invoice ---
    src_payment_hash = Column(ByteHexString(32), primary_key=True)
    src_chain = Column(ByteHexString(32), nullable=False)
    src_bolt11 = Column(String(MAX_BOLT11), nullable=False)
    src_expires_at = Column(DateTime(), nullable=False)
    src_amount = Column(Integer(), nullable=False)
    # --- destination-chain invoice ---
    dest_payment_hash = Column(ByteHexString(32), nullable=False, unique=True)
    dest_chain = Column(ByteHexString(32), nullable=False)
    dest_bolt11 = Column(String(MAX_BOLT11), nullable=False)
    dest_expires_at = Column(DateTime(), nullable=False)
    dest_amount = Column(Integer(), nullable=False)
    # --- optional intermediate-gateway leg (NULL when unused) ---
    other_gw_payment_hash = Column(ByteHexString(32))
    other_gw_url = Column(String(MAX_URL_LEN))
    other_gw_chain = Column(ByteHexString(32))
    other_gw_bolt11 = Column(String(MAX_BOLT11))
    other_gw_expires_at = Column(DateTime())
    other_gw_amount = Column(Integer())
class PaidRequest(Base):
    """A settled cross-chain payment.

    Mirrors ``PendingRequest`` but stores the payment preimages obtained
    on settlement instead of the quoted amounts.
    """
    __tablename__ = 'paid_requests'
    # --- source-chain invoice ---
    src_payment_hash = Column(ByteHexString(32), primary_key=True)
    src_chain = Column(ByteHexString(32), nullable=False)
    src_bolt11 = Column(String(MAX_BOLT11), nullable=False)
    src_expires_at = Column(DateTime(), nullable=False)
    src_payment_preimage = Column(ByteHexString(32), nullable=False)
    # --- destination-chain invoice ---
    dest_payment_hash = Column(ByteHexString(32), nullable=False, unique=True)
    dest_chain = Column(ByteHexString(32), nullable=False)
    dest_bolt11 = Column(String(MAX_BOLT11), nullable=False)
    dest_expires_at = Column(DateTime(), nullable=False)
    dest_payment_preimage = Column(ByteHexString(32), nullable=False)
    # --- optional intermediate-gateway leg (NULL when unused) ---
    other_gw_payment_hash = Column(ByteHexString(32))
    other_gw_url = Column(String(MAX_URL_LEN))
    other_gw_chain = Column(ByteHexString(32))
    other_gw_bolt11 = Column(String(MAX_BOLT11))
    other_gw_expires_at = Column(DateTime())
    other_gw_payment_preimage = Column(ByteHexString(32))
| en | 0.925946 | # There is no absolute limit for bolt11. There are practical limits based on QR code sizes. # There's no maximum to find in the spec, but apparently 2048 for validation and storage is good enough as a guess. # lnd accepts invoices up to 7089 bytes https://github.com/lightningnetwork/lnd/blob/master/zpay32/invoice.go#L79 Convert a string with hexadecimal digits to bytestring for storage and back. # The id is composed by the src_chain_id followed by the dest_chain_id | 2.346922 | 2 |
Test29_tf/apis/constant.py | hooloong/My_TensorFlow | 3 | 6616996 | '''
tf.constant
constant(
value,
dtype=None,
shape=None,
name='Const',
verify_shape=False
)
根据 value 的值生成一个 shape 维度的常量张量
参数列表:
参数名 必选 类型 说明
value 是 常量数值或者 list 输出张量的值
dtype 否 dtype 输出张量元素类型
shape 否 1 维整形张量或 array 输出张量的维度
name 否 string 张量名称
verify_shape 否 Boolean 检测 shape 是否和 value 的 shape 一致,若为 Fasle,不一致时,会用最后一个元素将 shape 补全
'''
#!/usr/bin/python
import tensorflow as tf
import numpy as np
# 2x3 matrix built from an explicit value list.
a = tf.constant([1,2,3,4,5,6],shape=[2,3])
# 3x2 matrix filled from the scalar -1 (per the docstring above, a short
# value is padded with its last element when verify_shape is False).
b = tf.constant(-1,shape=[3,2])
# (2,3) @ (3,2) -> (2,2)
c = tf.matmul(a,b)
# Batched operands: two 2x3 and two 3x2 int32 matrices from ranges 1..12 / 13..24.
e = tf.constant(np.arange(1,13,dtype=np.int32),shape=[2,2,3])
f = tf.constant(np.arange(13,25,dtype=np.int32),shape=[2,3,2])
# Batched matmul: (2,2,3) @ (2,3,2) -> (2,2,2)
g = tf.matmul(e,f)
with tf.Session() as sess:
print (sess.run(a))
print ("##################################")
print (sess.run(b))
print ("##################################")
print (sess.run(c))
print ("##################################")
print (sess.run(e))
print ("##################################")
print (sess.run(f))
print ("##################################")
print (sess.run(g)) | '''
tf.constant
constant(
value,
dtype=None,
shape=None,
name='Const',
verify_shape=False
)
根据 value 的值生成一个 shape 维度的常量张量
参数列表:
参数名 必选 类型 说明
value 是 常量数值或者 list 输出张量的值
dtype 否 dtype 输出张量元素类型
shape 否 1 维整形张量或 array 输出张量的维度
name 否 string 张量名称
verify_shape 否 Boolean 检测 shape 是否和 value 的 shape 一致,若为 Fasle,不一致时,会用最后一个元素将 shape 补全
'''
#!/usr/bin/python
import tensorflow as tf
import numpy as np
# 2x3 matrix built from an explicit value list.
a = tf.constant([1,2,3,4,5,6],shape=[2,3])
# 3x2 matrix filled from the scalar -1 (per the docstring above, a short
# value is padded with its last element when verify_shape is False).
b = tf.constant(-1,shape=[3,2])
# (2,3) @ (3,2) -> (2,2)
c = tf.matmul(a,b)
# Batched operands: two 2x3 and two 3x2 int32 matrices from ranges 1..12 / 13..24.
e = tf.constant(np.arange(1,13,dtype=np.int32),shape=[2,2,3])
f = tf.constant(np.arange(13,25,dtype=np.int32),shape=[2,3,2])
# Batched matmul: (2,2,3) @ (2,3,2) -> (2,2,2)
g = tf.matmul(e,f)
with tf.Session() as sess:
print (sess.run(a))
print ("##################################")
print (sess.run(b))
print ("##################################")
print (sess.run(c))
print ("##################################")
print (sess.run(e))
print ("##################################")
print (sess.run(f))
print ("##################################")
print (sess.run(g)) | zh | 0.698552 | tf.constant constant( value, dtype=None, shape=None, name='Const', verify_shape=False ) 根据 value 的值生成一个 shape 维度的常量张量 参数列表: 参数名 必选 类型 说明 value 是 常量数值或者 list 输出张量的值 dtype 否 dtype 输出张量元素类型 shape 否 1 维整形张量或 array 输出张量的维度 name 否 string 张量名称 verify_shape 否 Boolean 检测 shape 是否和 value 的 shape 一致,若为 Fasle,不一致时,会用最后一个元素将 shape 补全 #!/usr/bin/python #################################") #################################") #################################") #################################") #################################") | 3.270003 | 3 |
face_recognition/mysql_module/fetch_mysql_data.py | eugenegalaxy/victim_face_recognition | 0 | 6616997 | import mysql.connector
import os
ENCODING = 'latin1' # Default utf-8 encoding fails to read BLOB(images) data
# --------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------
# ============================ FOR DATABASE USERS, PLEASE WRITE IN THE INFORMATION ============================
# Host address:
g_host_address = 'localhost'
# Database name
g_database_name = 'ReallyARobot'
# Username:
g_username = 'root'
# Password:
g_password = '<PASSWORD>'
# 1. Database table containing employee information (name, age, nationality, etc)
g_emp_prof = 'employee_profiles'
# 2. Database table containing employee images (Must be of BLOB type) TODO -> not only BLOBs, but also references
g_emp_images = 'employee_images'
# 3. Database table column with employee names (ASSUMED TO BE IN table 1.) Used for verification
g_emp_name = 'fullName'
# --------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------
def save_image_on_disk(data, filename):
    """Write raw image bytes to *filename*.

    Parameters
    ----------
    data : bytes or str
        Image payload as fetched from a BLOB column. A ``str`` is first
        encoded with the module-level ``ENCODING`` (latin1) back to bytes.
    filename : str
        Destination path on disk.
    """
    if not isinstance(data, bytes):
        data = data.encode(ENCODING)
    # The context manager closes the file; the old explicit close() inside
    # the `with` block was redundant.
    with open(filename, 'wb') as file:
        file.write(data)
def save_data_text_on_disk(data, filename):
    """Append ``str(data)`` to *filename*, creating the file if needed.

    The explicit ``file.close()`` of the original was redundant inside the
    ``with`` block and has been removed.
    """
    with open(filename, "a") as file:
        file.write(str(data))
def query_database(query, args=None):
    """Run *query* against the configured MySQL database and fetch all rows.

    Parameters
    ----------
    query : str
        SQL statement, optionally with ``%s`` placeholders.
    args : tuple, optional
        Values substituted into the placeholders (parameterized, so user
        data is escaped by the connector).

    Returns
    -------
    list of tuple or None
        All result rows, or ``None`` when the connection/query failed.
    """
    connection = None
    cursor = None
    try:
        connection = mysql.connector.connect(host=g_host_address,
                                             database=g_database_name,
                                             user=g_username,
                                             #password=g_password
                                             )
        connection.set_charset_collation(ENCODING)  # default utf-8 encoding fails to read BLOB (image) data
        cursor = connection.cursor()
        if args is not None:
            cursor.execute(query, args)
        else:
            cursor.execute(query)
        return cursor.fetchall()
    except mysql.connector.Error as error:
        print(error)
        return None
    finally:
        # Bug fix: when connect() itself raised, `connection`/`cursor` were
        # unbound and the finally block raised UnboundLocalError, masking
        # the original exception. Guard both names explicitly.
        if connection is not None and connection.is_connected():
            if cursor is not None:
                cursor.close()
            connection.close()
            print('MySQL connection is closed')
def fetch_table_description(table_name):
    """DESCRIBE *table_name* and classify its columns.

    Returns a dict containing the table name, every ``DESCRIBE`` row under
    ``Row_0`` ... ``Row_n``, and ``[column_name, row_index]`` pairs sorted
    into ``Primary_key_column``, ``Foreign_key_column`` and
    ``BLOB_data_column``.

    NOTE: *table_name* is interpolated directly into the SQL; only pass
    trusted, internally-configured table names.
    """
    print('Reading data from {} table'.format(table_name))
    query = '''DESCRIBE {}'''.format(table_name)
    data = query_database(query)
    description_dic = {
        'Table_name': table_name,
        'Primary_key_column': [],
        'Foreign_key_column': [],
        'BLOB_data_column': [],
    }
    for idx, row in enumerate(data):
        description_dic['Row_{}'.format(idx)] = row
        for entry in row:
            if entry == 'PRI':
                description_dic['Primary_key_column'].append([row[0], idx])
            if entry == 'MUL':
                description_dic['Foreign_key_column'].append([row[0], idx])
            # MySQL reports column types in lower case, e.g. 'mediumblob'.
            if entry in ('blob', 'mediumblob', 'longblob'):
                description_dic['BLOB_data_column'].append([row[0], idx])
    return description_dic
# Function assumes that table 'g_emp_images' is connected to table g_emp_prof by FOREIGN KEY
def save_employee_data(save_path):
    '''Join employee profiles with their images and write both to disk.

    The two tables are combined via the FOREIGN KEY linking
    ``g_emp_images`` to ``g_emp_prof``. For every person a directory
    ``save_path/<Full_Name>/`` is created containing the images
    (``<name>_<n>.jpg``) and a one-off ``info.txt`` with the profile rows.
    Assumes the images table holds exactly one BLOB column.
    '''
    # ============================= TEXT PART ===================================
    # Column names of the profile table, used as labels in info.txt.
    query = '''
        SELECT `COLUMN_NAME`
        FROM `INFORMATION_SCHEMA`.`COLUMNS`
        WHERE `TABLE_SCHEMA`= %s
        AND `TABLE_NAME`= %s;
    '''
    args = (g_database_name, g_emp_prof)
    column_fields = query_database(query, args)
    column_names = [val for sublist in column_fields for val in sublist]
    # NOTE(review): hardcodes 'employee_profiles' instead of g_emp_prof — verify.
    query = '''SELECT * FROM {}'''.format('employee_profiles')
    all_entries = query_database(query)
    # ============================= IMAGES PART =======================================
    # Discover the FOREIGN KEY that references the profile table.
    query = '''
        SELECT referenced_column_name, table_name, column_name
        FROM information_schema.KEY_COLUMN_USAGE
        WHERE table_schema = %s
        AND referenced_table_name = %s;
    '''
    args = (g_database_name, g_emp_prof)
    refer_info = query_database(query, args) # [(parent_column_name, child_table_name, child_column_name)]
    if refer_info:
        parent_column_name = refer_info[0][0]
        child_table_name = refer_info[0][1]
        child_column_name = refer_info[0][2]
        child_table_desc = fetch_table_description(child_table_name)
        if child_table_desc['BLOB_data_column']:
            BLOB_column_name = child_table_desc['BLOB_data_column'][0][0] # name of the (first) column holding a BLOB object
            # Join: one row per image, carrying the owner's full name.
            query = '''
            SELECT p.{0}, i.{1} FROM {2} p
            INNER JOIN {3} i
            ON i.{4} = p.{5};
            '''.format(g_emp_name, BLOB_column_name, g_emp_prof, g_emp_images, child_column_name, parent_column_name)
            merged_data = query_database(query)
            # ============================= SAVING TO DISK PART =======================================
            for item in merged_data:
                counter = 0 # TODO replace this filename numerator with already existing from directory_utils!
                person_name = item[0].replace(" ", "_")
                directory_path = os.path.join(save_path, person_name)
                if not os.path.isdir(directory_path):
                    os.mkdir(directory_path)
                full_path_img = "{0}/{1}_{2}.{3}".format(directory_path, person_name, str(counter), 'jpg')
                while os.path.isfile(full_path_img): # if image exists with same filename, iterate counter (e.g. "image_0.jpg -> image_1.jpg")
                    counter += 1
                    full_path_img = "{0}/{1}_{2}.{3}".format(directory_path, person_name, str(counter), 'jpg')
                save_image_on_disk(item[1], full_path_img)
                full_path_txt = "{0}/{1}.{2}".format(directory_path, 'info', 'txt')
                if not os.path.isfile(full_path_txt): # if info.txt already exists, don't re-append the same info on every photo iteration
                    for entry in all_entries:
                        if item[0] in entry:
                            for idx, row in enumerate(entry): # one "column_name: value" line per profile field
                                data_str = "{0}: {1}\n".format(column_names[idx], row)
                                save_data_text_on_disk(data_str, full_path_txt)
        else:
            print('There are no images in the table.')
    else:
        print('Two tables are not connected by any FOREIGN KEY')
save_employee_data('face_recognition/images/mysql_database')
| import mysql.connector
import os
ENCODING = 'latin1' # Default utf-8 encoding fails to read BLOB(images) data
# --------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------
# ============================ FOR DATABASE USERS, PLEASE WRITE IN THE INFORMATION ============================
# Host address:
g_host_address = 'localhost'
# Database name
g_database_name = 'ReallyARobot'
# Username:
g_username = 'root'
# Password:
g_password = '<PASSWORD>'
# 1. Database table containing employee information (name, age, nationality, etc)
g_emp_prof = 'employee_profiles'
# 2. Database table containing employee images (Must be of BLOB type) TODO -> not only BLOBs, but also references
g_emp_images = 'employee_images'
# 3. Database table column with employee names (ASSUMED TO BE IN table 1.) Used for verification
g_emp_name = 'fullName'
# --------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------
def save_image_on_disk(data, filename):
    """Write raw image bytes to *filename*.

    Parameters
    ----------
    data : bytes or str
        Image payload as fetched from a BLOB column. A ``str`` is first
        encoded with the module-level ``ENCODING`` (latin1) back to bytes.
    filename : str
        Destination path on disk.
    """
    if not isinstance(data, bytes):
        data = data.encode(ENCODING)
    # The context manager closes the file; the old explicit close() inside
    # the `with` block was redundant.
    with open(filename, 'wb') as file:
        file.write(data)
def save_data_text_on_disk(data, filename):
    """Append ``str(data)`` to *filename*, creating the file if needed.

    The explicit ``file.close()`` of the original was redundant inside the
    ``with`` block and has been removed.
    """
    with open(filename, "a") as file:
        file.write(str(data))
def query_database(query, args=None):
try:
connection = mysql.connector.connect(host=g_host_address,
database=g_database_name,
user=g_username,
#password=g_password
)
connection.set_charset_collation(ENCODING) # Default utf-8 encoding fails to read BLOB(images) data
cursor = connection.cursor()
if args is not None:
cursor.execute(query, args)
else:
cursor.execute(query)
data = cursor.fetchall()
return data
except mysql.connector.Error as error:
print(error)
finally:
if (connection.is_connected()):
cursor.close()
connection.close()
print('MySQL connection is closed')
def fetch_table_description(table_name):
print('Reading data from {} table'.format(table_name))
query = '''DESCRIBE {}'''.format(table_name)
data = query_database(query)
description_dic = {
'Table_name': table_name,
'Primary_key_column': list(),
'Foreign_key_column': list(),
'BLOB_data_column': list(),
}
for idx, row in enumerate(data):
description_dic['Row_{}'.format(idx)] = row
for entry in row:
if entry == 'PRI':
description_dic['Primary_key_column'].append([row[0], idx])
if entry == 'MUL':
description_dic['Foreign_key_column'].append([row[0], idx])
if entry == 'longblob' or entry == 'mediumblob' or entry == 'blob':
description_dic['BLOB_data_column'].append([row[0], idx])
return description_dic
# Function assumes that table 'g_emp_images' is connected to table g_emp_prof by FOREIGN KEY
def save_employee_data(save_path):
'''Function combines two tables data by foreign key and saves data on disk.'''
# ============================= TEXT PART ===================================
query = '''
SELECT `COLUMN_NAME`
FROM `INFORMATION_SCHEMA`.`COLUMNS`
WHERE `TABLE_SCHEMA`= %s
AND `TABLE_NAME`= %s;
'''
args = (g_database_name, g_emp_prof)
column_fields = query_database(query, args)
column_names = [val for sublist in column_fields for val in sublist]
query = '''SELECT * FROM {}'''.format('employee_profiles')
all_entries = query_database(query)
# ============================= IMAGES PART =======================================
query = '''
SELECT referenced_column_name, table_name, column_name
FROM information_schema.KEY_COLUMN_USAGE
WHERE table_schema = %s
AND referenced_table_name = %s;
'''
args = (g_database_name, g_emp_prof)
refer_info = query_database(query, args) # [(parent_column_name, child_table_name, child_column_name)]
if refer_info:
parent_column_name = refer_info[0][0]
child_table_name = refer_info[0][1]
child_column_name = refer_info[0][2]
child_table_desc = fetch_table_description(child_table_name)
if child_table_desc['BLOB_data_column']:
BLOB_column_name = child_table_desc['BLOB_data_column'][0][0] # Shows name of table column that has BLOB object
query = '''
SELECT p.{0}, i.{1} FROM {2} p
INNER JOIN {3} i
ON i.{4} = p.{5};
'''.format(g_emp_name, BLOB_column_name, g_emp_prof, g_emp_images, child_column_name, parent_column_name)
merged_data = query_database(query)
# ============================= SAVING TO DISK PART =======================================
for item in merged_data:
counter = 0 # TODO replace this filename numerator with already existing from directory_utils!
person_name = item[0].replace(" ", "_")
directory_path = os.path.join(save_path, person_name)
if not os.path.isdir(directory_path):
os.mkdir(directory_path)
full_path_img = "{0}/{1}_{2}.{3}".format(directory_path, person_name, str(counter), 'jpg')
while os.path.isfile(full_path_img): # if image exists with same filename, iterate counter (Example "image_0.jpg -> image_1.jpg")
counter += 1
full_path_img = "{0}/{1}_{2}.{3}".format(directory_path, person_name, str(counter), 'jpg')
save_image_on_disk(item[1], full_path_img)
full_path_txt = "{0}/{1}.{2}".format(directory_path, 'info', 'txt')
if not os.path.isfile(full_path_txt): # If info txt already exists, it won't save the same info on every photo iteration
for entry in all_entries:
if item[0] in entry:
for idx, row in enumerate(entry): # <-- iterates over each entry
data_str = "{0}: {1}\n".format(column_names[idx], row)
save_data_text_on_disk(data_str, full_path_txt)
else:
print('There are no images in the table.')
else:
print('Two tables are not connected by any FOREIGN KEY')
save_employee_data('face_recognition/images/mysql_database')
| en | 0.487109 | # Default utf-8 encoding fails to read BLOB(images) data # -------------------------------------------------------------------------------------------------------------- # -------------------------------------------------------------------------------------------------------------- # ============================ FOR DATABASE USERS, PLEASE WRITE IN THE INFORMATION ============================ # Host address: # Database name # Username: # Password: # 1. Database table containing employee information (name, age, nationality, etc) # 2. Database table containing employee images (Must be of BLOB type) TODO -> not only BLOBs, but also references # 3. Database table column with employee names (ASSUMED TO BE IN table 1.) Used for verification # -------------------------------------------------------------------------------------------------------------- # -------------------------------------------------------------------------------------------------------------- # Convert binary data to proper format and write it on Hard Disk #password=g_password # Default utf-8 encoding fails to read BLOB(images) data DESCRIBE {} # Function assumes that table 'g_emp_images' is connected to table g_emp_prof by FOREIGN KEY Function combines two tables data by foreign key and saves data on disk. 
# ============================= TEXT PART =================================== SELECT `COLUMN_NAME` FROM `INFORMATION_SCHEMA`.`COLUMNS` WHERE `TABLE_SCHEMA`= %s AND `TABLE_NAME`= %s; SELECT * FROM {} # ============================= IMAGES PART ======================================= SELECT referenced_column_name, table_name, column_name FROM information_schema.KEY_COLUMN_USAGE WHERE table_schema = %s AND referenced_table_name = %s; # [(parent_column_name, child_table_name, child_column_name)] # Shows name of table column that has BLOB object SELECT p.{0}, i.{1} FROM {2} p INNER JOIN {3} i ON i.{4} = p.{5}; # ============================= SAVING TO DISK PART ======================================= # TODO replace this filename numerator with already existing from directory_utils! # if image exists with same filename, iterate counter (Example "image_0.jpg -> image_1.jpg") # If info txt already exists, it won't save the same info on every photo iteration # <-- iterates over each entry | 2.379244 | 2 |
db/mvtec_preprocess.py | samsgood0310/Unsupervised-Defect-Segmentation | 1 | 6616998 | import cv2
import os
import random
import argparse
def parse_args():
    """Parse command-line options for the MVTec preprocessing script.

    Returns
    -------
    argparse.Namespace
        With ``src_path`` (required), ``save_path`` (required) and
        ``val_ratio`` (float, default 0.05).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--src_path', help='path of original dataset', type=str, required=True)
    # Typo fix in user-facing help text: "proprocessed" -> "preprocessed".
    parser.add_argument('--save_path', help='path to save preprocessed dataset', type=str, required=True)
    parser.add_argument('--val_ratio', help='ratio of val set', type=float, default=0.05)
    args = parser.parse_args()
    return args
def mirror(image):
    """Return a left-right mirrored copy of *image* (H x W [x C] array).

    The original copied the whole image and then returned a reversed view
    of that copy; reversing first and copying once yields the same values
    in a single, contiguous allocation.
    """
    return image[:, ::-1].copy()
def flip(image):
    """Return an up-down flipped copy of *image* (H x W [x C] array).

    Same fix as ``mirror``: reverse the row axis first, then make one
    contiguous copy instead of copying the untouched data up front.
    """
    return image[::-1, :].copy()
def rotation(image, range):
    """Rotate *image* about its center by a random angle drawn from *range*.

    Parameters
    ----------
    image : numpy.ndarray
        Input image of shape (H, W[, C]).
    range : sequence of two floats
        (min_deg, max_deg) bounds for the uniformly sampled angle.
        NOTE: the name shadows the builtin ``range``; kept unchanged for
        backward compatibility with keyword callers.

    Returns
    -------
    numpy.ndarray
        Rotated image of the same size; border pixels are replicated.
    """
    _h, _w = image.shape[0: 2]
    center = (_w // 2, _h // 2)
    rot = random.uniform(range[0], range[1])
    M = cv2.getRotationMatrix2D(center, rot, 1)
    # warpAffine allocates a fresh output array, so the original's
    # `image.copy()` (immediately overwritten) was wasted work.
    return cv2.warpAffine(image, M, (_w, _h), borderMode=cv2.BORDER_REPLICATE)
def crop(image, crop_size, margin):
    """Randomly crop a ``crop_size`` window out of *image*.

    Parameters
    ----------
    image : numpy.ndarray
        Input image of shape (H, W[, C]).
    crop_size : sequence of two ints
        (width, height) of the crop window.
    margin : sequence of two ints
        (x_margin, y_margin) kept clear on each side when sampling the
        window's top-left corner.

    Returns
    -------
    numpy.ndarray
        A view of *image* covering the sampled window.
    """
    height, width = image.shape[0: 2]
    x_offset = random.randint(margin[0], width - crop_size[0] - margin[0])
    # Bug fix: the y lower bound used margin[0] (the x margin) instead of margin[1].
    y_offset = random.randint(margin[1], height - crop_size[1] - margin[1])
    return image[y_offset: y_offset+crop_size[1], x_offset: x_offset+crop_size[0]]
if __name__ == '__main__':
    # Preprocess the MVTec AD dataset: grayscale + resize every image,
    # split train/val, and augment the training set (crops, rotations,
    # mirror, flip). Textures are resized to 256 then cropped to 128;
    # objects are resized to 128 directly.
    TEXTURE = ['carpet', 'grid', 'leather', 'tile', 'wood']
    OBJECT = ['bottle','cable','capsule', 'hazelnut', 'metal_nut', 'pill', 'screw', 'toothbrush', 'transistor', 'zipper']
    args = parse_args()
    src_path = args.src_path
    save_path = args.save_path
    val_ratio = args.val_ratio
    new_set_path = os.path.join(save_path, 'mvtec_pre')
    if not os.path.exists(new_set_path):
        os.mkdir(new_set_path)
    for item in os.listdir(src_path):
        item_path = os.path.join(src_path, item)
        if os.path.isfile(item_path):
            continue
        print('Arrange {}...'.format(item))
        if item in TEXTURE:
            IsTexture = True
        elif item in OBJECT:
            IsTexture = False
        else:
            raise Exception('Wrong type')
        # make item directory
        new_set_item_path = os.path.join(new_set_path, item)
        if not os.path.exists(new_set_item_path):
            os.mkdir(new_set_item_path)
        # arrange test set (images + ground-truth defect masks)
        test_img_dir = os.path.join(item_path, 'test')
        gt_dir = os.path.join(item_path, 'ground_truth')
        save_test_dir = os.path.join(new_set_item_path, 'test')
        save_gt_dir = os.path.join(new_set_item_path, 'ground_truth')
        if not os.path.exists(save_test_dir):
            os.mkdir(save_test_dir)
        if not os.path.exists(save_gt_dir):
            os.mkdir(save_gt_dir)
        for ano in os.listdir(test_img_dir):
            save_test_ano_dir = os.path.join(save_test_dir, ano)
            save_gt_ano_dir = os.path.join(save_gt_dir, ano)
            if not os.path.exists(save_test_ano_dir):
                os.mkdir(save_test_ano_dir)
            if ano != 'good':
                # only anomalous categories have ground-truth masks
                if not os.path.exists(save_gt_ano_dir):
                    os.mkdir(save_gt_ano_dir)
            for img_name in os.listdir(os.path.join(test_img_dir, ano)):
                img_id = img_name.split('.')[0]
                img = cv2.imread(os.path.join(test_img_dir, ano, img_name))
                img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                if ano != 'good':
                    mask = cv2.imread(os.path.join(gt_dir, ano, '{}_mask.png'.format(img_id)))
                    mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
                if IsTexture is True:
                    img = cv2.resize(img, (256, 256))
                    if ano != 'good':
                        mask = cv2.resize(mask, (256, 256))
                else:
                    img = cv2.resize(img, (128, 128))
                    if ano != 'good':
                        mask = cv2.resize(mask, (128, 128))
                cv2.imwrite(os.path.join(save_test_ano_dir, img_name), img)
                if ano != 'good':
                    # re-binarize the mask after the interpolated resize
                    mask[mask < 128] = 0
                    mask[mask >= 128] = 255
                    cv2.imwrite(os.path.join(save_gt_ano_dir, '{}_mask.png'.format(img_id)), mask)
        # arrange train & val set
        train_img_dir = os.path.join(item_path, 'train', 'good')
        save_train_img_dir = os.path.join(new_set_item_path, 'train', 'good')
        # Bug fix: the old two-step mkdir skipped creating 'good' when
        # 'train' already existed; makedirs covers both levels idempotently.
        os.makedirs(save_train_img_dir, exist_ok=True)
        save_val_img_dir = os.path.join(new_set_item_path, 'val')
        if not os.path.exists(save_val_img_dir):
            os.mkdir(save_val_img_dir)
        # get val list
        image_list = os.listdir(train_img_dir)
        image_num = len(image_list)
        val_num = int(image_num * val_ratio)
        # Bug fix: the old rejection loop drew val_num random ids but dropped
        # duplicates without retrying, so it could select fewer than val_num
        # images. random.sample guarantees exactly val_num distinct indices.
        val_id_list = random.sample(range(image_num), val_num)
        # get & save images
        for i, image in enumerate(image_list):
            img = cv2.imread(os.path.join(train_img_dir, image))
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            img_id = image.split('.')[0]
            if IsTexture is True:
                img = cv2.resize(img, (256, 256))
                if i in val_id_list:
                    cv2.imwrite(os.path.join(save_val_img_dir, '{}.png'.format(img_id)), img)
                else:
                    # random 128x128 crop from the original image
                    img_ori = crop(img, [128, 128], [0, 0])
                    cv2.imwrite(os.path.join(save_train_img_dir, '{}_o.png'.format(img_id)), img_ori)
                    # crops from three randomly rotated copies
                    for k in range(0, 3):
                        img_r = rotation(img, [-20, 20])
                        img_r = crop(img_r, [128, 128], [30, 30])
                        cv2.imwrite(os.path.join(save_train_img_dir, '{}_r{:d}.png'.format(img_id, k)), img_r)
                    # crop from the mirrored image
                    img_m = mirror(img)
                    img_m = crop(img_m, [128, 128], [0, 0])
                    cv2.imwrite(os.path.join(save_train_img_dir, '{}_m.png'.format(img_id)), img_m)
                    # crop from the flipped image
                    img_f = flip(img)
                    img_f = crop(img_f, [128, 128], [0, 0])
                    cv2.imwrite(os.path.join(save_train_img_dir, '{}_f.png'.format(img_id)), img_f)
            else:
                img = cv2.resize(img, (128, 128))
                if i in val_id_list:
                    cv2.imwrite(os.path.join(save_val_img_dir, '{}.png'.format(img_id)), img)
                else:
                    # save the original image as-is (objects are not cropped)
                    cv2.imwrite(os.path.join(save_train_img_dir, '{}_o.png'.format(img_id)), img)
                    # three rotated copies at exactly 90/180/270 degrees
                    for k in range(0, 3):
                        img_r = rotation(img, [90*(k+1), 90*(k+1)])
                        cv2.imwrite(os.path.join(save_train_img_dir, '{}_r{:d}.png'.format(img_id, k)), img_r)
                    # mirrored copy
                    img_m = mirror(img)
                    cv2.imwrite(os.path.join(save_train_img_dir, '{}_m.png'.format(img_id)), img_m)
                    # flipped copy
                    img_f = flip(img)
cv2.imwrite(os.path.join(save_train_img_dir, '{}_f.png'.format(img_id)), img_f) | import cv2
import os
import random
import argparse
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--src_path', help='path of original dataset', type=str, required=True)
parser.add_argument('--save_path', help='path to save proprocessed dataset', type=str, required=True)
parser.add_argument('--val_ratio', help='ratio of val set', type=float, default=0.05)
args = parser.parse_args()
return args
def mirror(image):
image_m = image.copy()
image_m = image_m[:, ::-1]
return image_m
def flip(image):
image_f = image.copy()
image_f = image_f[::-1, :]
return image_f
def rotation(image, range):
_h, _w = image.shape[0: 2]
center = (_w // 2, _h // 2)
rot = random.uniform(range[0], range[1])
image_r = image.copy()
M = cv2.getRotationMatrix2D(center, rot, 1)
image_r = cv2.warpAffine(image_r, M, (_w, _h), borderMode=cv2.BORDER_REPLICATE)
return image_r
def crop(image, crop_size, margin):
    """Randomly crop a ``crop_size`` window out of *image*.

    Parameters
    ----------
    image : numpy.ndarray
        Input image of shape (H, W[, C]).
    crop_size : sequence of two ints
        (width, height) of the crop window.
    margin : sequence of two ints
        (x_margin, y_margin) kept clear on each side when sampling the
        window's top-left corner.

    Returns
    -------
    numpy.ndarray
        A view of *image* covering the sampled window.
    """
    height, width = image.shape[0: 2]
    x_offset = random.randint(margin[0], width - crop_size[0] - margin[0])
    # Bug fix: the y lower bound used margin[0] (the x margin) instead of margin[1].
    y_offset = random.randint(margin[1], height - crop_size[1] - margin[1])
    return image[y_offset: y_offset+crop_size[1], x_offset: x_offset+crop_size[0]]
if __name__ == '__main__':
TEXTURE = ['carpet', 'grid', 'leather', 'tile', 'wood']
OBJECT = ['bottle','cable','capsule', 'hazelnut', 'metal_nut', 'pill', 'screw', 'toothbrush', 'transistor', 'zipper']
args = parse_args()
src_path = args.src_path
save_path = args.save_path
val_ratio = args.val_ratio
new_set_path = os.path.join(save_path, 'mvtec_pre')
if not os.path.exists(new_set_path):
os.mkdir(new_set_path)
for item in os.listdir(src_path):
item_path = os.path.join(src_path, item)
if os.path.isfile(item_path):
continue
print('Arrange {}...'.format(item))
if item in TEXTURE:
IsTexture = True
elif item in OBJECT:
IsTexture = False
else:
raise Exception('Wrong type')
# make item directory
new_set_item_path = os.path.join(new_set_path, item)
if not os.path.exists(new_set_item_path):
os.mkdir(new_set_item_path)
# arragne test set
test_img_dir = os.path.join(item_path, 'test')
gt_dir = os.path.join(item_path, 'ground_truth')
save_test_dir = os.path.join(new_set_item_path, 'test')
save_gt_dir = os.path.join(new_set_item_path, 'ground_truth')
if not os.path.exists(save_test_dir):
os.mkdir(save_test_dir)
if not os.path.exists(save_gt_dir):
os.mkdir(save_gt_dir)
for ano in os.listdir(test_img_dir):
save_test_ano_dir = os.path.join(save_test_dir, ano)
save_gt_ano_dir = os.path.join(save_gt_dir, ano)
if not os.path.exists(save_test_ano_dir):
os.mkdir(save_test_ano_dir)
if ano != 'good':
if not os.path.exists(save_gt_ano_dir):
os.mkdir(save_gt_ano_dir)
for img_name in os.listdir(os.path.join(test_img_dir, ano)):
img_id = img_name.split('.')[0]
img = cv2.imread(os.path.join(test_img_dir, ano, img_name))
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
if ano != 'good':
mask = cv2.imread(os.path.join(gt_dir, ano, '{}_mask.png'.format(img_id)))
mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
if IsTexture is True:
img = cv2.resize(img, (256, 256))
if ano != 'good':
mask = cv2.resize(mask, (256, 256))
else:
img = cv2.resize(img, (128, 128))
if ano != 'good':
mask = cv2.resize(mask, (128, 128))
cv2.imwrite(os.path.join(save_test_ano_dir, img_name), img)
if ano != 'good':
mask[mask < 128] = 0
mask[mask >= 128] = 255
cv2.imwrite(os.path.join(save_gt_ano_dir, '{}_mask.png'.format(img_id)), mask)
# arrange train & val set
train_img_dir = os.path.join(item_path, 'train', 'good')
save_train_img_dir = os.path.join(new_set_item_path, 'train', 'good')
if not os.path.exists(os.path.join(new_set_item_path, 'train')):
os.mkdir(os.path.join(new_set_item_path, 'train'))
os.mkdir(save_train_img_dir)
save_val_img_dir = os.path.join(new_set_item_path, 'val')
if not os.path.exists(save_val_img_dir):
os.mkdir(save_val_img_dir)
# get val list
image_list = os.listdir(train_img_dir)
image_num = len(image_list)
val_num = int(image_num * val_ratio)
val_id_list = []
for _ in range(val_num):
if len(val_id_list) >= val_num:
break
val_id = random.randint(0, image_num-1)
if val_id not in val_id_list:
val_id_list.append(val_id)
# get & save images
for i, image in enumerate(image_list):
img = cv2.imread(os.path.join(train_img_dir, image))
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img_id = image.split('.')[0]
if IsTexture is True:
img = cv2.resize(img, (256, 256))
if i in val_id_list:
cv2.imwrite(os.path.join(save_val_img_dir, '{}.png'.format(img_id)), img)
else:
# crop from original image
img_ori = crop(img, [128, 128], [0, 0])
cv2.imwrite(os.path.join(save_train_img_dir, '{}_o.png'.format(img_id)), img_ori)
# crop from three rotation images
for k in range(0, 3):
img_r = rotation(img, [-20, 20])
img_r = crop(img_r, [128, 128], [30, 30])
cv2.imwrite(os.path.join(save_train_img_dir, '{}_r{:d}.png'.format(img_id, k)),img_r)
# crop from mirrored image
img_m = mirror(img)
img_m = crop(img_m, [128, 128], [0, 0])
cv2.imwrite(os.path.join(save_train_img_dir, '{}_m.png'.format(img_id)), img_m)
# crop from flipped image
img_f = flip(img)
img_f = crop(img_f, [128, 128], [0, 0])
cv2.imwrite(os.path.join(save_train_img_dir, '{}_f.png'.format(img_id)), img_f)
else:
img = cv2.resize(img, (128, 128))
if i in val_id_list:
cv2.imwrite(os.path.join(save_val_img_dir, '{}.png'.format(img_id)), img)
else:
# crop from original image
cv2.imwrite(os.path.join(save_train_img_dir, '{}_o.png'.format(img_id)), img)
# crop from three rotation images
for k in range(0, 3):
img_r = rotation(img, [90*(k+1), 90*(k+1)])
cv2.imwrite(os.path.join(save_train_img_dir, '{}_r{:d}.png'.format(img_id, k)),img_r)
# crop from mirrored image
img_m = mirror(img)
cv2.imwrite(os.path.join(save_train_img_dir, '{}_m.png'.format(img_id)), img_m)
# crop from flipped image
img_f = flip(img)
cv2.imwrite(os.path.join(save_train_img_dir, '{}_f.png'.format(img_id)), img_f) | en | 0.842682 | # make item directory # arragne test set # arrange train & val set # get val list # get & save images # crop from original image # crop from three rotation images # crop from mirrored image # crop from flipped image # crop from original image # crop from three rotation images # crop from mirrored image # crop from flipped image | 2.488984 | 2 |
DicomFilter.py | beresandras/LHYP | 0 | 6616999 | import numpy as np
import pandas as pd
import os
import pydicom
from tqdm import tqdm
class DicomFilter:
    """Collect DICOM header attributes from directories of ``.dcm`` files.

    After loading, attributes whose value is identical across all files are
    dropped (unless listed in *keepAttributes*), leaving only the
    discriminating metadata, which ``getDataFrame`` exposes as a pandas
    DataFrame (one row per file, one column per attribute).
    """
    def __init__(self, sourceDirs, keepAttributes=("StudyDescription", "SeriesDescription", "PatientSex", "PatientWeight", "ScanningSequence", "HeartRate", "PatientPosition", "PixelSpacing"), dropAttributes=("PixelData",), recursive=False):
        # Defaults are tuples now (membership-tested only): fixes the
        # mutable-default-argument anti-pattern without changing callers.
        self.paths = []        # source directory of each loaded file
        self.filenames = []    # file name of each loaded file (flat list)
        self.data = {}         # attribute name -> per-file value list
        self.dataLength = 0    # number of files loaded so far
        for sourceDir in sourceDirs:
            if recursive:
                # os.walk already visits sub-directories, so each one is
                # loaded non-recursively. (Plain loop instead of the old
                # side-effect-only list comprehension.)
                for subDir in os.walk(sourceDir):
                    self._loadDir(subDir[0], dropAttributes, recursive=False)
            else:
                self._loadDir(sourceDir, dropAttributes, recursive=False)
        if len(self.filenames) == 0:
            print('Error: no dicom files found in the given directories')
        # Drop attributes that carry no information (constant everywhere).
        newData = self.data.copy()
        for attr in self.data:
            if attr not in keepAttributes:
                try:
                    if len(set(self.data[attr])) <= 1:
                        del newData[attr]
                except TypeError:
                    # Unhashable values (e.g. lists): compare element tuples.
                    try:
                        if len(set(tuple(e) for e in self.data[attr])) <= 1:
                            del newData[attr]
                    except Exception:
                        # Value cannot be compared at all -> treat as constant.
                        del newData[attr]
        self.data = newData
    def _loadDir(self, sourceDir, dropAttributes, recursive):
        """Load every ``.dcm`` file directly inside *sourceDir*.

        ``recursive`` is accepted for signature compatibility but unused;
        recursion is handled by the caller via os.walk.
        """
        filenames = sorted(os.listdir(sourceDir))
        filenames = [filename for filename in filenames if filename.find('.dcm') != -1]
        for filename in filenames:
            dicomData = pydicom.dcmread(os.path.join(sourceDir, filename))
            attributes = [attr for attr in dir(dicomData) if attr[0].isupper()]
            # Pad attributes this file does not provide, keeping columns aligned.
            for attr in self.data:
                if attr not in attributes:
                    self.data[attr].append(None)
            for attr in attributes:
                if attr not in dropAttributes:
                    if attr not in self.data:
                        # New attribute: back-fill earlier files with None.
                        self.data[attr] = [None] * self.dataLength
                    self.data[attr].append(getattr(dicomData, attr))
            self.dataLength += 1
        # Bug fix: the original appended `len(filenames) * sourceDir` (string
        # repetition!) to paths and the whole per-directory list to
        # filenames, which also broke the "no files found" check above.
        # Track one entry per loaded file in both flat lists instead.
        self.paths.extend([sourceDir] * len(filenames))
        self.filenames.extend(filenames)
    def getDataFrame(self):
        """Return the collected (filtered) attributes as a pandas DataFrame."""
        return pd.DataFrame(data=self.data)
if __name__ == '__main__':
    # Demo run: scan the sample study recursively, keep every attribute,
    # and collect the metadata into a DataFrame (written to TSV below).
    scanner = DicomFilter(["../data/10635813AMR806/lale"], keepAttributes=[], recursive=True)
    dataFrame = scanner.getDataFrame()
dataFrame.to_csv("out.csv", sep="\t") | import numpy as np
import pandas as pd
import os
import pydicom
from tqdm import tqdm
class DicomFilter:
def __init__(self, sourceDirs, keepAttributes=["StudyDescription", "SeriesDescription", "PatientSex", "PatientWeight", "ScanningSequence", "HeartRate", "PatientPosition", "PixelSpacing"], dropAttributes=["PixelData"], recursive=False):
self.paths = []
self.filenames = []
self.data = {}
self.dataLength = 0
for sourceDir in sourceDirs:
if recursive:
[self._loadDir(subDir[0], dropAttributes, recursive=False) for subDir in os.walk(sourceDir)]
else:
self._loadDir(sourceDir, dropAttributes, recursive=False)
if len(self.filenames) == 0:
print('Error: no dicom files found in the given directories')
newData = self.data.copy()
for attr in self.data:
if attr not in keepAttributes:
try:
if len(set(self.data[attr])) <= 1:
del newData[attr]
except TypeError:
try:
if len(set(tuple(e) for e in self.data[attr])) <= 1:
del newData[attr]
except:
del newData[attr]
self.data = newData
def _loadDir(self, sourceDir, dropAttributes, recursive):
filenames = sorted(os.listdir(sourceDir))
filenames = [filename for filename in filenames if filename.find('.dcm') != -1]
for filename in filenames:
dicomData = pydicom.dcmread(os.path.join(sourceDir, filename))
attributes = [attr for attr in dir(dicomData) if attr[0].isupper()]
for attr in self.data:
if attr not in attributes:
self.data[attr].append(None)
for attr in attributes:
if attr not in dropAttributes:
if attr not in self.data:
self.data[attr] = [None] * self.dataLength
self.data[attr].append(getattr(dicomData, attr))
self.dataLength += 1
self.paths.append(len(filenames) * sourceDir)
self.filenames.append(filenames)
def getDataFrame(self):
return pd.DataFrame(data=self.data)
if __name__=='__main__':
dicomFilter = DicomFilter(["../data/10635813AMR806/lale"], keepAttributes=[], recursive=True)
dataFrame = dicomFilter.getDataFrame()
dataFrame.to_csv("out.csv", sep="\t") | none | 1 | 2.512161 | 3 | |
snake_client/init_state.py | mlambir/channels_talk_pyconar2016 | 12 | 6617000 | <reponame>mlambir/channels_talk_pyconar2016
from math import sin
import pygame
from noise import snoise2
from utils import COLORS
octaves = 4
freq = 32.0 * octaves
def _get_noise_val(x, y, t):
    # Sample 2-D simplex noise at pixel (x, y), scaled by the module-level
    # `freq`/`octaves`; `t` scrolls the field over time (y drifts 5x slower
    # than x). snoise2 returns a float roughly in [-1, 1].
    return snoise2((x + t) / freq, (y + t / 5) / freq, octaves)
class InitState(object):
    """Title-screen state: animated noise background plus a bouncing title.

    Call update() each frame, then draw(); get_next_state() returns a
    GameState once the player has pressed SPACE, otherwise this state.
    """
    def __init__(self, width, height):
        # width/height are in buffer pixels (the low-res render target).
        self.width = width
        self.height = height
        self.start = False  # set by update() when SPACE is pressed
        self.t = 0          # noise-field time offset, advanced in draw()
        self.title_font = pygame.font.Font('fonts/upheavtt.ttf', 20)
        txt = "PYCON SNAKE"
        self.title_text_surfaces = []
        # Pre-render each character twice, offset by 1px, for a drop shadow;
        # colorkey 0 makes the unused background transparent.
        for c in txt:
            t_w, t_h = self.title_font.size(c)
            surf = pygame.Surface((t_w + 1, t_h + 1))
            surf.blit(self.title_font.render(c, False, COLORS[3]), (1, 1))
            surf.blit(self.title_font.render(c, False, COLORS[0]), (0, 0))
            surf.set_colorkey(0)
            self.title_text_surfaces.append(surf)
        # Overall title dimensions, used to center it in draw().
        self.title_width = sum(s.get_width() for s in self.title_text_surfaces)
        self.title_height = max(s.get_height() for s in self.title_text_surfaces)
    def get_next_state(self):
        # Transition to gameplay once start was requested; otherwise stay here.
        if self.start:
            from game_state import GameState
            return GameState(self.width, self.height)
        return self
    def update(self, selected):
        # `selected` is unused here; presumably kept for a uniform state
        # interface across screens — confirm against the other states.
        keys = pygame.key.get_pressed()
        if keys[pygame.K_SPACE]:
            self.start = True
    def draw(self, buffer):
        t = pygame.time.get_ticks() / 100  # wall-clock time for the text wobble
        # Paint the scrolling noise background: four brightness bands,
        # dithered with (x + y) checker/stripe patterns.
        for x in range(self.width):
            for y in range(self.height):
                color = _get_noise_val(x, y, self.t)
                if color < -.4:
                    buffer.set_at((x, y), COLORS[1])
                elif color < 0:
                    if (x + y) % 2 == 0:
                        buffer.set_at((x, y), COLORS[1])
                    else:
                        buffer.set_at((x, y), COLORS[2])
                elif color < .3:
                    if (x + y) % 3 == 0:
                        buffer.set_at((x, y), COLORS[1])
                    else:
                        buffer.set_at((x, y), COLORS[2])
                else:
                    if (x + y) % 5 == 0:
                        buffer.set_at((x, y), COLORS[1])
                    else:
                        buffer.set_at((x, y), COLORS[2])
        self.t += .2  # scroll the noise field for the next frame
        # Center the title; each glyph bobs on its own sine phase.
        base_x = (self.width - self.title_width) / 2
        base_y = (self.height - self.title_height) / 2
        for n, s in enumerate(self.title_text_surfaces):
            buffer.blit(s, (base_x, base_y + 3 * sin(t + n)))
base_x += s.get_width() | from math import sin
import pygame
from noise import snoise2
from utils import COLORS
octaves = 4
freq = 32.0 * octaves
def _get_noise_val(x, y, t):
return snoise2((x + t) / freq, (y + t / 5) / freq, octaves)
class InitState(object):
def __init__(self, width, height):
self.width = width
self.height = height
self.start = False
self.t = 0
self.title_font = pygame.font.Font('fonts/upheavtt.ttf', 20)
txt = "PYCON SNAKE"
self.title_text_surfaces = []
for c in txt:
t_w, t_h = self.title_font.size(c)
surf = pygame.Surface((t_w + 1, t_h + 1))
surf.blit(self.title_font.render(c, False, COLORS[3]), (1, 1))
surf.blit(self.title_font.render(c, False, COLORS[0]), (0, 0))
surf.set_colorkey(0)
self.title_text_surfaces.append(surf)
self.title_width = sum(s.get_width() for s in self.title_text_surfaces)
self.title_height = max(s.get_height() for s in self.title_text_surfaces)
def get_next_state(self):
if self.start:
from game_state import GameState
return GameState(self.width, self.height)
return self
def update(self, selected):
keys = pygame.key.get_pressed()
if keys[pygame.K_SPACE]:
self.start = True
def draw(self, buffer):
t = pygame.time.get_ticks() / 100
for x in range(self.width):
for y in range(self.height):
color = _get_noise_val(x, y, self.t)
if color < -.4:
buffer.set_at((x, y), COLORS[1])
elif color < 0:
if (x + y) % 2 == 0:
buffer.set_at((x, y), COLORS[1])
else:
buffer.set_at((x, y), COLORS[2])
elif color < .3:
if (x + y) % 3 == 0:
buffer.set_at((x, y), COLORS[1])
else:
buffer.set_at((x, y), COLORS[2])
else:
if (x + y) % 5 == 0:
buffer.set_at((x, y), COLORS[1])
else:
buffer.set_at((x, y), COLORS[2])
self.t += .2
base_x = (self.width - self.title_width) / 2
base_y = (self.height - self.title_height) / 2
for n, s in enumerate(self.title_text_surfaces):
buffer.blit(s, (base_x, base_y + 3 * sin(t + n)))
base_x += s.get_width() | none | 1 | 2.955327 | 3 | |
2.Model Implementation/0. DNN/jskim_DNN/activations.py | jskim0406/Study | 0 | 6617001 | import numpy as np
from numpy import ndarray
import numpy as np
from .base import Operation
from typing import List
class Sigmoid(Operation):
    '''
    Sigmoid activation: output = 1 / (1 + exp(-input)).
    '''

    def __init__(self) -> None:
        '''Delegate to the base Operation constructor.'''
        super().__init__()

    def _output(self, inference: bool) -> np.ndarray:
        '''
        Forward pass: element-wise sigmoid of the stored input.
        '''
        return 1.0 / (1.0 + np.exp(-1.0 * self.input_))

    def _input_grad(self, output_grad: np.ndarray) -> np.ndarray:
        '''
        Gradient w.r.t. the input: sigma * (1 - sigma) * upstream gradient,
        reusing the cached forward output.
        '''
        local_grad = self.output * (1.0 - self.output)
        return local_grad * output_grad
class Linear(Operation):
    '''
    Identity activation: values pass through unchanged.
    '''

    def __init__(self) -> None:
        '''Delegate to the base Operation constructor.'''
        super().__init__()

    def _output(self, inference: bool) -> np.ndarray:
        '''The forward pass is the identity.'''
        return self.input_

    def _input_grad(self, output_grad: np.ndarray) -> np.ndarray:
        '''The local gradient of the identity is 1: pass the upstream gradient through.'''
        return output_grad
class Tanh(Operation):
    '''
    Hyperbolic-tangent activation.
    '''

    def __init__(self) -> None:
        super().__init__()

    def _output(self, inference: bool) -> ndarray:
        '''Forward pass: element-wise tanh of the stored input.'''
        return np.tanh(self.input_)

    def _input_grad(self, output_grad: ndarray) -> ndarray:
        '''d/dx tanh(x) = 1 - tanh(x)^2, reusing the cached forward output.'''
        local_grad = 1 - self.output * self.output
        return output_grad * local_grad
class ReLU(Operation):
    '''
    Rectified linear unit activation: output = max(input, 0).
    (Docstring fixed: it previously claimed "Hyperbolic tangent".)
    '''

    def __init__(self) -> None:
        super().__init__()

    def _output(self, inference: bool) -> ndarray:
        '''Forward pass: clamp negative inputs to zero.'''
        return np.clip(self.input_, 0, None)

    def _input_grad(self, output_grad: ndarray) -> ndarray:
        '''Pass the upstream gradient only where the unit was active.

        Bug fix: the previous mask ``self.output >= 0`` was always True,
        because the clipped output is never negative — so gradients leaked
        through inactive (negative-input) units. The derivative of ReLU is
        1 for input > 0 and 0 otherwise, so mask on the stored input.
        '''
        mask = self.input_ > 0
        return output_grad * mask
| import numpy as np
from numpy import ndarray
import numpy as np
from .base import Operation
from typing import List
class Sigmoid(Operation):
'''
sigmoid 활성화 함수
'''
def __init__(self)->None:
'''pass'''
super().__init__()
def _output(self,inference: bool)->np.ndarray:
'''
출력값 계산
'''
return 1.0/(1.0 + np.exp(-1.0 * self.input_))
def _input_grad(self, output_grad :np.ndarray)->np.ndarray:
'''
입력에 대한 gradient 계산
'''
sigmoid_backward = self.output * (1.0 - self.output)
# * 연산 순서 전환 디버깅함
input_grad = sigmoid_backward*output_grad
return input_grad
class Linear(Operation):
'''
항등 활성화 함수
'''
def __init__(self) -> None:
'''기반 클래스의 생성자 메서드 실행'''
super().__init__()
def _output(self, inference: bool) -> np.ndarray:
'''입력을 그대로 출력'''
return self.input_
def _input_grad(self, output_grad: np.ndarray) -> np.ndarray:
'''그대로 출력'''
return output_grad
class Tanh(Operation):
'''
Hyperbolic tangent activation function
'''
def __init__(self) -> None:
super().__init__()
def _output(self, inference: bool) -> ndarray:
return np.tanh(self.input_)
def _input_grad(self, output_grad: ndarray) -> ndarray:
return output_grad * (1 - self.output * self.output)
class ReLU(Operation):
    '''
    Rectified linear unit activation: output = max(input, 0).
    (Docstring fixed: it previously claimed "Hyperbolic tangent".)
    '''

    def __init__(self) -> None:
        super().__init__()

    def _output(self, inference: bool) -> ndarray:
        '''Forward pass: clamp negative inputs to zero.'''
        return np.clip(self.input_, 0, None)

    def _input_grad(self, output_grad: ndarray) -> ndarray:
        '''Pass the upstream gradient only where the unit was active.

        Bug fix: the previous mask ``self.output >= 0`` was always True,
        because the clipped output is never negative — so gradients leaked
        through inactive (negative-input) units. The derivative of ReLU is
        1 for input > 0 and 0 otherwise, so mask on the stored input.
        '''
        mask = self.input_ > 0
        return output_grad * mask
| ko | 0.996697 | sigmoid 활성화 함수 pass 출력값 계산 입력에 대한 gradient 계산 # * 연산 순서 전환 디버깅함 항등 활성화 함수 기반 클래스의 생성자 메서드 실행 입력을 그대로 출력 그대로 출력 Hyperbolic tangent activation function Hyperbolic tangent activation function | 3.04926 | 3 |
card_game.py | radonintro1234/cards-war-game-pythom | 1 | 6617002 | <reponame>radonintro1234/cards-war-game-pythom<filename>card_game.py<gh_stars>1-10
#!/usr/bin/python
# -*- coding: utf-8 -*-
__version__ = "0.2.0"
__author__ = "<NAME>"
__license__ = 'MIT'
__copyright__ = 'Copyright (c) 2020 <NAME> (radonintro1234)'
"""
Author : <NAME> (radonintro1234)
Github : https://github.com/radonintro1234
License : MIT
Copyright (c) 2020 <NAME> (radonintro1234)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the
Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
'''
------------------------------------------------------------------------------------------------------------------------------------------------------------------------
'''
from random import shuffle
# The four suit names used to build a full 52-card deck.
suits = ["Hearts", "Spades" ,"Clubs", "Diamonds"]
# Rank names ordered from lowest (Two) to highest (Ace).
ranks = ["Two", "Three", "Four", "Five", "Six", "Seven", "Eight", "Nine" , "Ten", "Jack", "Queen", "King", "Ace"]
# Numeric strength of each rank for round comparisons; Ace is high (14).
values = {"Two" : 2,
          "Three" : 3,
          "Four" : 4,
          "Five" : 5,
          "Six" : 6,
          "Seven" : 7,
          "Eight" : 8,
          "Nine" : 9,
          "Ten" : 10,
          "Jack" : 11,
          "Queen" : 12,
          "King" : 13,
          "Ace" : 14
          }
'''
------------------------------------------------------------------------------------------------------------------------------------------------------------------------
'''
class Card():
    """A single playing card with a rank, a suit and a numeric value."""

    def __init__(self, rank, suit):
        self.rank = rank
        self.suit = suit
        # Numeric strength is looked up in the module-level `values` table.
        self.value = values[rank]

    def __str__(self):
        """Human-readable form, e.g. 'Ace of Spades.'"""
        return f"{self.rank} of {self.suit}."
'''
------------------------------------------------------------------------------------------------------------------------------------------------------------------------
'''
class Deck():
    """A shuffled 52-card deck built from every rank/suit combination."""

    def __init__(self):
        # Cartesian product of ranks and suits, then one random ordering.
        self.deck_list = [Card(rank, suit) for rank in ranks for suit in suits]
        shuffle(self.deck_list)

    def __str__(self):
        """Report how many cards remain in the deck."""
        return f"This deck has {len(self.deck_list)} cards."

    def popCard(self):
        """Remove and return the top card."""
        return self.deck_list.pop()

    def shuffleDeck(self):
        """Re-randomise the order of the remaining cards."""
        shuffle(self.deck_list)

    def viewCards(self):
        """Print every remaining card, one per line."""
        for card in self.deck_list:
            print(card)
'''
------------------------------------------------------------------------------------------------------------------------------------------------------------------------
'''
class Player():
    """A named participant holding a personal pile of cards."""

    def __init__(self, name):
        self.name = name
        self.player_deck = []  # cards currently held by this player

    def __str__(self):
        """Summarise the player and how many cards they hold."""
        return f"{self.name} has {len(self.player_deck)} Cards."

    def addCard(self, card):
        """Add *card* to the pile, then re-randomise its order."""
        self.player_deck.append(card)
        shuffle(self.player_deck)

    def popCard(self):
        """Shuffle the pile, then remove and return one card."""
        shuffle(self.player_deck)
        return self.player_deck.pop()

    def viewCards(self):
        """Print every held card, one per line."""
        for held in self.player_deck:
            print(held)
'''
------------------------------------------------------------------------------------------------------------------------------------------------------------------------
'''
def main():
    """Run an interactive three-player game of War.

    Deals a full deck round-robin to three named players, plays rounds
    (with tie-break "wars") until a player runs out of cards, then
    finishes as a two-player duel until one winner remains. All input
    comes from stdin; all output goes to stdout.
    """
    # Start the program
    current_players = []
    deck1 = Deck()
    print(deck1)
    temp_name = str(input("Give a Player1 name : "))
    player1 = Player(temp_name)
    current_players.append(player1)
    temp_name = str(input("Give a Player2 name : "))
    player2 = Player(temp_name)
    current_players.append(player2)
    temp_name = str(input("Give a Player3 name : "))
    player3 = Player(temp_name)
    current_players.append(player3)
    # Deal the shuffled deck round-robin; the inner emptiness checks guard
    # against the deck running out in the middle of a pass.
    while len(deck1.deck_list) != 0:
        if len(deck1.deck_list) == 0:
            break
        player1.addCard(deck1.popCard())
        if len(deck1.deck_list) == 0:
            break
        player2.addCard(deck1.popCard())
        if len(deck1.deck_list) == 0:
            break
        player3.addCard(deck1.popCard())
    print(player1)
    print(player2)
    print(player3)
    # -------------------------------------------------------------------------------------------------
    # -------------------------------------------------------------------------------------------------
    # Phase 1: three-player rounds continue while nobody is eliminated.
    round_no = 0
    while(len(current_players) == 3):
        round_no += 1
        print("-" *100)
        print("Round " + str(round_no) + " :-")
        player1_popdeck = []
        player2_popdeck = []
        player3_popdeck = []
        player1_popdeck.append(player1.popCard())
        player2_popdeck.append(player2.popCard())
        player3_popdeck.append(player3.popCard())
        a=player1_popdeck[-1].value
        b=player2_popdeck[-1].value
        c=player3_popdeck[-1].value
        print("Comparison : " + str(player1_popdeck[-1]) + " -vs- " + str(player2_popdeck[-1]) + " -vs- " + str(player3_popdeck[-1]))
        # A single strict winner takes all three face-up piles.
        if (a>b and a>c):
            print("Round Winner: " + player1.name)
            player1.player_deck.extend(player1_popdeck)
            player1.player_deck.extend(player2_popdeck)
            player1.player_deck.extend(player3_popdeck)
        if (b>a and b>c):
            print("Round Winner: " + player2.name)
            player2.player_deck.extend(player1_popdeck)
            player2.player_deck.extend(player2_popdeck)
            player2.player_deck.extend(player3_popdeck)
        if (c>a and c>b):
            print("Round Winner: " + player3.name)
            player3.player_deck.extend(player1_popdeck)
            player3.player_deck.extend(player2_popdeck)
            player3.player_deck.extend(player3_popdeck)
        # -------------------------------------------------------------------------------------------------
        # -------------------------------------------------------------------------------------------------
        # Two-way tie-breaks ("wars"): the tied pair keeps drawing until the
        # tie breaks or one of them runs out of cards (and is eliminated).
        # NOTE(review): a three-way tie (a==b==c) matches none of the
        # branches below, so the drawn cards appear to be discarded — confirm
        # whether that is intended.
        if (a==b and a>c):
            while(a==b):
                print("a=b war..... lets try again!\n")
                if len(player1.player_deck) == 0:
                    print(player1.name + " Has an Empty Deck\n")
                    current_players.remove(player1)
                    print("Round Winner: " + player2.name)
                    player2.player_deck.extend(player1_popdeck)
                    player2.player_deck.extend(player2_popdeck)
                    player2.player_deck.extend(player3_popdeck)
                    break
                if len(player2.player_deck) == 0:
                    print(player2.name + " Has an Empty Deck\n")
                    current_players.remove(player2)
                    print("Round Winner: " + player1.name)
                    player1.player_deck.extend(player1_popdeck)
                    player1.player_deck.extend(player2_popdeck)
                    player1.player_deck.extend(player3_popdeck)
                    break
                player1_popdeck.append(player1.popCard())
                player2_popdeck.append(player2.popCard())
                a=player1_popdeck[-1].value
                b=player2_popdeck[-1].value
                print("Comparison : " + str(player1_popdeck[-1]) + " -vs- " + str(player2_popdeck[-1]))
                if(a>b):
                    print("Round Winner: " + player1.name)
                    player1.player_deck.extend(player1_popdeck)
                    player1.player_deck.extend(player2_popdeck)
                    player1.player_deck.extend(player3_popdeck)
                if(b>a):
                    print("Round Winner: " + player2.name)
                    player2.player_deck.extend(player1_popdeck)
                    player2.player_deck.extend(player2_popdeck)
                    player2.player_deck.extend(player3_popdeck)
            # -------------------------------------------------------------------------------------------------
        if (b==c and b>a):
            while(b==c):
                print("b=c war..... lets try again!\n")
                if len(player2.player_deck) == 0:
                    print(player2.name + " Has an Empty Deck\n")
                    current_players.remove(player2)
                    print("Round Winner: " + player3.name)
                    player3.player_deck.extend(player1_popdeck)
                    player3.player_deck.extend(player2_popdeck)
                    player3.player_deck.extend(player3_popdeck)
                    break
                if len(player3.player_deck) == 0:
                    print(player3.name + " Has an Empty Deck\n")
                    current_players.remove(player3)
                    print("Round Winner: " + player2.name)
                    player2.player_deck.extend(player1_popdeck)
                    player2.player_deck.extend(player2_popdeck)
                    player2.player_deck.extend(player3_popdeck)
                    break
                player2_popdeck.append(player2.popCard())
                player3_popdeck.append(player3.popCard())
                b=player2_popdeck[-1].value
                c=player3_popdeck[-1].value
                print("Comparison : " + str(player2_popdeck[-1]) + " -vs- " + str(player3_popdeck[-1]))
                if(b>c):
                    print("Round Winner: " + player2.name)
                    player2.player_deck.extend(player1_popdeck)
                    player2.player_deck.extend(player2_popdeck)
                    player2.player_deck.extend(player3_popdeck)
                if(c>b):
                    print("Round Winner: " + player3.name)
                    player3.player_deck.extend(player1_popdeck)
                    player3.player_deck.extend(player2_popdeck)
                    player3.player_deck.extend(player3_popdeck)
            # -------------------------------------------------------------------------------------------------
        if (a==c and a>b):
            while(a==c):
                print("a=c war..... lets try again!\n")
                if len(player1.player_deck) == 0:
                    print(player1.name + " Has an Empty Deck\n")
                    current_players.remove(player1)
                    print("Round Winner: " + player3.name)
                    player3.player_deck.extend(player1_popdeck)
                    player3.player_deck.extend(player2_popdeck)
                    player3.player_deck.extend(player3_popdeck)
                    break
                if len(player3.player_deck) == 0:
                    print(player3.name + " Has an Empty Deck\n")
                    current_players.remove(player3)
                    print("Round Winner: " + player1.name)
                    player1.player_deck.extend(player1_popdeck)
                    player1.player_deck.extend(player2_popdeck)
                    player1.player_deck.extend(player3_popdeck)
                    break
                player1_popdeck.append(player1.popCard())
                player3_popdeck.append(player3.popCard())
                a=player1_popdeck[-1].value
                c=player3_popdeck[-1].value
                print("Comparison : " + str(player1_popdeck[-1]) + " -vs- " + str(player3_popdeck[-1]))
                if(a>c):
                    print("Round Winner: " + player1.name)
                    player1.player_deck.extend(player1_popdeck)
                    player1.player_deck.extend(player2_popdeck)
                    player1.player_deck.extend(player3_popdeck)
                if(c>a):
                    print("Round Winner: " + player3.name)
                    player3.player_deck.extend(player1_popdeck)
                    player3.player_deck.extend(player2_popdeck)
                    player3.player_deck.extend(player3_popdeck)
            # -------------------------------------------------------------------------------------------------
        # check if any player has zero cards left
        # The try/except guards against a player having already been removed
        # from current_players inside a war branch above.
        try:
            if len(player1.player_deck) == 0:
                print(player1.name + " Has an Empty Deck\n")
                current_players.remove(player1)
        except:
            pass
        try:
            if len(player2.player_deck) == 0:
                print(player2.name + " Has an Empty Deck\n")
                current_players.remove(player2)
        except:
            pass
        try:
            if len(player3.player_deck) == 0:
                print(player3.name + " Has an Empty Deck\n")
                current_players.remove(player3)
        except:
            pass
    # -----------------------------------------------------------------------------------------------------------------------
    # -----------------------------------------------------------------------------------------------------------------------
    # Phase 2: resolve the outcome — outright winner, or a two-player duel.
    if len(current_players) == 1:
        print("-" *100)
        print(current_players[0].name + " wins!")
    elif len(current_players) == 2:
        print("-" *100)
        print("Only Two Players Remaning : " + current_players[0].name + " and " + current_players[1].name)
        game_on = True
        player_one = current_players[0]
        player_two = current_players[1]
        while game_on:
            print("-" *100)
            round_no += 1
            print(f"Round {round_no}")
            if len(player_one.player_deck)==0:
                print("-"*100)
                print(player_one.name + " has an Empty Deck.\n")
                print("-"*100)
                print("-"*100)
                print( player_two.name + " wins.\n")
                print("-"*100)
                print("-"*100)
                game_on = False
                break
            if len(player_two.player_deck)==0:
                print("-"*100)
                print(player_two.name + " has an empty Deck.\n")
                print("-"*100)
                print("-"*100)
                print(player_one.name + " wins.\n")
                print("-"*100)
                print("-"*100)
                game_on = False
                break
            player_one_cards = []
            player_one_cards.append(player_one.popCard())
            player_two_cards = []
            player_two_cards.append(player_two.popCard())
            is_war = True
            while is_war:
                print("Comparison : " + str(player_one_cards[-1]) + " -vs- " + str(player_two_cards[-1]))
                if player_one_cards[-1].value > player_two_cards[-1].value :
                    player_one.player_deck.extend(player_one_cards)
                    player_one.player_deck.extend(player_two_cards)
                    print("Round Winner : " + player_one.name)
                    is_war = False
                    break
                elif player_one_cards[-1].value < player_two_cards[-1].value :
                    player_two.player_deck.extend(player_one_cards)
                    player_two.player_deck.extend(player_two_cards)
                    print("Round Winner : " + player_two.name)
                    is_war = False
                    break
                else:
                    print('WAR! WAR! WAR! WAR! WAR!\n')
                    # This occurs when the cards are equal.
                    # We'll grab another card each and continue the current war.
                    # First check to see if player has enough cards
                    # Check to see if a player is out of cards:
                    if len(player_one.player_deck) == 0:
                        print("-"*100)
                        print(player_one.name + " has an Empty Deck.\n")
                        print("-"*100)
                        print("-"*100)
                        print( player_two.name + " Wins!\n")
                        print("-"*100)
                        print("-"*100)
                        game_on = False
                        break
                    elif len(player_two.player_deck) == 0:
                        print("-"*100)
                        print(player_two.name + " has an Empty Deck.\n")
                        print("-"*100)
                        print("-"*100)
                        print( player_one.name + " Wins!\n")
                        print("-"*100)
                        print("-"*100)
                        game_on = False
                        break
                    # Otherwise, we're still at war, so we'll add the next cards
                    else:
                        player_one_cards.append(player_one.popCard())
                        player_two_cards.append(player_two.popCard())
    else:
        print("ERROR!!!!!!!\n")
'''
------------------------------------------------------------------------------------------------------------------------------------------------------------------------
------------------------------------------------------------------------------------------------------------------------------------------------------------------------
'''
# Entry point: run the interactive game only when executed as a script.
if __name__ == '__main__':
    main()
| #!/usr/bin/python
# -*- coding: utf-8 -*-
__version__ = "0.2.0"
__author__ = "<NAME>"
__license__ = 'MIT'
__copyright__ = 'Copyright (c) 2020 <NAME> (radonintro1234)'
"""
Author : <NAME> (radonintro1234)
Github : https://github.com/radonintro1234
License : MIT
Copyright (c) 2020 <NAME> (radonintro1234)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the
Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
'''
------------------------------------------------------------------------------------------------------------------------------------------------------------------------
'''
from random import shuffle
suits = ["Hearts", "Spades" ,"Clubs", "Diamonds"]
ranks = ["Two", "Three", "Four", "Five", "Six", "Seven", "Eight", "Nine" , "Ten", "Jack", "Queen", "King", "Ace"]
values = {"Two" : 2,
"Three" : 3,
"Four" : 4,
"Five" : 5,
"Six" : 6,
"Seven" : 7,
"Eight" : 8,
"Nine" : 9,
"Ten" : 10,
"Jack" : 11,
"Queen" : 12,
"King" : 13,
"Ace" : 14
}
'''
------------------------------------------------------------------------------------------------------------------------------------------------------------------------
'''
class Card():
"""Class to create a card"""
def __init__(self, rank, suit):
self.rank = rank
self.suit = suit
self.value = values[rank]
def __str__(self):
"Function to print the Card"
return str(self.rank) + " of " + self.suit + "."
'''
------------------------------------------------------------------------------------------------------------------------------------------------------------------------
'''
class Deck():
"""Class to create a Deck"""
def __init__(self):
self.deck_list = []
for rank in ranks:
for suit in suits:
temp_card = Card(rank,suit)
self.deck_list.append(temp_card)
shuffle(self.deck_list)
def __str__(self):
"Function to print a deck"
return str("This deck has ") + str(len(self.deck_list)) + str(" cards.")
def popCard(self):
"Function to pop the Top Card"
return self.deck_list.pop()
def shuffleDeck(self):
"Function to manually shuffle the Deck"
shuffle(self.deck_list)
def viewCards(self):
"Function to viewall the cards of a Deck"
for card in self.deck_list:
print(card)
'''
------------------------------------------------------------------------------------------------------------------------------------------------------------------------
'''
class Player():
"""Class to create a Player"""
def __init__(self, name):
self.name = name
self.player_deck = []
def __str__(self):
"Function to print Player Properties"
return self.name + " has " + str(len(self.player_deck)) + " Cards."
def addCard(self, card):
"Function to add a card to Player's Deck"
self.player_deck.append(card)
shuffle(self.player_deck)
def popCard(self):
"Function to pop a card from Player's Deck"
shuffle(self.player_deck)
return self.player_deck.pop()
def viewCards(self):
"Function to view all the player's Current cards"
for card in self.player_deck:
print(card)
'''
------------------------------------------------------------------------------------------------------------------------------------------------------------------------
'''
def main():
# Start the program
current_players = []
deck1 = Deck()
print(deck1)
temp_name = str(input("Give a Player1 name : "))
player1 = Player(temp_name)
current_players.append(player1)
temp_name = str(input("Give a Player2 name : "))
player2 = Player(temp_name)
current_players.append(player2)
temp_name = str(input("Give a Player3 name : "))
player3 = Player(temp_name)
current_players.append(player3)
while len(deck1.deck_list) != 0:
if len(deck1.deck_list) == 0:
break
player1.addCard(deck1.popCard())
if len(deck1.deck_list) == 0:
break
player2.addCard(deck1.popCard())
if len(deck1.deck_list) == 0:
break
player3.addCard(deck1.popCard())
print(player1)
print(player2)
print(player3)
# -------------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------------
round_no = 0
while(len(current_players) == 3):
round_no += 1
print("-" *100)
print("Round " + str(round_no) + " :-")
player1_popdeck = []
player2_popdeck = []
player3_popdeck = []
player1_popdeck.append(player1.popCard())
player2_popdeck.append(player2.popCard())
player3_popdeck.append(player3.popCard())
a=player1_popdeck[-1].value
b=player2_popdeck[-1].value
c=player3_popdeck[-1].value
print("Comparison : " + str(player1_popdeck[-1]) + " -vs- " + str(player2_popdeck[-1]) + " -vs- " + str(player3_popdeck[-1]))
if (a>b and a>c):
print("Round Winner: " + player1.name)
player1.player_deck.extend(player1_popdeck)
player1.player_deck.extend(player2_popdeck)
player1.player_deck.extend(player3_popdeck)
if (b>a and b>c):
print("Round Winner: " + player2.name)
player2.player_deck.extend(player1_popdeck)
player2.player_deck.extend(player2_popdeck)
player2.player_deck.extend(player3_popdeck)
if (c>a and c>b):
print("Round Winner: " + player3.name)
player3.player_deck.extend(player1_popdeck)
player3.player_deck.extend(player2_popdeck)
player3.player_deck.extend(player3_popdeck)
# -------------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------------
if (a==b and a>c):
while(a==b):
print("a=b war..... lets try again!\n")
if len(player1.player_deck) == 0:
print(player1.name + " Has an Empty Deck\n")
current_players.remove(player1)
print("Round Winner: " + player2.name)
player2.player_deck.extend(player1_popdeck)
player2.player_deck.extend(player2_popdeck)
player2.player_deck.extend(player3_popdeck)
break
if len(player2.player_deck) == 0:
print(player2.name + " Has an Empty Deck\n")
current_players.remove(player2)
print("Round Winner: " + player1.name)
player1.player_deck.extend(player1_popdeck)
player1.player_deck.extend(player2_popdeck)
player1.player_deck.extend(player3_popdeck)
break
player1_popdeck.append(player1.popCard())
player2_popdeck.append(player2.popCard())
a=player1_popdeck[-1].value
b=player2_popdeck[-1].value
print("Comparison : " + str(player1_popdeck[-1]) + " -vs- " + str(player2_popdeck[-1]))
if(a>b):
print("Round Winner: " + player1.name)
player1.player_deck.extend(player1_popdeck)
player1.player_deck.extend(player2_popdeck)
player1.player_deck.extend(player3_popdeck)
if(b>a):
print("Round Winner: " + player2.name)
player2.player_deck.extend(player1_popdeck)
player2.player_deck.extend(player2_popdeck)
player2.player_deck.extend(player3_popdeck)
# -------------------------------------------------------------------------------------------------
if (b==c and b>a):
while(b==c):
print("b=c war..... lets try again!\n")
if len(player2.player_deck) == 0:
print(player2.name + " Has an Empty Deck\n")
current_players.remove(player2)
print("Round Winner: " + player3.name)
player3.player_deck.extend(player1_popdeck)
player3.player_deck.extend(player2_popdeck)
player3.player_deck.extend(player3_popdeck)
break
if len(player3.player_deck) == 0:
print(player3.name + " Has an Empty Deck\n")
current_players.remove(player3)
print("Round Winner: " + player2.name)
player2.player_deck.extend(player1_popdeck)
player2.player_deck.extend(player2_popdeck)
player2.player_deck.extend(player3_popdeck)
break
player2_popdeck.append(player2.popCard())
player3_popdeck.append(player3.popCard())
b=player2_popdeck[-1].value
c=player3_popdeck[-1].value
print("Comparison : " + str(player2_popdeck[-1]) + " -vs- " + str(player3_popdeck[-1]))
if(b>c):
print("Round Winner: " + player2.name)
player2.player_deck.extend(player1_popdeck)
player2.player_deck.extend(player2_popdeck)
player2.player_deck.extend(player3_popdeck)
if(c>b):
print("Round Winner: " + player3.name)
player3.player_deck.extend(player1_popdeck)
player3.player_deck.extend(player2_popdeck)
player3.player_deck.extend(player3_popdeck)
# -------------------------------------------------------------------------------------------------
if (a==c and a>b):
while(a==c):
print("a=c war..... lets try again!\n")
if len(player1.player_deck) == 0:
print(player1.name + " Has an Empty Deck\n")
current_players.remove(player1)
print("Round Winner: " + player3.name)
player3.player_deck.extend(player1_popdeck)
player3.player_deck.extend(player2_popdeck)
player3.player_deck.extend(player3_popdeck)
break
if len(player3.player_deck) == 0:
print(player3.name + " Has an Empty Deck\n")
current_players.remove(player3)
print("Round Winner: " + player1.name)
player1.player_deck.extend(player1_popdeck)
player1.player_deck.extend(player2_popdeck)
player1.player_deck.extend(player3_popdeck)
break
player1_popdeck.append(player1.popCard())
player3_popdeck.append(player3.popCard())
a=player1_popdeck[-1].value
c=player3_popdeck[-1].value
print("Comparison : " + str(player1_popdeck[-1]) + " -vs- " + str(player3_popdeck[-1]))
if(a>c):
print("Round Winner: " + player1.name)
player1.player_deck.extend(player1_popdeck)
player1.player_deck.extend(player2_popdeck)
player1.player_deck.extend(player3_popdeck)
if(c>a):
print("Round Winner: " + player3.name)
player3.player_deck.extend(player1_popdeck)
player3.player_deck.extend(player2_popdeck)
player3.player_deck.extend(player3_popdeck)
# -------------------------------------------------------------------------------------------------
# check if any player has zero cards left
try:
if len(player1.player_deck) == 0:
print(player1.name + " Has an Empty Deck\n")
current_players.remove(player1)
except:
pass
try:
if len(player2.player_deck) == 0:
print(player2.name + " Has an Empty Deck\n")
current_players.remove(player2)
except:
pass
try:
if len(player3.player_deck) == 0:
print(player3.name + " Has an Empty Deck\n")
current_players.remove(player3)
except:
pass
# -----------------------------------------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------------------------------------------
if len(current_players) == 1:
print("-" *100)
print(current_players[0].name + " wins!")
elif len(current_players) == 2:
print("-" *100)
print("Only Two Players Remaning : " + current_players[0].name + " and " + current_players[1].name)
game_on = True
player_one = current_players[0]
player_two = current_players[1]
while game_on:
print("-" *100)
round_no += 1
print(f"Round {round_no}")
if len(player_one.player_deck)==0:
print("-"*100)
print(player_one.name + " has an Empty Deck.\n")
print("-"*100)
print("-"*100)
print( player_two.name + " wins.\n")
print("-"*100)
print("-"*100)
game_on = False
break
if len(player_two.player_deck)==0:
print("-"*100)
print(player_two.name + " has an empty Deck.\n")
print("-"*100)
print("-"*100)
print(player_one.name + " wins.\n")
print("-"*100)
print("-"*100)
game_on = False
break
player_one_cards = []
player_one_cards.append(player_one.popCard())
player_two_cards = []
player_two_cards.append(player_two.popCard())
is_war = True
while is_war:
print("Comparison : " + str(player_one_cards[-1]) + " -vs- " + str(player_two_cards[-1]))
if player_one_cards[-1].value > player_two_cards[-1].value :
player_one.player_deck.extend(player_one_cards)
player_one.player_deck.extend(player_two_cards)
print("Round Winner : " + player_one.name)
is_war = False
break
elif player_one_cards[-1].value < player_two_cards[-1].value :
player_two.player_deck.extend(player_one_cards)
player_two.player_deck.extend(player_two_cards)
print("Round Winner : " + player_two.name)
is_war = False
break
else:
print('WAR! WAR! WAR! WAR! WAR!\n')
# This occurs when the cards are equal.
# We'll grab another card each and continue the current war.
# First check to see if player has enough cards
# Check to see if a player is out of cards:
if len(player_one.player_deck) == 0:
print("-"*100)
print(player_one.name + " has an Empty Deck.\n")
print("-"*100)
print("-"*100)
print( player_two.name + " Wins!\n")
print("-"*100)
print("-"*100)
game_on = False
break
elif len(player_two.player_deck) == 0:
print("-"*100)
print(player_two.name + " has an Empty Deck.\n")
print("-"*100)
print("-"*100)
print( player_one.name + " Wins!\n")
print("-"*100)
print("-"*100)
game_on = False
break
# Otherwise, we're still at war, so we'll add the next cards
else:
player_one_cards.append(player_one.popCard())
player_two_cards.append(player_two.popCard())
else:
print("ERROR!!!!!!!\n")
'''
------------------------------------------------------------------------------------------------------------------------------------------------------------------------
------------------------------------------------------------------------------------------------------------------------------------------------------------------------
'''
if __name__ == '__main__':
main() | en | 0.410685 | #!/usr/bin/python # -*- coding: utf-8 -*- Author : <NAME> (radonintro1234) Github : https://github.com/radonintro1234 License : MIT Copyright (c) 2020 <NAME> (radonintro1234) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------ ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ Class to create a card ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ Class to create a Deck ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ Class to create a Player ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ # Start the program # ------------------------------------------------------------------------------------------------- # ------------------------------------------------------------------------------------------------- # ------------------------------------------------------------------------------------------------- # ------------------------------------------------------------------------------------------------- # ------------------------------------------------------------------------------------------------- # ------------------------------------------------------------------------------------------------- # ------------------------------------------------------------------------------------------------- # check if any player has zero cards left # ----------------------------------------------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------------------------------------------- # This occurs when the cards are equal. 
# We'll grab another card each and continue the current war. # First check to see if player has enough cards # Check to see if a player is out of cards: # Otherwise, we're still at war, so we'll add the next cards ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | 1.36807 | 1 |
test.py | Aczy156/compiler-theory-algorithm | 6 | 6617003 |
# FIRST / FOLLOW map each nonterminal (a single uppercase letter) to a string
# whose characters are the members of its FIRST / FOLLOW set
FIRST = {}
FOLLOW = {}
# grammar productions as "LHS->RHS" strings; uppercase letters are
# nonterminals, every other character is a terminal and 'ε' denotes the
# empty production (this is the classic LL(1) arithmetic expression grammar)
grammars = [
    'E->TG',
    'G->+TG',
    'G->ε',
    'T->FS',
    'S->*FS',
    'S->ε',
    'F->(E)',
    'F->i'
]
# alternative grammar including '-' and '/' operators, kept for reference
# grammars = [
#     'E->TG',
#     'G->+TG',
#     'G->-TG',
#     'G->ε',
#     'T->FS',
#     'S->*FS',
#     'S->/FS',
#     'S->ε',
#     'F->(E)',
#     'F->i',]
# initialize the FIRST and FOLLOW dictionaries: every left-hand-side
# nonterminal starts with an empty FIRST set and a FOLLOW set seeded with the
# end-of-input marker '#'
def initail():
    """Initialize FIRST/FOLLOW entries for every LHS nonterminal.

    The misspelled name ("initialize") is kept because module level code
    calls it.  Improvements over the original: no shadowing of the builtin
    ``str``, the RHS of the production is no longer split and discarded.
    """
    for production in grammars:
        lhs = production.split("->")[0]
        FIRST[lhs] = ""
        FOLLOW[lhs] = "#"
### FIRST computation, part one: a production whose right-hand side starts
### with a terminal (or 'ε') contributes that symbol directly to FIRST(LHS)
def getFirst():
    """Seed FIRST sets from productions that start with a terminal or ε."""
    for production in grammars:
        pieces = production.split("->")
        lhs, rhs = pieces[0], pieces[1]
        # uppercase = nonterminal, so anything else is a direct FIRST member
        if not rhs[0].isupper():
            FIRST[lhs] = FIRST.get(lhs) + rhs[0]
## FIRST computation, part two: for a production A -> B... whose right-hand
## side starts with a nonterminal B, merge FIRST(B) into FIRST(A)
def getFirst_2():
    """Propagate FIRST sets through productions starting with a nonterminal.

    NOTE(review): only the leading symbol of the RHS is considered; ε-chains
    through subsequent symbols are not handled by this implementation.
    """
    for production in grammars:
        pieces = production.split("->")
        lhs, rhs = pieces[0], pieces[1]
        if rhs[0].isupper():
            FIRST[lhs] = FIRST.get(lhs) + FIRST.get(rhs[0])
def getFisrt_3():
    """Run getFirst_2 to a fixed point, deduplicating each FIRST entry.

    Bug fix: the original took ``test = FIRST`` -- an alias, not a copy --
    so the convergence check compared the dict with itself and the loop
    always terminated after a single pass (the module level driver worked
    around this by calling the function twice).  Snapshotting a real copy
    makes the fixed-point iteration converge for any grammar.
    The misspelled name ("getFirst") is kept because callers use it.
    """
    while True:
        snapshot = dict(FIRST)
        getFirst_2()
        # collapse duplicate terminals within each entry
        for nonterminal, terminals in FIRST.items():
            FIRST[nonterminal] = "".join(set(terminals))
        if snapshot == FIRST:
            break
def getFOLLOW_3():
    """Run getFollow to a fixed point, deduplicating each FOLLOW entry.

    Bug fix: like getFisrt_3, the original compared FOLLOW against an alias
    of itself (``test = FOLLOW``), so the loop always ran exactly once; a
    real copy is snapshotted instead so the iteration actually converges.
    """
    while True:
        snapshot = dict(FOLLOW)
        getFollow()
        # collapse duplicate symbols within each entry
        for nonterminal, symbols in FOLLOW.items():
            FOLLOW[nonterminal] = "".join(set(symbols))
        if snapshot == FOLLOW:
            break
## FOLLOW computation: scan each production right-to-left and add the
## appropriate FIRST/FOLLOW contributions for every nonterminal in the RHS
def getFollow():
    """Accumulate one round of FOLLOW-set contributions from all productions.

    NOTE(review): the loop variable ``str`` shadows the builtin; kept as-is.
    """
    for str in grammars:
        part_begin = str.split("->")[0]
        part_end = str.split("->")[1]
        ## a production like S->a deriving a single symbol contributes nothing
        if len(part_end) == 1:
            continue
        ## otherwise process the right-hand side
        else:
            # split the right-hand side into single symbols and reverse them
            temp = []
            for i in part_end:
                temp.append(i)
            temp.reverse()
            # nonterminal at the end of the sentential form: it inherits
            # FOLLOW(LHS), which was seeded with the end marker '#'
            if temp[0].isupper() :
                FOLLOW[temp[0] ]= FOLLOW.get(temp[0]) + FOLLOW.get(part_begin)
                temp1 = temp[0]
                # temp1 is always the symbol to the *right* of i in the RHS
                for i in temp[1:]:
                    if not i.isupper():
                        temp1 = i
                    else:
                        if temp1.isupper():
                            # FOLLOW(i) += FIRST(next) minus ε ...
                            FOLLOW[i] = FOLLOW.get(i) + FIRST.get(temp1).replace("ε","")
                            # ... and if ε ∈ FIRST(next), also FOLLOW(LHS)
                            if ('ε' in FIRST.get(temp1)):
                                FOLLOW[i] = FOLLOW.get(i) + FOLLOW.get(part_begin)
                        else:
                            # next symbol is a terminal: it follows i directly
                            FOLLOW[i] = FOLLOW.get(i) + temp1
                        temp1 = i
            # terminal at the end of the sentential form
            else:
                temp1 = temp[0]
                for i in temp[1:]:
                    if not i.isupper():
                        temp1 = i
                    else:
                        if temp1.isupper():
                            # NOTE(review): unlike the branch above, ε is not
                            # stripped from FIRST here and FOLLOW(LHS) is not
                            # propagated -- possibly incomplete; confirm the
                            # intended algorithm before relying on this path
                            FOLLOW[i] = FOLLOW.get(i) + FIRST.get(temp1)
                        else:
                            FOLLOW[i] = FOLLOW.get(i) + temp1
                        temp1 = i
# Drive the computation: seed the sets, then iterate to a fixed point.  The
# double invocations are kept from the original (where they compensated for
# a convergence bug); with converging fixed-point loops the second call is a
# harmless no-op pass.
initail()
getFirst()
getFisrt_3()
getFisrt_3()
getFOLLOW_3()
getFOLLOW_3()
# Print each set as "NAME(X) = {a,b,c}".  ",".join replaces the original
# manual concatenation, which shadowed the builtin ``str`` and raised
# IndexError for an empty set.
for nonterminal, terminals in FIRST.items():
    print("FIRST(" + nonterminal + ")" + " = {" + ",".join(terminals) + "}")
for nonterminal, symbols in FOLLOW.items():
    print("FOLLOW(" + nonterminal + ")" + " = {" + ",".join(symbols) + "}")
|
FIRST = {}
FOLLOW = {}
grammars = [
'E->TG',
'G->+TG',
'G->ε',
'T->FS',
'S->*FS',
'S->ε',
'F->(E)',
'F->i'
]
# grammars = [
# 'E->TG',
# 'G->+TG',
# 'G->-TG',
# 'G->ε',
# 'T->FS',
# 'S->*FS',
# 'S->/FS',
# 'S->ε',
# 'F->(E)',
# 'F->i',]
#初始化 first 集 和follow集合字典的键值对中的 值 为空
def initail():
for str in grammars :
part_begin = str.split("->")[0]
part_end = str.split("->")[1]
FIRST[part_begin] = ""
FOLLOW[part_begin] = "#"
###求first集 中第第一部分针对 -> 直接推出第一个字符为终结符 部分
def getFirst():
for str in grammars:
part_begin = str.split("->")[0]
part_end = str.split("->")[1]
if not part_end[0].isupper():
FIRST[part_begin] = FIRST.get(part_begin) + part_end[0]
##求first第二部分 针对 A -> B型 把B的first集加到A 的first集合中
def getFirst_2():
for str in grammars:
part_begin = ''
part_end = ''
part_begin += str.split('->')[0]
part_end += str.split('->')[1]
##如果型如A ->B 则把B的first集加到A 的first集中去
if part_end[0].isupper():
FIRST[part_begin] = FIRST.get(part_begin) + FIRST.get(part_end[0])
def getFisrt_3():
while(1):
test = FIRST
getFirst_2()
##去除重复项
for i , j in FIRST.items():
temp = ""
for word in list(set(j)):
temp += word
FIRST[i] = temp
if test == FIRST:
break
def getFOLLOW_3():
while(1):
test = FOLLOW
getFollow()
##去除重复项
for i , j in FOLLOW.items():
temp = ""
for word in list(set(j)):
temp += word
FOLLOW[i] = temp
if test == FOLLOW:
break
##计算follow集的第一部分,先计算 S -> A b 类型的
def getFollow():
for str in grammars:
part_begin = str.split("->")[0]
part_end = str.split("->")[1]
##如果是 S->a 直接推出终结符 则 continue
if len(part_end) == 1:
continue
##否则执行下面的操作
else:
#将->后面的分开再倒序
temp = []
for i in part_end:
temp.append(i)
temp.reverse()
#如果非终结符在句型的末端则把"#" 加入进去
if temp[0].isupper() :
FOLLOW[temp[0] ]= FOLLOW.get(temp[0]) + FOLLOW.get(part_begin)
temp1 = temp[0]
for i in temp[1:]:
if not i.isupper():
temp1 = i
else:
if temp1.isupper():
FOLLOW[i] = FOLLOW.get(i) + FIRST.get(temp1).replace("ε","")
if ('ε' in FIRST.get(temp1)):
FOLLOW[i] = FOLLOW.get(i) + FOLLOW.get(part_begin)
else:
FOLLOW[i] = FOLLOW.get(i) + temp1
temp1 = i
# 如果终结符在句型的末端
else:
temp1 = temp[0]
for i in temp[1:]:
if not i.isupper():
temp1 = i
else:
if temp1.isupper():
FOLLOW[i] = FOLLOW.get(i) + FIRST.get(temp1)
else:
FOLLOW[i] = FOLLOW.get(i) + temp1
temp1 = i
initail()
getFirst()
getFisrt_3()
getFisrt_3()
#print( FIRST )
getFOLLOW_3()
getFOLLOW_3()
#print(FOLLOW)
for i ,j in FIRST.items() :
str = j[0]
for temp in j[1:]:
str = str+ ',' +temp
print("FIRST("+ i + ")" + " = {"+str+"}")
for i ,j in FOLLOW.items():
str = j[0]
for temp in j[1:]:
str = str + ',' + temp
print("FOLLOW("+ i + ")" + " = {"+str+"}")
| zh | 0.904772 | # grammars = [ # 'E->TG', # 'G->+TG', # 'G->-TG', # 'G->ε', # 'T->FS', # 'S->*FS', # 'S->/FS', # 'S->ε', # 'F->(E)', # 'F->i',] #初始化 first 集 和follow集合字典的键值对中的 值 为空 ###求first集 中第第一部分针对 -> 直接推出第一个字符为终结符 部分 ##求first第二部分 针对 A -> B型 把B的first集加到A 的first集合中 ##如果型如A ->B 则把B的first集加到A 的first集中去 ##去除重复项 ##去除重复项 ##计算follow集的第一部分,先计算 S -> A b 类型的 ##如果是 S->a 直接推出终结符 则 continue ##否则执行下面的操作 #将->后面的分开再倒序 #如果非终结符在句型的末端则把"#" 加入进去 # 如果终结符在句型的末端 #print( FIRST ) #print(FOLLOW) | 3.17894 | 3 |
c4/policyengine/policyEngine.py | Brewgarten/c4-policy-engine | 0 | 6617004 | """
Copyright (c) IBM 2015-2017. All Rights Reserved.
Project name: c4-policy-engine
This project is licensed under the MIT License, see LICENSE
A policy engine implementation with support for events and actions as well as textual representations
"""
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from datetime import datetime
import inspect
import logging
import multiprocessing
import re
import socket
import time
import traceback
from c4.messaging import RouterClient
import c4.policies
import c4.policyengine.actions
import c4.policyengine.events
import c4.policyengine.events.operators
from c4.system.backend import Backend
from c4.system.configuration import (States as ConfigStates, Roles)
from c4.system.messages import Operation
from c4.utils.enum import Enum
from c4.utils.jsonutil import JSONSerializable
from c4.utils.logutil import ClassLogger
from c4.utils.util import (callWithVariableArguments,
getFormattedArgumentString, getFullModuleName, getModuleClasses)
log = logging.getLogger(__name__)
class States(Enum):
    """
    Enumeration of the states a policy can be in
    """
    # policy is active and evaluated by the policy engine
    ENABLED = "enabled"
    # policy is skipped during evaluation
    DISABLED = "disabled"
@ClassLogger
class Event(object):
    """
    An event implementation

    Abstract base class: concrete events define a class level ``id`` and
    implement :meth:`evaluate`; :attr:`value` exposes the current result.
    """
    # NOTE(review): Python 2 style metaclass declaration; on Python 3 this
    # attribute has no effect and abstract methods are not enforced --
    # confirm the intended runtime
    __metaclass__ = ABCMeta
    # unique identifier of the event type, set by subclasses
    id = None
    def __init__(self):
        pass
    # TODO: type, group, severity, description
    # see http://www-01.ibm.com/support/knowledgecenter/SSULQD_7.1.0/com.ibm.nz.adm.doc/r_sysadm_template_event_rules.html
    @abstractmethod
    def evaluate(self):
        """
        Evaluate the event
        .. note::
            Subclasses should implement this
        :returns: value
        """
    @property
    def value(self):
        """
        Value of the event (recomputed via :meth:`evaluate` on every access)
        """
        return self.evaluate()
    def __repr__(self, *args, **kwargs):
        return "({0})".format(self.id)
    def __str__(self, *args, **kwargs):
        # note: rendering an event evaluates it
        return "({0} -> {1})".format(self.id, self.evaluate())
@ClassLogger
class EventReference(Event):
    """
    A reference to an :class:`Event`, binding it to a concrete set of
    call arguments that are resolved at evaluation time.

    :param event: event
    :type event: :class:`Event`
    :param arguments: positional arguments (may contain event references)
    :param keyValueArguments: keyword arguments (may contain event references)
    """
    # sentinel distinguishing "no value supplied" from a legitimate ``None``
    # evaluation result (resolves the old TODO in ``__str__``)
    _MISSING = object()
    def __init__(self, event, arguments=None, keyValueArguments=None):
        super(EventReference, self).__init__()
        self.event = event
        self.id = event.id
        # default to fresh containers; mutable defaults would share state
        self.arguments = [] if arguments is None else arguments
        self.keyValueArguments = {} if keyValueArguments is None else keyValueArguments
    def evaluate(self):
        """
        Evaluate the referenced event, first resolving every argument that is
        itself an event reference (or cachable event) to its current value.

        :returns: result of the event, or ``None`` if evaluation raised
        :raises: nothing -- exceptions are logged and swallowed so one broken
            event cannot abort policy evaluation
        """
        try:
            arguments = []
            for argument in self.arguments:
                if isinstance(argument, (EventReference, CachableEvent)):
                    arguments.append(argument.evaluate())
                elif isinstance(argument, Event):
                    # bare events are rejected: they carry no argument binding
                    raise ValueError("'{0}' needs to be an EventReference".format(repr(argument)))
                else:
                    arguments.append(argument)
            keyValueArguments = {}
            for key, value in self.keyValueArguments.items():
                if isinstance(value, (EventReference, CachableEvent)):
                    keyValueArguments[key] = value.evaluate()
                elif isinstance(value, Event):
                    raise ValueError("'{0}={1}' needs to be an EventReference".format(key, repr(value)))
                else:
                    keyValueArguments[key] = value
            return callWithVariableArguments(self.event.evaluate, *arguments, **keyValueArguments)
        except Exception as exception:
            self.log.error(self.event)
            self.log.exception(exception)
    def __repr__(self, *args, **kwargs):
        return "({0}{1})".format(self.id,
                                 getFormattedArgumentString(self.arguments, self.keyValueArguments))
    def __str__(self, evaluatedValue=_MISSING, *args, **kwargs):
        # bug fix for the old TODO: use a sentinel instead of ``None`` as the
        # "not supplied" marker, so a legitimate ``None`` evaluation result is
        # rendered rather than triggering a redundant re-evaluation
        if evaluatedValue is EventReference._MISSING:
            evaluatedValue = self.evaluate()
        return "({0}{1} -> {2})".format(self.id,
                                        getFormattedArgumentString(self.arguments, self.keyValueArguments),
                                        evaluatedValue)
@ClassLogger
class Action(object):
    """
    An action implementation

    Abstract base class: concrete actions define a class level ``id`` and
    implement :meth:`perform`.
    """
    # NOTE(review): Python 2 style metaclass declaration; no effect on
    # Python 3 -- confirm the intended runtime
    __metaclass__ = ABCMeta
    # unique identifier of the action type, set by subclasses
    id = None
    @abstractmethod
    def perform(self):
        """
        Perform specified action
        .. note::
            Subclasses should add arguments as needed
        :returns: result
        """
    def __repr__(self, *args, **kwargs):
        return "{0}(...)".format(self.id)
@ClassLogger
class ActionReference(Action):
    """
    A reference that binds an :class:`Action` to a concrete set of call
    arguments.

    :param action: action
    :type action: :class:`Action`
    :param arguments: positional arguments used when the action is performed
    :param keyValueArguments: keyword arguments used when the action is performed
    """
    def __init__(self, action, arguments=None, keyValueArguments=None):
        self.action = action
        self.id = action.id
        self.arguments = [] if arguments is None else arguments
        self.keyValueArguments = {} if keyValueArguments is None else keyValueArguments
    def perform(self):
        """
        Perform the referenced action with the stored arguments.

        :returns: result of the action, or ``None`` if it raised
        """
        try:
            return callWithVariableArguments(
                self.action.perform, *self.arguments, **self.keyValueArguments)
        except Exception as exception:
            # log and swallow so a failing action does not abort evaluation
            self.log.error(self.action)
            self.log.exception(exception)
    def __repr__(self, *args, **kwargs):
        return "{0}{1}".format(self.id, getFormattedArgumentString(self.arguments, self.keyValueArguments))
@ClassLogger
class BinaryOperator(Event):
    """
    A binary operator base class

    Combines two operand events; subclasses implement
    :meth:`evaluateOperation` and override ``id`` with the operator symbol.

    :param one: event one
    :type one: :class:`Event`
    :param two: event two
    :type two: :class:`Event`
    """
    __metaclass__ = ABCMeta
    id = "binaryOperator"
    def __init__(self, one, two):
        super(BinaryOperator, self).__init__()
        # operands are normalized to events; plain values get wrapped
        # (ValueEvent is defined elsewhere in this module)
        self.one = ValueEvent.create(one)
        self.two = ValueEvent.create(two)
    @abstractmethod
    def evaluateOperation(self, one, two):
        """
        Evaluate the binary operation with the specified operands
        """
    def evaluate(self):
        # evaluate both operands first, then apply the concrete operator
        one = self.one.evaluate()
        two = self.two.evaluate()
        return self.evaluateOperation(one, two)
    def __repr__(self, *args, **kwargs):
        return "({0} {1} {2})".format(repr(self.one), self.id, repr(self.two))
    def __str__(self, *args, **kwargs):
        return "({0} {1} {2} -> {3})".format(self.one, self.id, self.two, self.evaluate())
@ClassLogger
class Cache(dict):
    """
    An in-memory dictionary backed cache; ``enabled`` signals to consumers
    whether cached values should be used.
    """
    def __init__(self):
        dict.__init__(self)
        # consumers check this flag and bypass the cache when it is False
        self.enabled = True
@ClassLogger
class CachableEvent(Event):
    """
    Wraps an :class:`Event` so that its value is served from a shared cache
    (keyed by the event id) whenever caching is enabled.

    :param cache: cache
    :type cache: :class:`Cache`
    :param event: event
    :type event: :class:`Event`
    """
    def __init__(self, cache, event):
        super(CachableEvent, self).__init__()
        self.cache = cache
        self.event = event
        self.id = event.id
    def evaluate(self):
        """Return the cached value, computing and storing it on a miss."""
        if not self.cache.enabled:
            # caching switched off: always evaluate the wrapped event
            return self.event.evaluate()
        if self.id not in self.cache:
            self.cache[self.id] = self.event.evaluate()
        return self.cache[self.id]
    def __repr__(self, *args, **kwargs):
        return repr(self.event)
    def __str__(self, *args, **kwargs):
        if isinstance(self.event, EventReference):
            # hand the already-computed value to the reference's renderer
            return self.event.__str__(evaluatedValue=self.evaluate())
        return "({0} -> {1})".format(self.id, self.evaluate())
@ClassLogger
class Policy(object):
    """
    Base class for all policies.

    :param cache: cache
    :type cache: :class:`Cache`
    """
    # NOTE(review): Python 2 style metaclass declaration; no effect on
    # Python 3 -- confirm the intended runtime
    __metaclass__ = ABCMeta
    # unique identifier of the policy, set by subclasses
    id = None
    def __init__(self, cache=None):
        self.cache = cache
        self.state = States.ENABLED
    @property
    def description(self):
        """
        Human readable description derived from the class doc string with
        blank lines and parameter markup (lines starting with ``:``) removed.
        """
        if not self.__doc__:
            return ""
        stripped = (line.strip() for line in self.__doc__.splitlines())
        return "\n".join(line for line in stripped if line and not line.startswith(":"))
    @abstractmethod
    def evaluateEvent(self):
        """
        Evaluate the event to determine whether this policy's actions should
        be performed
        """
    @abstractmethod
    def performActions(self):
        """
        Perform the actions specified for the policy when the event
        evaluated as ``True``
        """
    def __hash__(self, *args, **kwargs):
        return hash(repr(self))
    def __repr__(self, *args, **kwargs):
        return "{0}".format(self.id)
    def __str__(self, *args, **kwargs):
        return "{0}".format(self.id)
@ClassLogger
class PolicyComponent(Policy):
    """
    A policy made up of a trigger event and the actions to run when the
    event evaluates truthy; may hold nested child policies.

    :param name: name
    :type name: str
    :param event: event
    :type event: :class:`Event`
    :param actions: list of actions
    :param actions: [:class:`ActionReference`]
    """
    def __init__(self, name, event, actions, cache=None):
        super(PolicyComponent, self).__init__(cache)
        self.id = name
        self.event = event
        self.actions = actions
        # hash -> policy, used to detect duplicate registrations
        self.policyHashes = {}
        self.policies = OrderedDict()
    def addPolicy(self, policy):
        """
        Register *policy* as a child of this policy component.

        :param policy: policy
        :type policy: :class:`Policy`
        """
        policyHash = hash(policy)
        if policyHash in self.policyHashes:
            self.log.error("policy '%s' already exists", repr(policy))
            return
        self.policyHashes[policyHash] = policy
        self.policies[policy.id] = policy
        if isinstance(policy, PolicyComponent):
            self.log.debug("'%s' added policy '%s' '%s'", self.id, policy.id, repr(policy))
        else:
            self.log.debug("'%s' added policy '%s'", self.id, policy.id)
    def evaluateEvent(self):
        """Evaluate the trigger event of this policy."""
        return self.event.evaluate()
    def performActions(self):
        """Perform every configured action in order."""
        for action in self.actions or []:
            action.perform()
    def __hash__(self, *args, **kwargs):
        return hash(repr(self))
    def __repr__(self, *args, **kwargs):
        return "{0} -> {1}".format(repr(self.event), ",".join([str(action) for action in self.actions]))
    def __str__(self, *args, **kwargs):
        return "{0}: {1} -> {2}".format(self.id, self.event, ",".join([str(action) for action in self.actions]))
@ClassLogger
class PolicyDatabase(object):
    """
    An abstraction of the underlying database where policies are stored

    Policies live in the backend key-value store under a hierarchical key
    layout: ``/policies/<name>`` holds the name, with ``.../representation``,
    ``.../state``, ``.../type`` and ``.../properties/<key>`` children; nested
    policies repeat the pattern, e.g. ``/policies/<parent>/policies/<child>``.
    """
    def __init__(self):
        # key-value store supplied by the configured backend
        self.store = Backend().keyValueStore
    def addPolicyUsingName(self, fullPolicyName, policy):
        """
        Add a policy (and, recursively, its child policies)
        :param fullPolicyName: fully qualified policy name
        :type fullPolicyName: str
        :param policy: policy
        :type policy: :class:`Policy`
        :returns: ``True`` when the policy was stored, ``False`` otherwise
        """
        nameHierarchy = fullPolicyName.split("/")
        if len(nameHierarchy) == 1:
            # no parent
            if self.policyExists(policy.id):
                # with the ability to run policy engine on multiple nodes but with shared database this is acceptable
                self.log.debug("policy '%s' already exists", repr(policy))
                return False
            else:
                self.addPolicy(policy)
                # check if we can add children
                if hasattr(policy, "policies"):
                    for childPolicy in policy.policies.values():
                        self.addPolicyUsingName("{0}/{1}".format(fullPolicyName, childPolicy.id), childPolicy)
        else:
            # adding a policy directly by a nested ("parent/child") name is
            # not supported; children are stored via the recursion above
            self.log.warn("Parent child relationships not implemented yet")
            return False
        return True
    def addPolicy(self, policy):
        """
        Add a single policy record to the store
        :param policy: policy
        :type policy: :class:`Policy`
        """
        policyInfo = self.getPolicyInfo(policy.id)
        if policyInfo:
            self.log.error("policy '%s' already exists", policyInfo.name)
            return None
        properties = {}
        # policy components are stored by their textual representation so
        # they can be re-parsed; plain policy classes by their import path
        if isinstance(policy, PolicyComponent):
            representation = repr(policy)
            policyType = "{}.{}".format(getFullModuleName(PolicyComponent), PolicyComponent.__name__)
        else:
            representation = policy.id
            policyType = "{}.{}".format(getFullModuleName(policy), policy.__class__.__name__)
        if policy.description:
            properties["description"] = policy.description
        policyKey = self.getKey(policy.id)
        propertiesKey = "{policyKey}/properties".format(policyKey=policyKey)
        representationKey = "{policyKey}/representation".format(policyKey=policyKey)
        stateKey = "{policyKey}/state".format(policyKey=policyKey)
        typeKey = "{policyKey}/type".format(policyKey=policyKey)
        # write all keys atomically in one transaction
        transaction = self.store.transaction
        transaction.put(policyKey, policy.id)
        transaction.put(representationKey, representation)
        transaction.put(stateKey, policy.state.toJSON(includeClassInfo=True))
        transaction.put(typeKey, policyType)
        for key, value in properties.items():
            propertyKey = "{propertiesKey}/{key}".format(propertiesKey=propertiesKey, key=key)
            transaction.put(propertyKey, value)
        transaction.commit()
        if isinstance(policy, PolicyComponent):
            self.log.debug("stored policy '%s' '%s'", policy.id, representation)
        else:
            self.log.debug("stored policy '%s'", policy.id)
    def clear(self):
        """
        Remove all policies
        """
        self.store.deletePrefix("/policies/")
    def disablePolicy(self, fullPolicyName):
        """
        Disables the policy in the database given its name
        :param fullPolicyName: fully qualified policy name
        :type fullPolicyName: str
        """
        stateKey = self.getKey(fullPolicyName, "state")
        serializedState = self.store.get(stateKey)
        if not serializedState:
            self.log.error("could not disable '%s' because it does not exist", fullPolicyName)
            return
        self.store.put(stateKey, States.DISABLED.toJSON(includeClassInfo=True))
    def enablePolicy(self, fullPolicyName):
        """
        Enables the policy in the database given its name
        :param fullPolicyName: fully qualified policy name
        :type fullPolicyName: str
        """
        stateKey = self.getKey(fullPolicyName, "state")
        serializedState = self.store.get(stateKey)
        if not serializedState:
            self.log.error("could not enable '%s' because it does not exist", fullPolicyName)
            return
        self.store.put(stateKey, States.ENABLED.toJSON(includeClassInfo=True))
    def getKey(self, fullPolicyName, *additionalParts):
        """
        Get key for the specified policy, e.g. ``a/b`` ->
        ``/policies/a/policies/b`` (plus any additional trailing parts)
        :param fullPolicyName: fully qualified policy name
        :type fullPolicyName: str
        :returns: key
        :rtype: str
        """
        nameHierarchy = fullPolicyName.split("/")
        keyParts = [""]
        for name in nameHierarchy:
            keyParts.extend(["policies", name])
        keyParts.extend(additionalParts)
        return "/".join(keyParts)
    def getNestedPolicyInfos(self, parentKey, policyInfoMapping):
        """
        Get policies based on parent key and the already retrieved values
        :param parentKey: parent key
        :type parentKey: str
        :param policyInfoMapping: policy information mapping of key-value
        :type policyInfoMapping: dict
        """
        policies = {}
        # match only direct children: <parentKey>/policies/<name> with no
        # further path segments
        policyKeyExpression = re.compile(r"(?P<policyKey>{parentKey}/policies/[^/]+)$".format(parentKey=parentKey))
        for key in policyInfoMapping.keys():
            match = policyKeyExpression.match(key)
            if match:
                policyKey = match.group("policyKey")
                propertiesKey = "{policyKey}/properties/".format(policyKey=policyKey)
                representationKey = "{policyKey}/representation".format(policyKey=policyKey)
                stateKey = "{policyKey}/state".format(policyKey=policyKey)
                typeKey = "{policyKey}/type".format(policyKey=policyKey)
                policyProperties = {
                    key.replace(propertiesKey, ""): value
                    for key, value in policyInfoMapping.items()
                    if key.startswith(propertiesKey)
                }
                policyInfo = PolicyInfo(
                    policyInfoMapping[policyKey],
                    policyInfoMapping[representationKey],
                    policyInfoMapping[stateKey],
                    policyInfoMapping[typeKey],
                    policyProperties
                )
                # recurse to pick up grandchildren
                policyInfo.policies = self.getNestedPolicyInfos(policyKey, policyInfoMapping)
                policies[policyInfo.name] = policyInfo
        return policies
    def getNumberOfTopLevelPolicies(self):
        """
        Get number of top level policies
        :returns: number of top level policies
        :rtype: int
        """
        pattern = re.compile("/policies/[^/]+$")
        # NOTE(review): the set holds Match objects, which are never equal to
        # each other, so this relies on exactly one key matching per policy
        # (the policy name key itself) -- confirm if key layout changes
        policies = {
            pattern.search(key)
            for key, _ in self.store.getPrefix("/policies/")
            if pattern.search(key)
        }
        return len(policies)
    def getPolicyInfo(self, fullPolicyName):
        """
        Get policy info for the specified policy
        :param fullPolicyName: fully qualified policy name
        :type fullPolicyName: str
        :returns: policy info
        :rtype: :class:`PolicyInfo`
        """
        policyKey = self.getKey(fullPolicyName)
        policyName = self.store.get(policyKey)
        if not policyName:
            return None
        policyPrefix = policyKey + "/"
        # map from key to value and deserialize value automatically
        policyInfoMapping = {
            key : JSONSerializable.fromJSON(value) if JSONSerializable.classAttribute in value else value
            for key, value in self.store.getPrefix(policyPrefix)
        }
        # deal with policy information
        propertiesKey = "{policyKey}/properties/".format(policyKey=policyKey)
        representationKey = "{policyKey}/representation".format(policyKey=policyKey)
        stateKey = "{policyKey}/state".format(policyKey=policyKey)
        typeKey = "{policyKey}/type".format(policyKey=policyKey)
        policyProperties = {
            key.replace(propertiesKey, ""): value
            for key, value in policyInfoMapping.items()
            if key.startswith(propertiesKey)
        }
        policyInfo = PolicyInfo(
            policyName,
            policyInfoMapping[representationKey],
            policyInfoMapping[stateKey],
            policyInfoMapping[typeKey],
            policyProperties
        )
        policyInfo.policies = self.getNestedPolicyInfos(policyKey, policyInfoMapping)
        return policyInfo
    def getPolicyInfos(self):
        """
        Get all policy infos
        :returns: list of policy infos
        :rtype: [:class:`PolicyInfo`]
        """
        policyInfoMapping = {
            key: JSONSerializable.fromJSON(value) if JSONSerializable.classAttribute in value else value
            for key, value in self.store.getPrefix("/policies")
        }
        return self.getNestedPolicyInfos("", policyInfoMapping).values()
    def getPolicyState(self, fullPolicyName):
        """
        Get the state of 'policy' if it exists
        :param fullPolicyName: fully qualified policy name
        :type fullPolicyName: str
        :returns: state of the policy if it exists else None
        :rtype: :class:`States`
        """
        stateKey = self.getKey(fullPolicyName, "state")
        value = self.store.get(stateKey)
        if value is None:
            self.log.error("could not get state because '%s' does not exist", fullPolicyName)
            return None
        return JSONSerializable.fromJSON(value)
    def policyExists(self, fullPolicyName):
        """
        Does the specified policy already exist
        :param fullPolicyName: fully qualified policy name
        :type fullPolicyName: str
        :returns: whether policy exists
        :rtype: bool
        """
        stateKey = self.getKey(fullPolicyName, "state")
        serializedState = self.store.get(stateKey)
        if serializedState:
            return True
        return False
@ClassLogger
class PolicyEngine(object):
"""
Policy engine that allows iterating over policies and performing their actions
based on whether the specified event matches
:param properties: properties
:type properties: dict
"""
    def __init__(self, properties=None):
        """
        :param properties: configuration; ``"policies"`` lists the policy
            ids to load in order and ``"include.policies.database"`` (bool)
            controls whether stored policies are also loaded
        :type properties: dict
        """
        self.events = {}
        self.cache = Cache()
        # cache starts disabled; event values are recomputed on each access
        self.cache.enabled = False
        self.actions = {}
        # PolicyParser and loadEvents are defined elsewhere in this module
        self.policyParser = PolicyParser(self)
        self.policies = OrderedDict()
        self.policyDatabase = PolicyDatabase()
        self.properties = properties or {}
        self.loadActions()
        self.loadEvents()
        orderedList = self.properties.get("policies", [] )
        includePoliciesFromDatabase = self.properties.get("include.policies.database", False)
        self.loadDefaultPolicies(orderedList=orderedList, includePoliciesFromDatabase=includePoliciesFromDatabase)
    def addAction(self, action):
        """
        Add known action
        :param action: action
        :type action: :class:`Action`
        """
        # a later registration with the same id silently replaces the earlier one
        self.log.debug("adding action '%s'", action.id)
        self.actions[action.id] = action
def addActions(self, actions):
"""
Add known actions
:param actions: actions
:type actions: [:class:`Action`]
"""
for action in actions:
self.addAction(action)
def addEvent(self, event):
"""
Add known event
:param event: event
:type event: :class:`Event`
"""
if event == Event:
self.log.warn("cannot add base event class")
elif issubclass(event, (UnaryOperator, BinaryOperator)):
self.log.warn("cannot add operator '%s'", event.id)
else:
self.log.debug("adding event '%s'", event.id)
self.events[event.id] = event
def addEvents(self, events):
"""
Add known events
:param events: events
:type events: [:class:`Event`]
"""
for event in events:
self.addEvent(event)
def addPolicy(self, policy):
"""
Add a policy
:param policy: policy
:type policy: :class:`Policy`
"""
if self.policyDatabase.addPolicyUsingName(policy.id, policy):
self.policies[policy.id] = policy
elif policy.id not in self.policies:
self.policies[policy.id] = policy
def addPolicies(self, policies):
"""
Add policies
:param policies: policies
:type policies: [:class:`Policy`]
"""
for policy in policies:
self.addPolicy(policy)
    def convertToPolicies(self, policyInfos):
        """
        Convert policy infos into actual policies
        :param policyInfos: policy infos
        :type policyInfos: [:class:`PolicyInfo`]
        :returns: policies
        :rtype: [:class:`Policy`]
        """
        policyComponentType = "{}.{}".format(getFullModuleName(PolicyComponent), PolicyComponent.__name__)
        policies = []
        for policyInfo in policyInfos:
            if policyInfo.type == policyComponentType:
                # textual policy components are re-parsed from their
                # stored representation
                try:
                    policy = self.policyParser.parsePolicy(policyInfo.name + ":" + policyInfo.representation)
                    policy.state = policyInfo.state
                    # load children
                    if policyInfo.policies:
                        childPolicies = self.convertToPolicies(policyInfo.policies.values())
                        for childPolicy in childPolicies:
                            policy.addPolicy(childPolicy)
                    policies.append(policy)
                    self.log.debug("loaded policy '%s: %s'", policy.id, repr(policy))
                except Exception as exception:
                    self.log.error("could not load policy '%s': '%s': %s", policyInfo.name, policyInfo.representation, exception)
            else:
                # class-based policies are re-instantiated from their
                # fully qualified type name
                try:
                    # get class info
                    info = policyInfo.type.split(".")
                    className = str(info.pop())
                    moduleName = ".".join(info)
                    # load class from module
                    module = __import__(moduleName, fromlist=[className])
                    clazz = getattr(module, className)
                    # create instance based off constructor
                    # NOTE(review): inspect.getargspec is removed in
                    # Python 3.11 (getfullargspec is the replacement) --
                    # confirm the target runtime before upgrading
                    args = inspect.getargspec(clazz.__init__)
                    # constructor takes more than just self: assume it
                    # accepts the shared cache
                    if len(args[0]) > 1:
                        policy = clazz(self.cache)
                    else:
                        policy = clazz()
                    policy.state = policyInfo.state
                    policies.append(policy)
                    self.log.debug("loaded policy '%s' of type '%s'", policyInfo.name, policyInfo.type)
                except Exception as exception:
                    self.log.error("could not load policy '%s' of type '%s': %s", policyInfo.name, policyInfo.type, exception)
        return policies
def disablePolicy(self, policy):
"""
Disables the given policy
"""
self.log.debug("Disabling policy %s", str(policy))
policyInfo = self.policyDatabase.getPolicyInfo(policy.id)
if policyInfo is None:
self.log.error("Unable to get policy from the database: %s", str(policy))
return
# disable the policy in memory and in the database
if policy.state == States.ENABLED:
policy.state = States.DISABLED
self.policyDatabase.disablePolicy(policy.id)
else:
self.log.info("Policy is already disabled %s", str(policy))
def enablePolicy(self, policy):
"""
Enables the given policy
"""
self.log.debug("Enabling policy %s", str(policy))
policyInfo = self.policyDatabase.getPolicyInfo(policy.id)
if policyInfo is None:
self.log.error("Unable to get policy from the database: %s", str(policy))
return
# enable the policy in memory and in the database
if not policy.isEnabled():
policy.state = States.ENABLED
self.policyDatabase.enablePolicy(policy.id)
else:
self.log.info("Policy is already enabled %s", str(policy))
def loadActions(self):
"""
Loads Actions from the c4/system/policies directory.
"""
actions = getModuleClasses(c4.policyengine.actions, Action)
actions.extend(getModuleClasses(c4.policies, Action))
# filter out base classes
actions = [action for action in actions if action != Action and action != ActionReference]
self.addActions(actions)
def loadDefaultPolicies(self, orderedList=None, includePoliciesFromDatabase=False):
    """
    Loads Policies from the c4/system/policies directory.
    :param orderedList: List of policy ids to include
    :type orderedList: list
    :param includePoliciesFromDatabase: Include policies form database?
    :type includePoliciesFromDatabase: boolean
    """
    # short circuit for empty list
    if not orderedList:
        self.log.info("Configuration did not specify any policies to load" )
        return
    # load policies
    policies = getModuleClasses(c4.policies, Policy)
    # filter out base class
    policies = [policy for policy in policies if policy != Policy]
    # build temporary unordered dict
    policyDict = {}
    for policy in policies:
        policyDict[policy.id] = policy
    wrappedPolicyDict = {}
    wrappedPolicies = getModuleClasses(c4.policies, PolicyWrapper)
    # remove base class
    if PolicyWrapper in wrappedPolicies:
        wrappedPolicies.remove(PolicyWrapper)
    for wrappedPolicy in wrappedPolicies:
        # wrappers carry a textual policy; reassemble "<id>:<policy text>"
        # and run it through the parser
        policyString = wrappedPolicy.id + ":" + wrappedPolicy.policy
        try:
            policy = self.policyParser.parsePolicy(policyString)
            wrappedPolicyDict[policy.id] = policy
        except Exception as exception:
            self.log.exception("could not parse policy wrapper '%s': %s", policyString, exception)
    dbPolicyDict = {}
    if includePoliciesFromDatabase:
        dbPolicies = self.getPoliciesFromDatabase()
        for policy in dbPolicies:
            dbPolicyDict[policy.id] = policy
    self.policies.clear()
    # We are specifying an order for loading the policies, but we have 3 sources the policies could be loaded from,
    # and the different sources have slightly different behaviors so go through the list to see if the policy can
    # be found and then load it based on the source; ie class properties will be type 1, policy wrapper will be type 2,
    # and policies that were custom added will be loaded from the database as type 3
    # Note that because we are not loading all policies anymore, it isn't sufficient to just check to see if the policy
    # database has policies loaded; also because we support dynamic loading it isn't sufficient to always load defaults
    for policyId in orderedList:
        try:
            # resolve in priority order: class (1), wrapper (2), database (3)
            policy = policyDict.get(policyId, None)
            policyType = 1
            if not policy:
                policy = wrappedPolicyDict.get(policyId, None)
                policyType = 2
            if not policy:
                policy = dbPolicyDict.get(policyId, None)
                policyType = 3
            if policy:
                if policyType == 1:
                    # type 1 entries are classes and must be instantiated with the cache
                    self.log.debug("loading default policy '%s' of type '%s.%s'", policy.id, policy.__module__, policy.__name__)
                    self.addPolicy(policy(self.cache))
                elif policyType == 2:
                    # types 2 and 3 are already policy instances
                    self.log.debug("loading default policy '%s' from wrapper", policy.id)
                    self.addPolicy(policy)
                else:
                    self.log.debug("loading default policy '%s' from database", policy.id)
                    self.addPolicy(policy)
            else:
                self.log.error("Configuration error - policy: '%s' not found", policyId )
        except Exception as exception:
            self.log.exception(exception)
def loadEvents(self):
    """
    Discover :class:`Event` implementations under ``c4.policyengine.events``
    and ``c4.policies`` and register them with the engine.
    """
    discovered = getModuleClasses(c4.policyengine.events, Event)
    discovered.extend(getModuleClasses(c4.policies, Event))
    # filter out the Event base class as well as operator events
    concrete = []
    for eventClass in discovered:
        if eventClass == Event:
            continue
        if issubclass(eventClass, (UnaryOperator, BinaryOperator)):
            continue
        concrete.append(eventClass)
    self.addEvents(concrete)
def getPoliciesFromDatabase(self):
    """
    Load all policies stored in the policy database table.

    :returns: policies
    :rtype: [:class:`Policy`]
    """
    policyInfos = self.policyDatabase.getPolicyInfos()
    return self.convertToPolicies(policyInfos)
def loadPolicy(self, string):
    """
    Parse the given policy string and add the resulting policy to the engine.
    Parse or registration failures are logged, not raised.

    :param string: policy string
    :type string: str
    """
    try:
        parsedPolicy = self.policyParser.parsePolicy(string)
        self.addPolicy(parsedPolicy)
    except Exception as exception:
        self.log.error("could not load policy '%s': %s", string, exception)
def run(self, policy=None):
    """
    If a policy is given then check if specified event
    matches and perform actions accordingly, followed
    by running its child policies.
    If no policy is specified start with root policies.
    :param policy: policy
    :type policy: :class:`Policy`
    """
    if policy:
        start = datetime.utcnow()
        if policy.evaluateEvent():
            self.log.debug("event match for '%s'", policy)
            policy.performActions()
            # recurse into enabled child policies; children only run when
            # the parent's event matched
            if hasattr(policy, "policies"):
                for childPolicy in policy.policies.values():
                    if childPolicy.state == States.ENABLED:
                        try:
                            self.run(childPolicy)
                        except Exception as exception:
                            # one failing child must not stop its siblings
                            self.log.exception(exception)
        else:
            self.log.debug("no event match for '%s'", policy)
        end = datetime.utcnow()
        self.log.debug("executing policy '%s' took %s", policy.id, end-start)
        self.checkPerformanceIssues(policy.id, start, end)
    else:
        # top-level invocation: run all enabled root policies in order
        start = datetime.utcnow()
        # clear cache on events
        self.cache.clear()
        self.cache.enabled = True
        # go through policies in order
        for policy in self.policies.values():
            if policy.state == States.ENABLED:
                try:
                    self.run(policy)
                except Exception as exception:
                    self.log.exception(exception)
            else:
                self.log.debug("'%s' disabled", policy.id)
        # clear cache on events
        self.cache.clear()
        self.cache.enabled = False
        end = datetime.utcnow()
        self.log.debug("executing policy engine took %s", end-start)
def updateFromDatabase(self):
    """
    Update all policies from database (includes list and state).
    """
    start = datetime.utcnow()
    # check policy list to see if it needs updating
    node = self.properties.get('node', None)
    name = self.properties.get('name', None)
    expectedPolicies = None
    role = None
    if node and name:
        configuration = Backend().configuration
        role = configuration.getRole(node)
        if role != Roles.DISABLED:
            # look up the expected policy list from the device's configuration
            roleInfo = configuration.getRoleInfo(role=role)
            if roleInfo:
                deviceInfo = roleInfo.devices.get(name, None)
                if deviceInfo:
                    properties = deviceInfo.properties
                    if properties:
                        expectedPolicies = properties.get('policies', [])
        else:
            # disabled node: no policies should remain loaded
            self.log.info("Node is disabled removing policies...")
            expectedPolicies = []
            self.policies.clear()
    if expectedPolicies or (role and role == Roles.DISABLED):
        replacePolicies = False
        # check for extra policies
        for policy in self.policies.keys():
            if policy not in expectedPolicies:
                replacePolicies = True
                break
        if not replacePolicies:
            # check for missing policies
            for policy in expectedPolicies:
                if policy not in self.policies.keys():
                    replacePolicies = True
                    break
        # if mismatch then replace all policies (since order matters)
        if replacePolicies:
            self.log.info("Expected policies: %s", str(expectedPolicies))
            self.log.info("Actual policies: %s", str(self.policies.keys()))
            self.log.info("Correcting policies...")
            includePoliciesFromDatabase = self.properties.get("include.policies.database", False)
            self.loadDefaultPolicies(orderedList=expectedPolicies, includePoliciesFromDatabase=includePoliciesFromDatabase)
            #TODO send device name a setPolicies operation message to update it's status for reporting
            address = socket.gethostname().split(".")[0]
            client = RouterClient(address)
            client.forwardMessage(Operation("{0}/{1}".format(node, name),
                                            "setPolicies",
                                            policies=expectedPolicies))
    # go through policies in order to update states
    for key, policy in self.policies.items():
        dbState = self.policyDatabase.getPolicyState(policy.id)
        if policy.state != dbState:
            policy.state = dbState
            self.policies[key] = policy
    end = datetime.utcnow()
    self.log.debug("updating policy engine took %s", end-start)
def checkPerformanceIssues(self, policyName, start, end):
    """
    Log a warning when executing a policy took longer than the configured
    ``performance.warning.threshold`` property (seconds, default 2).

    :param policyName: policy id
    :type policyName: str
    :param start: execution start time
    :type start: :class:`datetime`
    :param end: execution end time
    :type end: :class:`datetime`
    """
    # this value might require tweaking for complex policies and multinode systems
    warnThreshold = self.properties.get("performance.warning.threshold", 2)
    elapsedSeconds = (end - start).total_seconds()
    if elapsedSeconds > warnThreshold:
        self.log.warning("Executing policy '%s' has taken: %s seconds", policyName, elapsedSeconds)
class PolicyInfo(JSONSerializable):
    """
    Serializable information describing a policy

    :param name: name
    :type name: str
    :param representation: representation
    :type representation: str
    :param state: state
    :type state: :class:`States`
    :param policyType: type
    :type policyType: str
    :param properties: properties
    :type properties: dict
    """
    def __init__(self, name, representation, state, policyType, properties):
        self.name = name
        self.representation = representation
        self.state = state
        self.type = policyType
        # child policy infos; created lazily on first addPolicyInfo call
        self.policies = None
        self.properties = properties

    def addPolicyInfo(self, policyInfo):
        """
        Add child policy information

        :param policyInfo: policy info
        :type policyInfo: :class:`PolicyInfo`
        :returns: :class:`PolicyInfo`
        """
        if self.policies is None:
            self.policies = OrderedDict()
        if policyInfo.name in self.policies:
            log.error("'%s' already part of '%s'", policyInfo.name, self.name)
            return self
        self.policies[policyInfo.name] = policyInfo
        return self
@ClassLogger
class PolicyParser(object):
    """
    Base implementation of a policy parser using ``pyparsing``.

    Grammar (informal): ``<name> : <event expression> -> <action>, ...``
    where events may be combined with unary/binary operators discovered from
    ``c4.policyengine.events.operators``.

    :param policyEngine: policy engine
    :type policyEngine: :class:`PolicyEngine`
    """
    def __init__(self, policyEngine):
        self.policyEngine = policyEngine
        # operator id -> operator class, filled while building the grammar
        self.unaryOperators = {}
        self.binaryOperators = {}
        # local import so pyparsing is only required when a parser is created
        import pyparsing
        # constant values: quoted strings (quotes stripped) or numbers
        self.stringConstantElement = (pyparsing.QuotedString("\"", unquoteResults=True) |
                                      pyparsing.QuotedString("'", unquoteResults=True))
        self.numberConstantElement = pyparsing.Word(pyparsing.nums + ".")
        def numberConstantElementParseAction(tokens):
            """
            Parse number constants into `float` or `int`
            """
            self.log.debug("found number constant '%s'", tokens[0])
            if "." in tokens[0]:
                try:
                    return float(tokens[0])
                except:
                    pass
            else:
                try:
                    return int(tokens[0])
                except:
                    pass
            # fall through: leave the token unconverted
            return tokens
        self.numberConstantElement.addParseAction(numberConstantElementParseAction)
        self.constantElement = self.stringConstantElement | self.numberConstantElement
        # key-value pair constant, e.g. timeout=5
        self.namedConstantElement = pyparsing.Word(pyparsing.alphanums) + "=" + self.constantElement
        def namedConstantParseAction(string, location, tokens):
            """
            Parse named constant into a key-value dictionary
            """
            self.log.debug("found named constant '%s = %s'", tokens[0], tokens[2])
            return {tokens[0]: tokens[2]}
        self.namedConstantElement.addParseAction(namedConstantParseAction)
        # forward declaration: event references can appear inside parameters
        self.eventReferenceElement = pyparsing.Forward()
        # parameters
        self.parameterElement = self.constantElement | self.namedConstantElement | self.eventReferenceElement
        self.parametersElement = self.parameterElement + pyparsing.ZeroOrMore(pyparsing.Suppress(",") + self.parameterElement)
        def parametersParseAction(string, location, tokens):
            """
            Parse parameters into arguments and key value arguments tuple
            """
            arguments = []
            keyValueArguments = {}
            for parameter in tokens:
                self.log.debug("found parameter '%s'", repr(parameter))
                if isinstance(parameter, dict):
                    keyValueArguments.update(parameter)
                else:
                    arguments.append(parameter)
            return (arguments, keyValueArguments)
        self.parametersElement.addParseAction(parametersParseAction)
        # event references: "name(args)" or bare "name"
        self.eventReferenceElement << (
            (
                pyparsing.Word(pyparsing.alphanums + ".") +
                pyparsing.Suppress("(") +
                pyparsing.Optional(self.parametersElement) +
                pyparsing.Suppress(")")) |
            pyparsing.Word(pyparsing.alphanums + ".")
        )
        def eventReferenceElementParseAction(string, location, tokens):
            """
            Parse event references into a cachable event
            """
            if len(tokens) == 1:
                # bare reference without arguments
                self.log.debug("found event reference '%s'", tokens[0])
                parameters = ([], {})
            else:
                self.log.debug("found event reference '%s%s'", tokens[0], repr(tokens[1]))
                parameters = tokens[1]
            if tokens[0] not in self.policyEngine.events:
                raise pyparsing.ParseFatalException(
                    string, location,
                    "found unknown event reference '{0}'".format(repr(tokens[0])))
            # set up event implementation
            event = self.policyEngine.events[tokens[0]]()
            # validate argument count/names against the event's evaluate signature
            self.checkParameters(event, "evaluate", parameters[0], parameters[1])
            return CachableEvent(
                self.policyEngine.cache,
                EventReference(event, parameters[0], parameters[1]))
        self.eventReferenceElement.addParseAction(eventReferenceElementParseAction)
        # event operators, discovered dynamically from the operators package
        self.unaryOperatorElement = pyparsing.Or([])
        self.binaryOperatorElement = pyparsing.Or([])
        # TODO: outsource to load function?
        unaryOperatorList = getModuleClasses(c4.policyengine.events.operators, UnaryOperator)
        for operatorImplementation in unaryOperatorList:
            self.unaryOperators[operatorImplementation.id] = operatorImplementation
            # NOTE(review): wrapping a single id string in pyparsing.Or looks
            # unusual -- confirm this matches the intended literal matching
            self.unaryOperatorElement.append(pyparsing.Or(operatorImplementation.id))
        binaryOperatorList = getModuleClasses(c4.policyengine.events.operators, BinaryOperator)
        for operatorImplementation in binaryOperatorList:
            self.binaryOperators[operatorImplementation.id] = operatorImplementation
            self.binaryOperatorElement.append(pyparsing.Or(operatorImplementation.id))
        # basic value event with an optional unary operator
        self.valueEventElement = (
            pyparsing.Optional(self.unaryOperatorElement) +
            (self.constantElement | self.eventReferenceElement)
        )
        def valueEventElementParseAction(string, location, tokens):
            """
            Parse value event
            """
            if len(tokens) == 1:
                self.log.debug("found event '%s'", repr(tokens[0]))
                return tokens[0]
            # check for unary operators
            if len(tokens) == 2:
                self.log.debug("found event '%s %s'", tokens[0], repr(tokens[1]))
                if tokens[0] in self.unaryOperators:
                    return self.unaryOperators[tokens[0]](tokens[1])
                else:
                    raise pyparsing.ParseException("found unknown unary operator '{0}'".format(repr(tokens[0])))
        self.valueEventElement.addParseAction(valueEventElementParseAction)
        # complex event that may consist of a combination of events
        self.eventElement = pyparsing.Forward()
        self.eventElement << (
            (
                pyparsing.Optional(self.unaryOperatorElement) + pyparsing.Suppress("(") + self.eventElement + pyparsing.Suppress(")") +
                pyparsing.Optional(
                    self.binaryOperatorElement +
                    pyparsing.Or([
                        pyparsing.Optional(self.unaryOperatorElement) + pyparsing.Suppress("(") + self.eventElement + pyparsing.Suppress(")"),
                        self.valueEventElement])
                )
            ) |
            (self.valueEventElement + self.binaryOperatorElement + self.valueEventElement) |
            self.valueEventElement
        )
        def eventElementParseAction(string, location, tokens):
            """
            Parse event
            """
            if len(tokens) == 1:
                self.log.debug("found event '%s'", repr(tokens[0]))
                return tokens[0]
            # check for unary operators
            if len(tokens) == 2:
                self.log.debug("found event '%s %s'", tokens[0], repr(tokens[1]))
                if tokens[0] in self.unaryOperators:
                    return self.unaryOperators[tokens[0]](tokens[1])
                else:
                    raise pyparsing.ParseException("found unknown unary operator '{0}'".format(repr(tokens[0])))
            # check for binary operators
            if len(tokens) == 3:
                self.log.debug("found event '%s %s %s)'", repr(tokens[0]), tokens[1], repr(tokens[2]))
                if tokens[1] in self.binaryOperators:
                    return self.binaryOperators[tokens[1]](tokens[0], tokens[2])
                else:
                    raise pyparsing.ParseException("found unknown binary operator '{0}'".format(tokens[1]))
        self.eventElement.addParseAction(eventElementParseAction)
        # action identifier
        self.actionIdElement = pyparsing.Word(pyparsing.alphanums + ".")
        # action specified by an id and optional parameters
        self.actionElement = (self.actionIdElement +
                              pyparsing.Suppress("(") + pyparsing.Optional(self.parametersElement) + pyparsing.Suppress(")"))
        def actionElementParseAction(string, location, tokens):
            """
            Parse action into an action reference
            """
            if len(tokens) == 1:
                self.log.debug("found action '%s'", tokens[0])
                parameters = ([], {})
            else:
                self.log.debug("found action '%s%s'", tokens[0], repr(tokens[1]))
                parameters = tokens[1]
            if tokens[0] not in self.policyEngine.actions:
                raise pyparsing.ParseFatalException(
                    string, location,
                    "found unknown action reference '{0}'".format(tokens[0]))
            # set up action implementation
            action = self.policyEngine.actions[tokens[0]]()
            arguments = parameters[0]
            keyValueArguments = parameters[1]
            # introspect the action's perform() signature to validate arguments
            handlerArgSpec = inspect.getargspec(action.perform)
            handlerArguments = handlerArgSpec[0][1:]
            # check for named arguments
            handlerKeyValueArguments = {}
            if handlerArgSpec[3]:
                # map trailing parameters with defaults to their default values
                keys = handlerArguments[-len(handlerArgSpec[3]):]
                handlerKeyValueArguments = dict(zip(keys, handlerArgSpec[3]))
                handlerArguments = handlerArguments[:len(handlerArguments)-len(handlerArgSpec[3])]
            # make sure we have at least the number of arguments that the action requires
            if len(handlerArguments) != len(arguments):
                raise pyparsing.ParseFatalException(
                    string, location,
                    "action '{0}' requires {1} arguments but {2}: {3} are given".format(
                        action, len(handlerArguments), len(arguments), arguments))
            # check for unknown named arguments
            for key in keyValueArguments:
                if key not in handlerKeyValueArguments:
                    raise pyparsing.ParseFatalException(
                        string, location,
                        "action '{0}' does not have a named argument '{1}'".format(action, key))
            return ActionReference(self.policyEngine.actions[tokens[0]](), parameters[0], parameters[1])
        self.actionElement.addParseAction(actionElementParseAction)
        # list of actions
        self.actionsElement = self.actionElement + pyparsing.ZeroOrMore(pyparsing.Suppress(",") + self.actionElement)
        # policy element consisting of a name, an event and a set of actions
        self.policyElement = pyparsing.Word(pyparsing.alphanums + "." + "_") + pyparsing.Suppress(":") + self.eventElement + pyparsing.Suppress("->") + self.actionsElement

    def checkParameters(self, o, method, arguments, keyValueArguments):
        """
        Check parameters for the specified method
        :param o: object
        :type o: object
        :param method: method
        :type method: str
        :param arguments: arguments
        :type arguments: list
        :param keyValueArguments: key value arguments
        :type keyValueArguments: dict
        :raises ValueError: if parameters are not valid
        """
        # set up implementation
        handlerArgSpec = inspect.getargspec(getattr(o, method))
        handlerArguments = handlerArgSpec[0][1:]
        # check for named arguments
        handlerKeyValueArguments = {}
        if handlerArgSpec[3]:
            keys = handlerArguments[-len(handlerArgSpec[3]):]
            handlerKeyValueArguments = dict(zip(keys, handlerArgSpec[3]))
            handlerArguments = handlerArguments[:len(handlerArguments)-len(handlerArgSpec[3])]
        # make sure we have at least the number of arguments that the object requires
        # (a *args parameter -- handlerArgSpec[1] -- relaxes the exact-count check)
        if len(handlerArguments) != len(arguments) and handlerArgSpec[1] is None:
            raise ValueError("object '{0}' requires {1} arguments but {2}: {3} are given".format(
                repr(o), len(handlerArguments), len(arguments), arguments))
        # check for unknown named arguments
        for key in keyValueArguments:
            if key not in handlerKeyValueArguments:
                raise ValueError("object '{0}' does not have a named argument '{1}'".format(repr(o), key))

    def parseAction(self, string):
        """
        Parse string into :class:`Action`
        :returns: :class:`Action`
        """
        return self.actionElement.parseString(string, parseAll=True)[0]

    def parseActions(self, string):
        """
        Parse string into multiple :class:`Action` s
        :returns: [:class:`Action`]
        """
        return self.actionsElement.parseString(string, parseAll=True)

    def parseEvent(self, string):
        """
        Parse string into :class:`Event`
        :returns: :class:`Event`
        """
        return self.eventElement.parseString(string, parseAll=True)[0]

    def parsePolicy(self, string):
        """
        Parse string into :class:`Policy`
        :returns: :class:`Policy`
        """
        policyItems = self.policyElement.parseString(string, parseAll=True)
        # token 0 is the name, token 1 the event, the rest are actions
        return PolicyComponent(policyItems[0], policyItems[1], policyItems[2:])
@ClassLogger
class PolicyEngineProcess(multiprocessing.Process):
    """
    Policy engine process
    :param properties: properties
    :type properties: dict
    """
    def __init__(self, properties=None):
        super(PolicyEngineProcess, self).__init__(name="Policy engine")
        self.properties = properties or {}
        # initial delay before the first run (seconds)
        self.initial = self.properties.get("policy.timer.initial", 5)
        # number of repeats; negative means run forever
        self.repeat = self.properties.get("policy.timer.repeat", -1)
        # pause between runs (seconds)
        self.interval = self.properties.get("policy.timer.interval", 10)
        # whether to refresh policy list/states from the database each cycle
        self.updateEnabled = self.properties.get("update.from.db", False)

    def run(self):
        """
        The implementation of the policy engine process
        """
        policyEngine = PolicyEngine(properties=self.properties)
        policies = policyEngine.policies
        self.log.info("policies: %s", str(policies))
        try:
            # wait until device managers transition to running before starting
            node = self.properties.get('node', None)
            devicesNotRunning = self.getDevicesNotRunning(node)
            while len(devicesNotRunning) > 0:
                self.log.info("Waiting for devices to become running: %s", ", ".join(devicesNotRunning))
                time.sleep(self.interval)
                devicesNotRunning = self.getDevicesNotRunning(node)
            time.sleep(self.initial)
            if self.repeat < 0:
                # run forever, optionally refreshing from the database each cycle
                while True:
                    if self.updateEnabled:
                        policyEngine.updateFromDatabase()
                    policyEngine.run()
                    time.sleep(self.interval)
            else:
                # run a bounded number of cycles
                # NOTE(review): the database refresh is only applied in the
                # unbounded branch above -- confirm this asymmetry is intended
                while self.repeat >= 0:
                    policyEngine.run()
                    time.sleep(self.interval)
                    self.repeat -= 1
        except KeyboardInterrupt:
            self.log.debug("Exiting %s", self.name)
        except:
            self.log.debug("Forced exiting %s", self.name)
            self.log.error(traceback.format_exc())

    def getDevicesNotRunning(self, node):
        """
        Build a list of devices on this node that are not in running state.
        :param node: node to get devices for
        :type node: str
        :returns: list of device names
        """
        devices = Backend().configuration.getDevices(node, flatDeviceHierarchy=True)
        devicesNotRunning = []
        for deviceInfo in devices.values():
            if deviceInfo.state != ConfigStates.RUNNING:
                devicesNotRunning.append(deviceInfo.name)
        return devicesNotRunning
class PolicyProperties(JSONSerializable):
    """
    Serializable container for optional policy properties
    """
    def __init__(self):
        # free-form human readable description of the policy
        self.description = None
@ClassLogger
class PolicyWrapper(object):
    """
    Derived classes need to provide an id and policy string.
    The PolicyWrapper class is used to load policies from disk,
    see c4/system/policies/
    """
    # unique policy id; prepended to `policy` as "<id>:<policy>" when parsed
    id = ""
    # textual policy in parser syntax, e.g. "someEvent() -> someAction()"
    policy = ""
@ClassLogger
class UnaryOperator(Event):
    """
    Base class for unary event operators

    :param one: event one
    :type one: :class:`Event`
    """
    __metaclass__ = ABCMeta
    id = "unaryOperator"

    def __init__(self, one):
        super(UnaryOperator, self).__init__()
        # wrap plain values so the operand is always an Event
        self.one = ValueEvent.create(one)

    @abstractmethod
    def evaluateOperation(self, one):
        """
        Evaluate the unary operation with the specified operands
        """

    def evaluate(self):
        operand = self.one.evaluate()
        return self.evaluateOperation(operand)

    def __repr__(self, *args, **kwargs):
        return "({0} {1})".format(self.id, repr(self.one))

    def __str__(self, *args, **kwargs):
        return "({0} {1} -> {2})".format(self.id, self.one, self.evaluate())
@ClassLogger
class ValueEvent(Event):
    """
    An event wrapping a plain constant value

    :param value: value
    """
    id = "value"

    def __init__(self, value):
        super(ValueEvent, self).__init__()
        self._value = value

    def evaluate(self):
        """
        Return the wrapped constant value

        :returns: value
        """
        return self._value

    @staticmethod
    def create(value):
        """
        Wrap ``value`` in a :class:`ValueEvent`.

        .. note::

            If ``value`` is already an :class:`Event` then
            itself is returned instead

        :param value: value
        """
        if isinstance(value, Event):
            return value
        return ValueEvent(value)

    def __repr__(self, *args, **kwargs):
        return repr(self._value)

    def __str__(self, *args, **kwargs):
        return str(self.evaluate())
| """
Copyright (c) IBM 2015-2017. All Rights Reserved.
Project name: c4-policy-engine
This project is licensed under the MIT License, see LICENSE
A policy engine implementation with support for events and actions as well as textual representations
"""
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from datetime import datetime
import inspect
import logging
import multiprocessing
import re
import socket
import time
import traceback
from c4.messaging import RouterClient
import c4.policies
import c4.policyengine.actions
import c4.policyengine.events
import c4.policyengine.events.operators
from c4.system.backend import Backend
from c4.system.configuration import (States as ConfigStates, Roles)
from c4.system.messages import Operation
from c4.utils.enum import Enum
from c4.utils.jsonutil import JSONSerializable
from c4.utils.logutil import ClassLogger
from c4.utils.util import (callWithVariableArguments,
getFormattedArgumentString, getFullModuleName, getModuleClasses)
log = logging.getLogger(__name__)
class States(Enum):
    """
    Enumeration of policy states
    """
    # policy is active and will be evaluated by the engine
    ENABLED = "enabled"
    # policy is loaded but skipped during evaluation
    DISABLED = "disabled"
@ClassLogger
class Event(object):
    """
    An event implementation. Base class for everything that can be evaluated
    on the left-hand side of a policy (conditions, operators, references).
    """
    __metaclass__ = ABCMeta
    # unique event identifier; set by subclasses
    id = None

    def __init__(self):
        pass

    # TODO: type, group, severity, description
    # see http://www-01.ibm.com/support/knowledgecenter/SSULQD_7.1.0/com.ibm.nz.adm.doc/r_sysadm_template_event_rules.html
    @abstractmethod
    def evaluate(self):
        """
        Evaluate the event

        .. note::

            Subclasses should implement this

        :returns: value
        """

    @property
    def value(self):
        """
        Value of the event (evaluated on every access)
        """
        return self.evaluate()

    def __repr__(self, *args, **kwargs):
        return "({0})".format(self.id)

    def __str__(self, *args, **kwargs):
        # note: str() triggers evaluation of the event
        return "({0} -> {1})".format(self.id, self.evaluate())
@ClassLogger
class EventReference(Event):
    """
    A reference to an :class:`Event`

    :param event: event
    :type event: :class:`Event`
    :param arguments: arguments
    :param keyValueArguments: key value arguments
    """
    def __init__(self, event, arguments=None, keyValueArguments=None):
        super(EventReference, self).__init__()
        self.event = event
        self.id = event.id
        if arguments is None:
            self.arguments = []
        else:
            self.arguments = arguments
        if keyValueArguments is None:
            self.keyValueArguments = {}
        else:
            self.keyValueArguments = keyValueArguments

    def evaluate(self):
        """
        Evaluate the specified event using the given
        arguments and key value arguments.
        Exceptions are logged and result in an implicit ``None`` return.

        :returns: result
        """
        try:
            # resolve nested event references to their values first
            arguments = []
            for argument in self.arguments:
                if isinstance(argument, (EventReference, CachableEvent)):
                    arguments.append(argument.evaluate())
                elif isinstance(argument, Event):
                    # plain Events are rejected: only references are resolvable here
                    raise ValueError("'{0}' needs to be an EventReference".format(repr(argument)))
                else:
                    arguments.append(argument)
            keyValueArguments = {}
            for key, value in self.keyValueArguments.items():
                if isinstance(value, (EventReference, CachableEvent)):
                    keyValueArguments[key] = value.evaluate()
                elif isinstance(value, Event):
                    raise ValueError("'{0}={1}' needs to be an EventReference".format(key, repr(value)))
                else:
                    keyValueArguments[key] = value
            return callWithVariableArguments(self.event.evaluate, *arguments, **keyValueArguments)
        except Exception as exception:
            self.log.error(self.event)
            self.log.exception(exception)

    def __repr__(self, *args, **kwargs):
        return "({0}{1})".format(self.id,
                                 getFormattedArgumentString(self.arguments, self.keyValueArguments))

    def __str__(self, evaluatedValue=None, *args, **kwargs):
        # TODO: what about if the value is actually None?
        if evaluatedValue is None:
            evaluatedValue = self.evaluate()
        return "({0}{1} -> {2})".format(self.id,
                                        getFormattedArgumentString(self.arguments, self.keyValueArguments),
                                        evaluatedValue)
@ClassLogger
class Action(object):
    """
    An action implementation. Base class for the right-hand side of a policy.
    """
    __metaclass__ = ABCMeta
    # unique action identifier; set by subclasses
    id = None

    @abstractmethod
    def perform(self):
        """
        Perform specified action

        .. note::

            Subclasses should add arguments as needed

        :returns: result
        """

    def __repr__(self, *args, **kwargs):
        return "{0}(...)".format(self.id)
@ClassLogger
class ActionReference(Action):
    """
    A reference to an :class:`Action`

    :param action: action
    :type action: :class:`Action`
    :param arguments: arguments
    :param keyValueArguments: key value arguments
    """
    def __init__(self, action, arguments=None, keyValueArguments=None):
        self.action = action
        self.id = action.id
        # keep the caller's containers (no copies) to preserve aliasing
        self.arguments = [] if arguments is None else arguments
        self.keyValueArguments = {} if keyValueArguments is None else keyValueArguments

    def perform(self):
        """
        Perform the referenced action with the stored arguments.
        Exceptions are logged and result in an implicit ``None`` return.

        :returns: result
        """
        try:
            return callWithVariableArguments(self.action.perform, *self.arguments, **self.keyValueArguments)
        except Exception as exception:
            self.log.error(self.action)
            self.log.exception(exception)

    def __repr__(self, *args, **kwargs):
        return "{0}{1}".format(self.id, getFormattedArgumentString(self.arguments, self.keyValueArguments))
@ClassLogger
class BinaryOperator(Event):
    """
    Base class for binary event operators

    :param one: event one
    :type one: :class:`Event`
    :param two: event two
    :type two: :class:`Event`
    """
    __metaclass__ = ABCMeta
    id = "binaryOperator"

    def __init__(self, one, two):
        super(BinaryOperator, self).__init__()
        # wrap plain values so both operands are always Events
        self.one = ValueEvent.create(one)
        self.two = ValueEvent.create(two)

    @abstractmethod
    def evaluateOperation(self, one, two):
        """
        Evaluate the binary operation with the specified operands
        """

    def evaluate(self):
        left = self.one.evaluate()
        right = self.two.evaluate()
        return self.evaluateOperation(left, right)

    def __repr__(self, *args, **kwargs):
        return "({0} {1} {2})".format(repr(self.one), self.id, repr(self.two))

    def __str__(self, *args, **kwargs):
        return "({0} {1} {2} -> {3})".format(self.one, self.id, self.two, self.evaluate())
@ClassLogger
class Cache(dict):
    """
    A memory-based dictionary cache with an on/off switch
    """
    def __init__(self):
        super(Cache, self).__init__()
        # when False, CachableEvent bypasses the cache entirely
        self.enabled = True
@ClassLogger
class CachableEvent(Event):
    """
    An event whose evaluated value can be cached

    :param cache: cache
    :type cache: :class:`Cache`
    :param event: event
    :type event: :class:`Event`
    """
    def __init__(self, cache, event):
        super(CachableEvent, self).__init__()
        self.cache = cache
        self.event = event
        self.id = event.id

    def evaluate(self):
        # bypass the cache entirely when it is disabled
        if not self.cache.enabled:
            return self.event.evaluate()
        # evaluate once per cache generation, keyed by event id
        if self.id not in self.cache:
            self.cache[self.id] = self.event.evaluate()
        return self.cache[self.id]

    def __repr__(self, *args, **kwargs):
        return repr(self.event)

    def __str__(self, *args, **kwargs):
        if isinstance(self.event, EventReference):
            return self.event.__str__(evaluatedValue=self.evaluate())
        return "({0} -> {1})".format(self.id, self.evaluate())
@ClassLogger
class Policy(object):
    """
    A policy base class

    :param cache: cache
    :type cache: :class:`Cache`
    """
    __metaclass__ = ABCMeta
    # unique policy identifier; set by subclasses
    id = None

    def __init__(self, cache=None):
        self.cache = cache
        # policies start out enabled
        self.state = States.ENABLED

    @property
    def description(self):
        """
        Formatted description based on the doc string
        """
        if not self.__doc__:
            return ""
        description = []
        for line in self.__doc__.splitlines():
            line = line.strip()
            # skip blank lines and Sphinx field lines like ":param x: ..."
            if line and not line.startswith(":"):
                description.append(line)
        return "\n".join(description)

    @abstractmethod
    def evaluateEvent(self):
        """
        Evaluate the event to determine if the action for this policy should
        to be performed
        """

    @abstractmethod
    def performActions(self):
        """
        Perform actions specified for the policy if the event evaluated as
        ``True``
        """

    def __hash__(self, *args, **kwargs):
        return hash(repr(self))

    def __repr__(self, *args, **kwargs):
        return "{0}".format(self.id)

    def __str__(self, *args, **kwargs):
        return "{0}".format(self.id)
@ClassLogger
class PolicyComponent(Policy):
    """
    A policy component consisting of an event and respective list of actions

    :param name: name
    :type name: str
    :param event: event
    :type event: :class:`Event`
    :param actions: list of actions
    :param actions: [:class:`ActionReference`]
    """
    def __init__(self, name, event, actions, cache=None):
        super(PolicyComponent, self).__init__(cache)
        self.id = name
        self.event = event
        self.actions = actions
        # hash -> policy, used to detect duplicate child policies
        self.policyHashes = {}
        # child policies keyed by id, in insertion order
        self.policies = OrderedDict()

    def addPolicy(self, policy):
        """
        Add a child policy

        :param policy: policy
        :type policy: :class:`Policy`
        """
        childHash = hash(policy)
        if childHash in self.policyHashes:
            self.log.error("policy '%s' already exists", repr(policy))
            return
        self.policyHashes[childHash] = policy
        self.policies[policy.id] = policy
        if isinstance(policy, PolicyComponent):
            self.log.debug("'%s' added policy '%s' '%s'", self.id, policy.id, repr(policy))
        else:
            self.log.debug("'%s' added policy '%s'", self.id, policy.id)

    def evaluateEvent(self):
        return self.event.evaluate()

    def performActions(self):
        for action in self.actions or []:
            action.perform()

    def __hash__(self, *args, **kwargs):
        return hash(repr(self))

    def __repr__(self, *args, **kwargs):
        actionText = ",".join(str(action) for action in self.actions)
        return "{0} -> {1}".format(repr(self.event), actionText)

    def __str__(self, *args, **kwargs):
        actionText = ",".join(str(action) for action in self.actions)
        return "{0}: {1} -> {2}".format(self.id, self.event, actionText)
@ClassLogger
class PolicyDatabase(object):
"""
An abstraction of the underlying database where policies are stored
"""
def __init__(self):
    # key-value store obtained from the shared backend singleton
    self.store = Backend().keyValueStore
def addPolicyUsingName(self, fullPolicyName, policy):
    """
    Add a policy
    :param fullPolicyName: fully qualified policy name
    :type fullPolicyName: str
    :param policy: policy
    :type policy: :class:`Policy`
    """
    # hierarchical names are slash-separated, e.g. "parent/child"
    nameHierarchy = fullPolicyName.split("/")
    if len(nameHierarchy) == 1:
        # no parent
        if self.policyExists(policy.id):
            # with the ability to run policy engine on multiple nodes but with shared database this is acceptable
            self.log.debug("policy '%s' already exists", repr(policy))
            return False
        else:
            self.addPolicy(policy)
        # check if we can add children
        if hasattr(policy, "policies"):
            # recurse so each child is stored under "<parent>/<child>"
            for childPolicy in policy.policies.values():
                self.addPolicyUsingName("{0}/{1}".format(fullPolicyName, childPolicy.id), childPolicy)
    else:
        # nested names cannot be stored yet
        self.log.warn("Parent child relationships not implemented yet")
        return False
    return True
def addPolicy(self, policy):
"""
Add a policy
:param policy: policy
:type policy: :class:`Policy`
"""
policyInfo = self.getPolicyInfo(policy.id)
if policyInfo:
self.log.error("policy '%s' already exists", policyInfo.name)
return None
properties = {}
if isinstance(policy, PolicyComponent):
representation = repr(policy)
policyType = "{}.{}".format(getFullModuleName(PolicyComponent), PolicyComponent.__name__)
else:
representation = policy.id
policyType = "{}.{}".format(getFullModuleName(policy), policy.__class__.__name__)
if policy.description:
properties["description"] = policy.description
policyKey = self.getKey(policy.id)
propertiesKey = "{policyKey}/properties".format(policyKey=policyKey)
representationKey = "{policyKey}/representation".format(policyKey=policyKey)
stateKey = "{policyKey}/state".format(policyKey=policyKey)
typeKey = "{policyKey}/type".format(policyKey=policyKey)
transaction = self.store.transaction
transaction.put(policyKey, policy.id)
transaction.put(representationKey, representation)
transaction.put(stateKey, policy.state.toJSON(includeClassInfo=True))
transaction.put(typeKey, policyType)
for key, value in properties.items():
propertyKey = "{propertiesKey}/{key}".format(propertiesKey=propertiesKey, key=key)
transaction.put(propertyKey, value)
transaction.commit()
if isinstance(policy, PolicyComponent):
self.log.debug("stored policy '%s' '%s'", policy.id, representation)
else:
self.log.debug("stored policy '%s'", policy.id)
def clear(self):
"""
Remove all policies
"""
self.store.deletePrefix("/policies/")
def disablePolicy(self, fullPolicyName):
"""
Disables the policy in the database given its name
:param fullPolicyName: fully qualified policy name
:type fullPolicyName: str
"""
stateKey = self.getKey(fullPolicyName, "state")
serializedState = self.store.get(stateKey)
if not serializedState:
self.log.error("could not disable '%s' because it does not exist", fullPolicyName)
return
self.store.put(stateKey, States.DISABLED.toJSON(includeClassInfo=True))
def enablePolicy(self, fullPolicyName):
"""
Enables the policy in the database given its name
:param fullPolicyName: fully qualified policy name
:type fullPolicyName: str
"""
stateKey = self.getKey(fullPolicyName, "state")
serializedState = self.store.get(stateKey)
if not serializedState:
self.log.error("could not enable '%s' because it does not exist", fullPolicyName)
return
self.store.put(stateKey, States.ENABLED.toJSON(includeClassInfo=True))
def getKey(self, fullPolicyName, *additionalParts):
"""
Get key for the specified policy
:param fullPolicyName: fully qualified policy name
:type fullPolicyName: str
:returns: key
:rtype: str
"""
nameHierarchy = fullPolicyName.split("/")
keyParts = [""]
for name in nameHierarchy:
keyParts.extend(["policies", name])
keyParts.extend(additionalParts)
return "/".join(keyParts)
def getNestedPolicyInfos(self, parentKey, policyInfoMapping):
"""
Get policies based on parent key and the already retrieved values
:param parentKey: parent key
:type parentKey: str
:param policyInfoMapping: policy information mapping of key-value
:type policyInfoMapping: dict
"""
policies = {}
policyKeyExpression = re.compile(r"(?P<policyKey>{parentKey}/policies/[^/]+)$".format(parentKey=parentKey))
for key in policyInfoMapping.keys():
match = policyKeyExpression.match(key)
if match:
policyKey = match.group("policyKey")
propertiesKey = "{policyKey}/properties/".format(policyKey=policyKey)
representationKey = "{policyKey}/representation".format(policyKey=policyKey)
stateKey = "{policyKey}/state".format(policyKey=policyKey)
typeKey = "{policyKey}/type".format(policyKey=policyKey)
policyProperties = {
key.replace(propertiesKey, ""): value
for key, value in policyInfoMapping.items()
if key.startswith(propertiesKey)
}
policyInfo = PolicyInfo(
policyInfoMapping[policyKey],
policyInfoMapping[representationKey],
policyInfoMapping[stateKey],
policyInfoMapping[typeKey],
policyProperties
)
policyInfo.policies = self.getNestedPolicyInfos(policyKey, policyInfoMapping)
policies[policyInfo.name] = policyInfo
return policies
def getNumberOfTopLevelPolicies(self):
"""
Get number of top level policies
:returns: number of top level policies
:rtype: int
"""
pattern = re.compile("/policies/[^/]+$")
policies = {
pattern.search(key)
for key, _ in self.store.getPrefix("/policies/")
if pattern.search(key)
}
return len(policies)
def getPolicyInfo(self, fullPolicyName):
"""
Get policy info for the specified policy
:param fullPolicyName: fully qualified policy name
:type fullPolicyName: str
:returns: policy info
:rtype: :class:`PolicyInfo`
"""
policyKey = self.getKey(fullPolicyName)
policyName = self.store.get(policyKey)
if not policyName:
return None
policyPrefix = policyKey + "/"
# map from key to value and deserialize value automatically
policyInfoMapping = {
key : JSONSerializable.fromJSON(value) if JSONSerializable.classAttribute in value else value
for key, value in self.store.getPrefix(policyPrefix)
}
# deal with policy information
propertiesKey = "{policyKey}/properties/".format(policyKey=policyKey)
representationKey = "{policyKey}/representation".format(policyKey=policyKey)
stateKey = "{policyKey}/state".format(policyKey=policyKey)
typeKey = "{policyKey}/type".format(policyKey=policyKey)
policyProperties = {
key.replace(propertiesKey, ""): value
for key, value in policyInfoMapping.items()
if key.startswith(propertiesKey)
}
policyInfo = PolicyInfo(
policyName,
policyInfoMapping[representationKey],
policyInfoMapping[stateKey],
policyInfoMapping[typeKey],
policyProperties
)
policyInfo.policies = self.getNestedPolicyInfos(policyKey, policyInfoMapping)
return policyInfo
def getPolicyInfos(self):
"""
Get all policy infos
:returns: list of policy infos
:rtype: [:class:`PolicyInfo`]
"""
policyInfoMapping = {
key: JSONSerializable.fromJSON(value) if JSONSerializable.classAttribute in value else value
for key, value in self.store.getPrefix("/policies")
}
return self.getNestedPolicyInfos("", policyInfoMapping).values()
def getPolicyState(self, fullPolicyName):
"""
Get the state of 'policy' if it exists
:param fullPolicyName: fully qualified policy name
:type fullPolicyName: str
:returns: state of the policy if it exists else None
:rtype: :class:`States`
"""
stateKey = self.getKey(fullPolicyName, "state")
value = self.store.get(stateKey)
if value is None:
self.log.error("could not get state because '%s' does not exist", fullPolicyName)
return None
return JSONSerializable.fromJSON(value)
def policyExists(self, fullPolicyName):
"""
Does the specified policy already exist
:param fullPolicyName: fully qualified policy name
:type fullPolicyName: str
:returns: whether policy exists
:rtype: bool
"""
stateKey = self.getKey(fullPolicyName, "state")
serializedState = self.store.get(stateKey)
if serializedState:
return True
return False
@ClassLogger
class PolicyEngine(object):
    """
    Policy engine that allows iterating over policies and performing their actions
    based on whether the specified event matches

    :param properties: properties
    :type properties: dict
    """
    def __init__(self, properties=None):
        # registries of known event and action classes, keyed by their id
        self.events = {}
        # event result cache; only enabled for the duration of a run() pass
        self.cache = Cache()
        self.cache.enabled = False
        self.actions = {}
        self.policyParser = PolicyParser(self)
        # ordered because policy evaluation order matters
        self.policies = OrderedDict()
        self.policyDatabase = PolicyDatabase()
        self.properties = properties or {}
        self.loadActions()
        self.loadEvents()
        orderedList = self.properties.get("policies", [] )
        includePoliciesFromDatabase = self.properties.get("include.policies.database", False)
        self.loadDefaultPolicies(orderedList=orderedList, includePoliciesFromDatabase=includePoliciesFromDatabase)
    def addAction(self, action):
        """
        Add known action

        :param action: action
        :type action: :class:`Action`
        """
        self.log.debug("adding action '%s'", action.id)
        self.actions[action.id] = action
    def addActions(self, actions):
        """
        Add known actions

        :param actions: actions
        :type actions: [:class:`Action`]
        """
        for action in actions:
            self.addAction(action)
    def addEvent(self, event):
        """
        Add known event

        :param event: event
        :type event: :class:`Event`
        """
        if event == Event:
            self.log.warn("cannot add base event class")
        elif issubclass(event, (UnaryOperator, BinaryOperator)):
            # operators are handled by the parser, not the event registry
            self.log.warn("cannot add operator '%s'", event.id)
        else:
            self.log.debug("adding event '%s'", event.id)
            self.events[event.id] = event
    def addEvents(self, events):
        """
        Add known events

        :param events: events
        :type events: [:class:`Event`]
        """
        for event in events:
            self.addEvent(event)
    def addPolicy(self, policy):
        """
        Add a policy

        :param policy: policy
        :type policy: :class:`Policy`
        """
        if self.policyDatabase.addPolicyUsingName(policy.id, policy):
            self.policies[policy.id] = policy
        elif policy.id not in self.policies:
            # already in the shared database (e.g. stored by another node)
            # but not yet loaded into this engine instance
            self.policies[policy.id] = policy
    def addPolicies(self, policies):
        """
        Add policies

        :param policies: policies
        :type policies: [:class:`Policy`]
        """
        for policy in policies:
            self.addPolicy(policy)
    def convertToPolicies(self, policyInfos):
        """
        Convert policy infos into actual policies

        Parsed (DSL) policies are re-parsed from their stored representation;
        class-based policies are imported and instantiated by their type name.

        :param policyInfos: policy infos
        :type policyInfos: [:class:`PolicyInfo`]
        :returns: policies
        :rtype: [:class:`Policy`]
        """
        policyComponentType = "{}.{}".format(getFullModuleName(PolicyComponent), PolicyComponent.__name__)
        policies = []
        for policyInfo in policyInfos:
            if policyInfo.type == policyComponentType:
                try:
                    policy = self.policyParser.parsePolicy(policyInfo.name + ":" + policyInfo.representation)
                    policy.state = policyInfo.state
                    # load children
                    if policyInfo.policies:
                        childPolicies = self.convertToPolicies(policyInfo.policies.values())
                        for childPolicy in childPolicies:
                            policy.addPolicy(childPolicy)
                    policies.append(policy)
                    self.log.debug("loaded policy '%s: %s'", policy.id, repr(policy))
                except Exception as exception:
                    self.log.error("could not load policy '%s': '%s': %s", policyInfo.name, policyInfo.representation, exception)
            else:
                try:
                    # get class info
                    info = policyInfo.type.split(".")
                    className = str(info.pop())
                    moduleName = ".".join(info)
                    # load class from module
                    module = __import__(moduleName, fromlist=[className])
                    clazz = getattr(module, className)
                    # create instance based off constructor: policies whose
                    # __init__ takes an argument beyond self get the shared cache
                    args = inspect.getargspec(clazz.__init__)
                    if len(args[0]) > 1:
                        policy = clazz(self.cache)
                    else:
                        policy = clazz()
                    policy.state = policyInfo.state
                    policies.append(policy)
                    self.log.debug("loaded policy '%s' of type '%s'", policyInfo.name, policyInfo.type)
                except Exception as exception:
                    self.log.error("could not load policy '%s' of type '%s': %s", policyInfo.name, policyInfo.type, exception)
        return policies
    def disablePolicy(self, policy):
        """
        Disables the given policy, both in memory and in the database

        :param policy: policy
        :type policy: :class:`Policy`
        """
        self.log.debug("Disabling policy %s", str(policy))
        policyInfo = self.policyDatabase.getPolicyInfo(policy.id)
        if policyInfo is None:
            self.log.error("Unable to get policy from the database: %s", str(policy))
            return
        # disable the policy in memory and in the database
        if policy.state == States.ENABLED:
            policy.state = States.DISABLED
            self.policyDatabase.disablePolicy(policy.id)
        else:
            self.log.info("Policy is already disabled %s", str(policy))
    def enablePolicy(self, policy):
        """
        Enables the given policy, both in memory and in the database

        :param policy: policy
        :type policy: :class:`Policy`
        """
        self.log.debug("Enabling policy %s", str(policy))
        policyInfo = self.policyDatabase.getPolicyInfo(policy.id)
        if policyInfo is None:
            self.log.error("Unable to get policy from the database: %s", str(policy))
            return
        # enable the policy in memory and in the database
        if not policy.isEnabled():
            policy.state = States.ENABLED
            self.policyDatabase.enablePolicy(policy.id)
        else:
            self.log.info("Policy is already enabled %s", str(policy))
    def loadActions(self):
        """
        Load Action implementations from the ``c4.policyengine.actions``
        and ``c4.policies`` modules into the action registry.
        """
        actions = getModuleClasses(c4.policyengine.actions, Action)
        actions.extend(getModuleClasses(c4.policies, Action))
        # filter out base classes
        actions = [action for action in actions if action != Action and action != ActionReference]
        self.addActions(actions)
    def loadDefaultPolicies(self, orderedList=None, includePoliciesFromDatabase=False):
        """
        Load the policies named in ``orderedList`` from the available sources:
        class-based policies and policy wrappers in ``c4.policies`` and,
        optionally, policies stored in the database.

        :param orderedList: List of policy ids to include
        :type orderedList: list
        :param includePoliciesFromDatabase: Include policies from database?
        :type includePoliciesFromDatabase: boolean
        """
        # short circuit for empty list
        if not orderedList:
            self.log.info("Configuration did not specify any policies to load" )
            return
        # load policies
        policies = getModuleClasses(c4.policies, Policy)
        # filter out base class
        policies = [policy for policy in policies if policy != Policy]
        # build temporary unordered dict
        policyDict = {}
        for policy in policies:
            policyDict[policy.id] = policy
        wrappedPolicyDict = {}
        wrappedPolicies = getModuleClasses(c4.policies, PolicyWrapper)
        # remove base class
        if PolicyWrapper in wrappedPolicies:
            wrappedPolicies.remove(PolicyWrapper)
        for wrappedPolicy in wrappedPolicies:
            policyString = wrappedPolicy.id + ":" + wrappedPolicy.policy
            try:
                policy = self.policyParser.parsePolicy(policyString)
                wrappedPolicyDict[policy.id] = policy
            except Exception as exception:
                self.log.exception("could not parse policy wrapper '%s': %s", policyString, exception)
        dbPolicyDict = {}
        if includePoliciesFromDatabase:
            dbPolicies = self.getPoliciesFromDatabase()
            for policy in dbPolicies:
                dbPolicyDict[policy.id] = policy
        self.policies.clear()
        # We are specifying an order for loading the policies, but we have 3 sources the policies could be loaded from,
        # and the different sources have slightly different behaviors so go through the list to see if the policy can
        # be found and then load it based on the source; ie class properties will be type 1, policy wrapper will be type 2,
        # and policies that were custom added will be loaded from the database as type 3
        # Note that because we are not loading all policies anymore, it isn't sufficient to just check to see if the policy
        # database has policies loaded; also because we support dynamic loading it isn't sufficient to always load defaults
        for policyId in orderedList:
            try:
                policy = policyDict.get(policyId, None)
                policyType = 1
                if not policy:
                    policy = wrappedPolicyDict.get(policyId, None)
                    policyType = 2
                if not policy:
                    policy = dbPolicyDict.get(policyId, None)
                    policyType = 3
                if policy:
                    if policyType == 1:
                        # type 1 is a class, so instantiate it with the shared cache
                        self.log.debug("loading default policy '%s' of type '%s.%s'", policy.id, policy.__module__, policy.__name__)
                        self.addPolicy(policy(self.cache))
                    elif policyType == 2:
                        self.log.debug("loading default policy '%s' from wrapper", policy.id)
                        self.addPolicy(policy)
                    else:
                        self.log.debug("loading default policy '%s' from database", policy.id)
                        self.addPolicy(policy)
                else:
                    self.log.error("Configuration error - policy: '%s' not found", policyId )
            except Exception as exception:
                self.log.exception(exception)
    def loadEvents(self):
        """
        Load Event implementations from the ``c4.policyengine.events``
        and ``c4.policies`` modules into the event registry.
        """
        events = getModuleClasses(c4.policyengine.events, Event)
        events.extend(getModuleClasses(c4.policies, Event))
        # filter out base classes and operators
        events = [event for event in events if event != Event and not issubclass(event, (UnaryOperator, BinaryOperator))]
        self.addEvents(events)
    def getPoliciesFromDatabase(self):
        """
        Get policies from the policy database table

        :returns: policies
        :rtype: [:class:`Policy`]
        """
        return self.convertToPolicies(self.policyDatabase.getPolicyInfos())
    def loadPolicy(self, string):
        """
        Load a policy into the engine

        :param string: policy string
        :type string: str
        """
        try:
            policy = self.policyParser.parsePolicy(string)
            self.addPolicy(policy)
        except Exception as exception:
            self.log.error("could not load policy '%s': %s", string, exception)
    def run(self, policy=None):
        """
        If a policy is given then check if specified event
        matches and perform actions accordingly, followed
        by running its child policies.

        If no policy is specified start with root policies.

        :param policy: policy
        :type policy: :class:`Policy`
        """
        if policy:
            start = datetime.utcnow()
            if policy.evaluateEvent():
                self.log.debug("event match for '%s'", policy)
                policy.performActions()
                # child policies only run when the parent's event matched
                if hasattr(policy, "policies"):
                    for childPolicy in policy.policies.values():
                        if childPolicy.state == States.ENABLED:
                            try:
                                self.run(childPolicy)
                            except Exception as exception:
                                self.log.exception(exception)
            else:
                self.log.debug("no event match for '%s'", policy)
            end = datetime.utcnow()
            self.log.debug("executing policy '%s' took %s", policy.id, end-start)
            self.checkPerformanceIssues(policy.id, start, end)
        else:
            start = datetime.utcnow()
            # clear cache on events
            self.cache.clear()
            self.cache.enabled = True
            # go through policies in order
            for policy in self.policies.values():
                if policy.state == States.ENABLED:
                    try:
                        self.run(policy)
                    except Exception as exception:
                        self.log.exception(exception)
                else:
                    self.log.debug("'%s' disabled", policy.id)
            # clear cache on events
            self.cache.clear()
            self.cache.enabled = False
            end = datetime.utcnow()
            self.log.debug("executing policy engine took %s", end-start)
    def updateFromDatabase(self):
        """
        Update all policies from database (includes list and state).

        Compares the engine's loaded policies against the policy list
        configured for this node/device and reloads them when they differ;
        then refreshes each policy's enabled/disabled state from the database.
        """
        start = datetime.utcnow()
        # check policy list to see if it needs updating
        node = self.properties.get('node', None)
        name = self.properties.get('name', None)
        expectedPolicies = None
        role = None
        if node and name:
            configuration = Backend().configuration
            role = configuration.getRole(node)
            if role != Roles.DISABLED:
                roleInfo = configuration.getRoleInfo(role=role)
                if roleInfo:
                    deviceInfo = roleInfo.devices.get(name, None)
                    if deviceInfo:
                        properties = deviceInfo.properties
                        if properties:
                            expectedPolicies = properties.get('policies', [])
            else:
                self.log.info("Node is disabled removing policies...")
                expectedPolicies = []
                self.policies.clear()
        # NOTE: if expectedPolicies could not be determined (None) we change nothing
        if expectedPolicies or (role and role == Roles.DISABLED):
            replacePolicies = False
            # check for extra policies
            for policy in self.policies.keys():
                if policy not in expectedPolicies:
                    replacePolicies = True
                    break
            if not replacePolicies:
                # check for missing policies
                for policy in expectedPolicies:
                    if policy not in self.policies.keys():
                        replacePolicies = True
                        break
            # if mismatch then replace all policies (since order matters)
            if replacePolicies:
                self.log.info("Expected policies: %s", str(expectedPolicies))
                self.log.info("Actual policies: %s", str(self.policies.keys()))
                self.log.info("Correcting policies...")
                includePoliciesFromDatabase = self.properties.get("include.policies.database", False)
                self.loadDefaultPolicies(orderedList=expectedPolicies, includePoliciesFromDatabase=includePoliciesFromDatabase)
                #TODO send device name a setPolicies operation message to update it's status for reporting
                address = socket.gethostname().split(".")[0]
                client = RouterClient(address)
                client.forwardMessage(Operation("{0}/{1}".format(node, name),
                                                "setPolicies",
                                                policies=expectedPolicies))
        # go through policies in order to update states
        for key, policy in self.policies.items():
            dbState = self.policyDatabase.getPolicyState(policy.id)
            if policy.state != dbState:
                policy.state = dbState
                self.policies[key] = policy
        end = datetime.utcnow()
        self.log.debug("updating policy engine took %s", end-start)
    def checkPerformanceIssues(self, policyName, start, end):
        """
        Log a warning when executing a policy took longer than the configured
        ``performance.warning.threshold`` (in seconds, default 2).

        :param policyName: policy name
        :type policyName: str
        :param start: evaluation start time
        :type start: :class:`datetime.datetime`
        :param end: evaluation end time
        :type end: :class:`datetime.datetime`
        """
        # this value might require tweaking for complex policies and multinode systems
        policyPerfomanceWarn = self.properties.get("performance.warning.threshold", 2)
        execTime = (end-start).total_seconds()
        if execTime > policyPerfomanceWarn:
            self.log.warning("Executing policy '%s' has taken: %s seconds", policyName, execTime)
class PolicyInfo(JSONSerializable):
    """
    Policy information

    :param name: name
    :type name: str
    :param representation: representation
    :type representation: str
    :param state: state
    :type state: :class:`States`
    :param policyType: type
    :type policyType: str
    :param properties: properties
    :type properties: dict
    """
    def __init__(self, name, representation, state, policyType, properties):
        self.name = name
        self.representation = representation
        self.state = state
        self.type = policyType
        # child policy infos, created lazily on first addPolicyInfo call
        self.policies = None
        self.properties = properties
    def addPolicyInfo(self, policyInfo):
        """
        Add child policy information

        :param policyInfo: policy info
        :type policyInfo: :class:`PolicyInfo`
        :returns: :class:`PolicyInfo`
        """
        if self.policies is None:
            self.policies = OrderedDict()
        if policyInfo.name in self.policies:
            # duplicate child name; keep the existing entry
            log.error("'%s' already part of '%s'", policyInfo.name, self.name)
            return self
        self.policies[policyInfo.name] = policyInfo
        return self
@ClassLogger
class PolicyParser(object):
    """
    Base implementation of a policy parser using ``pyparsing``

    Builds a grammar for policy strings of the form
    ``name: <event expression> -> <action>(...), <action>(...)`` where event
    expressions may combine event references, constants and unary/binary
    operators.

    :param policyEngine: policy engine
    :type policyEngine: :class:`PolicyEngine`
    """
    def __init__(self, policyEngine):
        self.policyEngine = policyEngine
        # operator id -> operator class mappings, filled from the operators module
        self.unaryOperators = {}
        self.binaryOperators = {}
        import pyparsing
        # constant values
        self.stringConstantElement = (pyparsing.QuotedString("\"", unquoteResults=True) |
                                      pyparsing.QuotedString("'", unquoteResults=True))
        self.numberConstantElement = pyparsing.Word(pyparsing.nums + ".")
        def numberConstantElementParseAction(tokens):
            """
            Parse number constants into `float` or `int`
            """
            self.log.debug("found number constant '%s'", tokens[0])
            if "." in tokens[0]:
                try:
                    return float(tokens[0])
                except:
                    pass
            else:
                try:
                    return int(tokens[0])
                except:
                    pass
            # fall back to the raw tokens if conversion failed
            return tokens
        self.numberConstantElement.addParseAction(numberConstantElementParseAction)
        self.constantElement = self.stringConstantElement | self.numberConstantElement
        # key-value pair constant
        self.namedConstantElement = pyparsing.Word(pyparsing.alphanums) + "=" + self.constantElement
        def namedConstantParseAction(string, location, tokens):
            """
            Parse named constant into a key-value dictionary
            """
            # tokens are [key, "=", value]
            self.log.debug("found named constant '%s = %s'", tokens[0], tokens[2])
            return {tokens[0]: tokens[2]}
        self.namedConstantElement.addParseAction(namedConstantParseAction)
        # forward declaration because event references can nest inside parameters
        self.eventReferenceElement = pyparsing.Forward()
        # parameters
        self.parameterElement = self.constantElement | self.namedConstantElement | self.eventReferenceElement
        self.parametersElement = self.parameterElement + pyparsing.ZeroOrMore(pyparsing.Suppress(",") + self.parameterElement)
        def parametersParseAction(string, location, tokens):
            """
            Parse parameters into arguments and key value arguments tuple
            """
            arguments = []
            keyValueArguments = {}
            for parameter in tokens:
                self.log.debug("found parameter '%s'", repr(parameter))
                if isinstance(parameter, dict):
                    keyValueArguments.update(parameter)
                else:
                    arguments.append(parameter)
            return (arguments, keyValueArguments)
        self.parametersElement.addParseAction(parametersParseAction)
        # event references
        self.eventReferenceElement << (
            (
                pyparsing.Word(pyparsing.alphanums + ".") +
                pyparsing.Suppress("(") +
                pyparsing.Optional(self.parametersElement) +
                pyparsing.Suppress(")")) |
            pyparsing.Word(pyparsing.alphanums + ".")
        )
        def eventReferenceElementParseAction(string, location, tokens):
            """
            Parse event references into a cachable event
            """
            if len(tokens) == 1:
                # bare reference without parentheses, no parameters
                self.log.debug("found event reference '%s'", tokens[0])
                parameters = ([], {})
            else:
                self.log.debug("found event reference '%s%s'", tokens[0], repr(tokens[1]))
                parameters = tokens[1]
            if tokens[0] not in self.policyEngine.events:
                raise pyparsing.ParseFatalException(
                    string, location,
                    "found unknown event reference '{0}'".format(repr(tokens[0])))
            # set up event implementation
            event = self.policyEngine.events[tokens[0]]()
            # validate argument counts/names against the evaluate signature
            self.checkParameters(event, "evaluate", parameters[0], parameters[1])
            return CachableEvent(
                self.policyEngine.cache,
                EventReference(event, parameters[0], parameters[1]))
        self.eventReferenceElement.addParseAction(eventReferenceElementParseAction)
        # event operators
        self.unaryOperatorElement = pyparsing.Or([])
        self.binaryOperatorElement = pyparsing.Or([])
        # TODO: outsource to load function?
        unaryOperatorList = getModuleClasses(c4.policyengine.events.operators, UnaryOperator)
        for operatorImplementation in unaryOperatorList:
            self.unaryOperators[operatorImplementation.id] = operatorImplementation
            self.unaryOperatorElement.append(pyparsing.Or(operatorImplementation.id))
        binaryOperatorList = getModuleClasses(c4.policyengine.events.operators, BinaryOperator)
        for operatorImplementation in binaryOperatorList:
            self.binaryOperators[operatorImplementation.id] = operatorImplementation
            self.binaryOperatorElement.append(pyparsing.Or(operatorImplementation.id))
        # basic value event with an optional unary operator
        self.valueEventElement = (
            pyparsing.Optional(self.unaryOperatorElement) +
            (self.constantElement | self.eventReferenceElement)
        )
        def valueEventElementParseAction(string, location, tokens):
            """
            Parse value event
            """
            if len(tokens) == 1:
                self.log.debug("found event '%s'", repr(tokens[0]))
                return tokens[0]
            # check for unary operators
            if len(tokens) == 2:
                self.log.debug("found event '%s %s'", tokens[0], repr(tokens[1]))
                if tokens[0] in self.unaryOperators:
                    return self.unaryOperators[tokens[0]](tokens[1])
                else:
                    raise pyparsing.ParseException("found unknown unary operator '{0}'".format(repr(tokens[0])))
        self.valueEventElement.addParseAction(valueEventElementParseAction)
        # complex event that may consist of a combination of events
        self.eventElement = pyparsing.Forward()
        self.eventElement << (
            (
                pyparsing.Optional(self.unaryOperatorElement) + pyparsing.Suppress("(") + self.eventElement + pyparsing.Suppress(")") +
                pyparsing.Optional(
                    self.binaryOperatorElement +
                    pyparsing.Or([
                        pyparsing.Optional(self.unaryOperatorElement) + pyparsing.Suppress("(") + self.eventElement + pyparsing.Suppress(")"),
                        self.valueEventElement])
                )
            ) |
            (self.valueEventElement + self.binaryOperatorElement + self.valueEventElement) |
            self.valueEventElement
        )
        def eventElementParseAction(string, location, tokens):
            """
            Parse event
            """
            if len(tokens) == 1:
                self.log.debug("found event '%s'", repr(tokens[0]))
                return tokens[0]
            # check for unary operators
            if len(tokens) == 2:
                self.log.debug("found event '%s %s'", tokens[0], repr(tokens[1]))
                if tokens[0] in self.unaryOperators:
                    return self.unaryOperators[tokens[0]](tokens[1])
                else:
                    raise pyparsing.ParseException("found unknown unary operator '{0}'".format(repr(tokens[0])))
            # check for binary operators
            if len(tokens) == 3:
                self.log.debug("found event '%s %s %s)'", repr(tokens[0]), tokens[1], repr(tokens[2]))
                if tokens[1] in self.binaryOperators:
                    return self.binaryOperators[tokens[1]](tokens[0], tokens[2])
                else:
                    raise pyparsing.ParseException("found unknown binary operator '{0}'".format(tokens[1]))
        self.eventElement.addParseAction(eventElementParseAction)
        # action identifier
        self.actionIdElement = pyparsing.Word(pyparsing.alphanums + ".")
        # action specified by an id and optional parameters
        self.actionElement = (self.actionIdElement +
                              pyparsing.Suppress("(") + pyparsing.Optional(self.parametersElement) + pyparsing.Suppress(")"))
        def actionElementParseAction(string, location, tokens):
            """
            Parse action into an action reference
            """
            if len(tokens) == 1:
                self.log.debug("found action '%s'", tokens[0])
                parameters = ([], {})
            else:
                self.log.debug("found action '%s%s'", tokens[0], repr(tokens[1]))
                parameters = tokens[1]
            if tokens[0] not in self.policyEngine.actions:
                raise pyparsing.ParseFatalException(
                    string, location,
                    "found unknown action reference '{0}'".format(tokens[0]))
            # set up action implementation
            # NOTE: the signature validation below mirrors checkParameters but
            # raises ParseFatalException instead of ValueError
            action = self.policyEngine.actions[tokens[0]]()
            arguments = parameters[0]
            keyValueArguments = parameters[1]
            handlerArgSpec = inspect.getargspec(action.perform)
            handlerArguments = handlerArgSpec[0][1:]
            # check for named arguments
            handlerKeyValueArguments = {}
            if handlerArgSpec[3]:
                keys = handlerArguments[-len(handlerArgSpec[3]):]
                handlerKeyValueArguments = dict(zip(keys, handlerArgSpec[3]))
                handlerArguments = handlerArguments[:len(handlerArguments)-len(handlerArgSpec[3])]
            # make sure we have at least the number of arguments that the action requires
            if len(handlerArguments) != len(arguments):
                raise pyparsing.ParseFatalException(
                    string, location,
                    "action '{0}' requires {1} arguments but {2}: {3} are given".format(
                        action, len(handlerArguments), len(arguments), arguments))
            # check for unknown named arguments
            for key in keyValueArguments:
                if key not in handlerKeyValueArguments:
                    raise pyparsing.ParseFatalException(
                        string, location,
                        "action '{0}' does not have a named argument '{1}'".format(action, key))
            return ActionReference(self.policyEngine.actions[tokens[0]](), parameters[0], parameters[1])
        self.actionElement.addParseAction(actionElementParseAction)
        # list of actions
        self.actionsElement = self.actionElement + pyparsing.ZeroOrMore(pyparsing.Suppress(",") + self.actionElement)
        # policy element consisting of a name, an event and a set of actions
        self.policyElement = pyparsing.Word(pyparsing.alphanums + "." + "_") + pyparsing.Suppress(":") + self.eventElement + pyparsing.Suppress("->") + self.actionsElement
    def checkParameters(self, o, method, arguments, keyValueArguments):
        """
        Check parameters for the specified method

        :param o: object
        :type o: object
        :param method: method
        :type method: str
        :param arguments: arguments
        :type arguments: list
        :param keyValueArguments: key value arguments
        :type keyValueArguments: dict
        :raises ValueError: if parameters are not valid
        """
        # set up implementation
        handlerArgSpec = inspect.getargspec(getattr(o, method))
        handlerArguments = handlerArgSpec[0][1:]
        # check for named arguments
        handlerKeyValueArguments = {}
        if handlerArgSpec[3]:
            keys = handlerArguments[-len(handlerArgSpec[3]):]
            handlerKeyValueArguments = dict(zip(keys, handlerArgSpec[3]))
            handlerArguments = handlerArguments[:len(handlerArguments)-len(handlerArgSpec[3])]
        # make sure we have at least the number of arguments that the object requires
        # (a *args parameter in the handler accepts any count)
        if len(handlerArguments) != len(arguments) and handlerArgSpec[1] is None:
            raise ValueError("object '{0}' requires {1} arguments but {2}: {3} are given".format(
                repr(o), len(handlerArguments), len(arguments), arguments))
        # check for unknown named arguments
        for key in keyValueArguments:
            if key not in handlerKeyValueArguments:
                raise ValueError("object '{0}' does not have a named argument '{1}'".format(repr(o), key))
    def parseAction(self, string):
        """
        Parse string into :class:`Action`

        :returns: :class:`Action`
        """
        return self.actionElement.parseString(string, parseAll=True)[0]
    def parseActions(self, string):
        """
        Parse string into multiple :class:`Action` s

        :returns: [:class:`Action`]
        """
        return self.actionsElement.parseString(string, parseAll=True)
    def parseEvent(self, string):
        """
        Parse string into :class:`Event`

        :returns: :class:`Event`
        """
        return self.eventElement.parseString(string, parseAll=True)[0]
    def parsePolicy(self, string):
        """
        Parse string into :class:`Policy`

        :returns: :class:`Policy`
        """
        # tokens are [name, event, action, action, ...]
        policyItems = self.policyElement.parseString(string, parseAll=True)
        return PolicyComponent(policyItems[0], policyItems[1], policyItems[2:])
@ClassLogger
class PolicyEngineProcess(multiprocessing.Process):
    """
    Policy engine process that periodically evaluates policies.

    :param properties: properties
    :type properties: dict
    """
    def __init__(self, properties=None):
        super(PolicyEngineProcess, self).__init__(name="Policy engine")
        self.properties = properties or {}
        # seconds to wait before the first evaluation pass
        self.initial = self.properties.get("policy.timer.initial", 5)
        # number of additional passes; negative means run forever
        self.repeat = self.properties.get("policy.timer.repeat", -1)
        # seconds between evaluation passes
        self.interval = self.properties.get("policy.timer.interval", 10)
        # whether to refresh policy list/states from the database each pass
        self.updateEnabled = self.properties.get("update.from.db", False)
    def run(self):
        """
        The implementation of the policy engine process
        """
        policyEngine = PolicyEngine(properties=self.properties)
        policies = policyEngine.policies
        self.log.info("policies: %s", str(policies))
        try:
            # wait until device managers transition to running before starting
            node = self.properties.get('node', None)
            devicesNotRunning = self.getDevicesNotRunning(node)
            while len(devicesNotRunning) > 0:
                self.log.info("Waiting for devices to become running: %s", ", ".join(devicesNotRunning))
                time.sleep(self.interval)
                devicesNotRunning = self.getDevicesNotRunning(node)
            time.sleep(self.initial)
            if self.repeat < 0:
                # run forever
                while True:
                    if self.updateEnabled:
                        policyEngine.updateFromDatabase()
                    policyEngine.run()
                    time.sleep(self.interval)
            else:
                # NOTE(review): the finite-repeat loop does not consult
                # updateEnabled, unlike the infinite loop above — confirm
                # whether that asymmetry is intentional
                while self.repeat >= 0:
                    policyEngine.run()
                    time.sleep(self.interval)
                    self.repeat -= 1
        except KeyboardInterrupt:
            self.log.debug("Exiting %s", self.name)
        except Exception:
            # was a bare "except:", which also swallows SystemExit and
            # GeneratorExit; narrowed to Exception (PEP 8 / flake8 E722) so
            # interpreter-level exits can terminate the process normally
            self.log.debug("Forced exiting %s", self.name)
            self.log.error(traceback.format_exc())
    def getDevicesNotRunning(self, node):
        """
        Build a list of devices on this node that are not in running state.

        :param node: node to get devices for
        :type node: str
        :returns: list of device names
        :rtype: [str]
        """
        devices = Backend().configuration.getDevices(node, flatDeviceHierarchy=True)
        return [
            deviceInfo.name
            for deviceInfo in devices.values()
            if deviceInfo.state != ConfigStates.RUNNING
        ]
class PolicyProperties(JSONSerializable):
"""
Policy properties
"""
def __init__(self):
self.description = None
@ClassLogger
class PolicyWrapper(object):
"""
Derived classes need to provide an id and policy string.
The PolicyWrapper class is used to load policies from disk,
see c4/system/policies/
"""
id = ""
policy = ""
@ClassLogger
class UnaryOperator(Event):
"""
A unary operator base class
:param one: event one
:type one: :class:`Event`
"""
__metaclass__ = ABCMeta
id = "unaryOperator"
def __init__(self, one):
super(UnaryOperator, self).__init__()
self.one = ValueEvent.create(one)
@abstractmethod
def evaluateOperation(self, one):
"""
Evaluate the unary operation with the specified operands
"""
def evaluate(self):
one = self.one.evaluate()
return self.evaluateOperation(one)
def __repr__(self, *args, **kwargs):
return "({0} {1})".format(self.id, repr(self.one))
def __str__(self, *args, **kwargs):
return "({0} {1} -> {2})".format(self.id, self.one, self.evaluate())
@ClassLogger
class ValueEvent(Event):
"""
A base value event
:param value: value
"""
id = "value"
def __init__(self, value):
super(ValueEvent, self).__init__()
self._value = value
def evaluate(self):
"""
Return the value of the event
:returns: value
"""
return self._value
@staticmethod
def create(value):
"""
Create a :class:`ValueEvent` given the value.
.. note::
If ``value`` is already an :class:`Event` then
itself is returned instead
:param value: value
"""
if isinstance(value, Event):
return value
else:
return ValueEvent(value)
def __repr__(self, *args, **kwargs):
return repr(self._value)
def __str__(self, *args, **kwargs):
return str(self.evaluate())
| en | 0.710933 | Copyright (c) IBM 2015-2017. All Rights Reserved. Project name: c4-policy-engine This project is licensed under the MIT License, see LICENSE A policy engine implementation with support for events and actions as well as textual representations Enumeration of states An event implementation # TODO: type, group, severity, description # see http://www-01.ibm.com/support/knowledgecenter/SSULQD_7.1.0/com.ibm.nz.adm.doc/r_sysadm_template_event_rules.html Evaluate the event .. note:: Subclasses should implement this :returns: value Value of the event A reference to an :class:`Event` :param event: event :type event: :class:`Event` :param arguments: arguments :param keyValueArguments: key value arguments Evaluate the specified event using the given arguments and key value arguments :returns: result # TODO: what about if the value is actually None? An action implementation Perform specified action .. note:: Subclasses should add arguments as needed :returns: result A reference to an :class:`Action` :param action: action :type action: :class:`Action` :param arguments: arguments :param keyValueArguments: key value arguments Perform specified action using the given arguments and key value arguments :returns: result A binary operator base class :param one: event one :type one: :class:`Event` :param two: event two :type two: :class:`Event` Evaluate the binary operation with the specified operands A memory-based dictionary cache An event which value can be cached :param cache: cache :type cache: :class:`Cache` :param event: event :type event: :class:`Event` A policy base class :param cache: cache :type cache: :class:`Cache` Formatted description based on the doc string Evaluate the event to determine if the action for this policy should to be performed Perform actions specified for the policy if the event evaluated as ``True`` A policy component consisting of an event and respective list of actions :param name: name :type name: str :param event: event :type event: 
:class:`Event` :param actions: list of actions :param actions: [:class:`ActionReference`] Add a child policy :param policy: policy :type policy: :class:`Policy` An abstraction of the underlying database where policies are stored Add a policy :param fullPolicyName: fully qualified policy name :type fullPolicyName: str :param policy: policy :type policy: :class:`Policy` # no parent # with the ability to run policy engine on multiple nodes but with shared database this is acceptable # check if we can add children Add a policy :param policy: policy :type policy: :class:`Policy` Remove all policies Disables the policy in the database given its name :param fullPolicyName: fully qualified policy name :type fullPolicyName: str Enables the policy in the database given its name :param fullPolicyName: fully qualified policy name :type fullPolicyName: str Get key for the specified policy :param fullPolicyName: fully qualified policy name :type fullPolicyName: str :returns: key :rtype: str Get policies based on parent key and the already retrieved values :param parentKey: parent key :type parentKey: str :param policyInfoMapping: policy information mapping of key-value :type policyInfoMapping: dict Get number of top level policies :returns: number of top level policies :rtype: int Get policy info for the specified policy :param fullPolicyName: fully qualified policy name :type fullPolicyName: str :returns: policy info :rtype: :class:`PolicyInfo` # map from key to value and deserialize value automatically # deal with policy information Get all policy infos :returns: list of policy infos :rtype: [:class:`PolicyInfo`] Get the state of 'policy' if it exists :param fullPolicyName: fully qualified policy name :type fullPolicyName: str :returns: state of the policy if it exists else None :rtype: :class:`States` Does the specified policy already exist :param fullPolicyName: fully qualified policy name :type fullPolicyName: str :returns: whether policy exists :rtype: bool Policy engine 
that allows iterating over policies and performing their actions based on whether the specified event matches :param properties: properties :type properties: dict Add known action :param action: action :type action: :class:`Action` Add known actions :param actions: actions :type actions: [:class:`Action`] Add known event :param event: event :type event: :class:`Event` Add known events :param events: events :type events: [:class:`Event`] Add a policy :param policy: policy :type policy: :class:`Policy` Add policies :param policies: policies :type policies: [:class:`Policy`] Convert policy infos into actual policies :param policyInfos: policy infos :type policyInfos: [:class:`PolicyInfo`] :returns: policies :rtype: [:class:`Policy`] # load children # get class info # load class from module # create instance based off constructor Disables the given policy # disable the policy in memory and in the database Enables the given policy # enable the policy in memory and in the database Loads Actions from the c4/system/policies directory. # filter out base classes Loads Policies from the c4/system/policies directory. :param orderedList: List of policy ids to include :type orderedList: list :param includePoliciesFromDatabase: Include policies form database? 
:type includePoliciesFromDatabase: boolean # short circuit for empty list # load policies # filter out base class # build temporary unordered dict # remove base class # We are specifying an order for loading the policies, but we have 3 sources the policies could be loaded from, # and the different sources have slightly different behaviors so go through the list to see if the policy can # be found and then load it based on the source; ie class properties will be type 1, policy wrapper will be type 2, # and policies that were custom added will be loaded from the database as type 3 # Note that because we are not loading all policies anymore, it isn't sufficient to just check to see if the policy # database has policies loaded; also because we support dynamic loading it isn't sufficient to always load defaults Loads Events from the c4/system/policies directory. # filter out base classes and operators Get policies from the policy database table :returns: policies :rtype: [:class:`Policy`] Load a policy into the engine :param string: policy string :type string: str If a policy is given then check if specified event matches and perform actions accordingly, followed by running its child policies. If no policy is specified start with root policies. :param policy: policy :type policy: :class:`Policy` # clear cache on events # go through policies in order # clear cache on events Update all policies from database (includes list and state). 
# check policy list to see if it needs updating # check for extra policies # check for missing policies # if mismatch then replace all policies (since order matters) #TODO send device name a setPolicies operation message to update it's status for reporting # go through policies in order to update states TODO: documentation # this value might require tweaking for complex policies and multinode systems Policy information :param name: name :type name: str :param representation: representation :type representation: str :param state: state :type state: :class:`States` :param policyType: type :type policyType: str :param properties: properties :type properties: dict Add child policy information :param policyInfo: policy info :type policyInfo: :class:`PolicyInfo` :returns: :class:`PolicyInfo` Base implementation of a policy parser using ``pyparsing`` :param policyEngine: policy engine :type policyEngine: :class:`PolicyEngine` # constant values Parse number constants into `float` or `int` # key-value pair constant Parse named constant into a key-value dictionary # parameters Parse parameters into arguments and key value arguments tuple # event references Parse event references into a cachable event # set up event implementation # event operators # TODO: outsource to load function? 
# basic value event with an optional unary operator Parse value event # check for unary operators # complex event that may consist of a combination of events Parse event # check for unary operators # check for binary operators # action identifier # action specified by an id and optional parameters Parse action into an action reference # set up action implementation # check for named arguments # make sure we have at least the number of arguments that the action requires # check for unknown named arguments # list of actions # policy element consisting of a name, an event and a set of actions Check parameters for the specified method :param o: object :type o: object :param method: method :type method: str :param arguments: arguments :type arguments: list :param keyValueArguments: key value arguments :type keyValueArguments: dict :raises ValueError: if parameters are not valid # set up implementation # check for named arguments # make sure we have at least the number of arguments that the object requires # check for unknown named arguments Parse string into :class:`Action` :returns: :class:`Action` Parse string into multiple :class:`Action` s :returns: [:class:`Action`] Parse string into :class:`Event` :returns: :class:`Event` Parse string into :class:`Policy` :returns: :class:`Policy` Policy engine process :param properties: properties :type properties: dict The implementation of the policy engine process # wait until device managers transition to running before starting Build a list of devices on this node that are not in running state. :param node: node to get devices for :type node: str :returns: list of device names Policy properties Derived classes need to provide an id and policy string. 
The PolicyWrapper class is used to load policies from disk, see c4/system/policies/ A unary operator base class :param one: event one :type one: :class:`Event` Evaluate the unary operation with the specified operands A base value event :param value: value Return the value of the event :returns: value Create a :class:`ValueEvent` given the value. .. note:: If ``value`` is already an :class:`Event` then itself is returned instead :param value: value | 2.118261 | 2 |
rama/runner.py | seblee97/vision_task_similarity | 0 | 6617005 | <gh_stars>0
import copy
import itertools
import os
from typing import Dict, List, Optional
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from rama import constants, dataset, network, ewc
from run_modes import base_runner
class Runner(base_runner.BaseRunner):
"""
Runner class for rama experiments.
Extends base runner from run_modes module.
"""
def __init__(self, config, unique_id: str = "") -> None:
"""
Class constructor.
Args:
config: configuration object.
unique_id: optional unique runner id.
"""
self._first_task_epochs = config.switch_epoch
self._total_epochs = config.total_epochs
self._early_stopping = config.early_stopping
self._first_task_best_loss = np.inf
self._first_task_best_loss_index: int
self._input_dimension = config.input_dimension
self._hidden_dimension = config.hidden_dimension
self._output_dimension = config.output_dimension
self._labels = config.labels
self._network, self._optimiser = self._setup_network(config=config)
self._train_dataloaders, self._test_dataloaders = self._setup_data(
config=config
)
self._loss_function_type = config.loss_fn
self._loss_function = self._setup_loss_function(config=config)
self._ewc_importance = config.ewc_importance
self._device = config.experiment_device
super().__init__(config=config, unique_id=unique_id)
def _get_data_columns(self) -> List[str]:
"""Implements abstract method from parent base runner class.
Sets up logging columns.
Returns:
columns: list of strings denoting scalars to be logged.
"""
columns = [
constants.EPOCH_LOSS,
f"{constants.TEST}_{constants.LOSS}_0",
f"{constants.TEST}_{constants.LOSS}_1",
f"{constants.TEST}_{constants.ACCURACY}_0",
f"{constants.TEST}_{constants.ACCURACY}_1",
constants.NODE_NORM_ENTROPY,
]
columns.extend(
[f"{constants.SELF_OVERLAP}_{i}" for i in range(self._hidden_dimension)]
)
columns.extend(
[f"{constants.NODE_FISCHER}_{0}_{i}" for i in range(self._hidden_dimension)]
)
columns.extend(
[f"{constants.NODE_FISCHER}_{1}_{i}" for i in range(self._hidden_dimension)]
)
columns.extend(
[
f"{constants.SECOND_LAYER_DERIVATIVES}_{0}_{j}_{i}"
for i, j in itertools.product(
range(self._hidden_dimension), range(self._output_dimension)
)
]
)
columns.extend(
[
f"{constants.SECOND_LAYER_DERIVATIVES}_{1}_{j}_{i}"
for i, j in itertools.product(
range(self._hidden_dimension), range(self._output_dimension)
)
]
)
columns.extend(
[
f"{constants.NODE_DROPOUT_LOSS_DIFFS}_0_{i}"
for i in range(self._hidden_dimension)
]
)
columns.extend(
[
f"{constants.NODE_DROPOUT_LOSS_DIFFS}_1_{i}"
for i in range(self._hidden_dimension)
]
)
columns.extend(
[
f"{constants.NODE_DROPOUT_ACC_DIFFS}_0_{i}"
for i in range(self._hidden_dimension)
]
)
columns.extend(
[
f"{constants.NODE_DROPOUT_ACC_DIFFS}_1_{i}"
for i in range(self._hidden_dimension)
]
)
return columns
def _setup_data(self, config):
mixed_train_1, mixed_test_1 = dataset.FashionMNISTSplitter.get_mixed_dataloader(
config.indices[0],
config.indices[1],
mixing=config.mixing[0],
label_1=config.labels[0],
label_2=config.labels[1],
batch_size=config.batch_size,
shuffle=True,
whiten=config.whitening[0],
)
mixed_train_2, mixed_test_2 = dataset.FashionMNISTSplitter.get_mixed_dataloader(
config.indices[0],
config.indices[1],
mixing=config.mixing[1],
label_1=config.labels[0],
label_2=config.labels[1],
batch_size=config.batch_size,
shuffle=True,
whiten=config.whitening[1],
)
train_dataloaders = [mixed_train_1, mixed_train_2]
test_dataloaders = [mixed_test_1, mixed_test_2]
return train_dataloaders, test_dataloaders
def _setup_network(self, config):
net = network.TwoLayerRegressionNetwork(
input_dim=config.input_dimension,
hidden_dim=config.hidden_dimension,
output_dim=config.output_dimension,
nonlinearity=config.nonlinearity,
num_heads=len(config.indices),
biases=config.biases,
)
optimiser = torch.optim.SGD(params=net.parameters(), lr=config.learning_rate)
return net, optimiser
def _setup_loss_function(self, config):
if config.loss_fn == constants.MSE:
loss_fn = nn.MSELoss()
elif config.loss_fn == constants.CROSS_ENTROPY:
loss_fn = nn.CrossEntropyLoss()
return loss_fn
def _obtain_target_mappings(self, config):
target_mappings = {}
label_mappings = {}
for task_index, dataset_indices in enumerate(config.indices):
target_mapping, label_mapping = dataset.FashionMNISTSplitter.get_mapping(
index_1=dataset_indices[0], index_2=dataset_indices[1]
)
target_mappings = {**target_mappings, **target_mapping}
label_mappings = {**label_mappings, **label_mapping}
return target_mappings, label_mappings
def _compute_loss(self, prediction, target):
if self._loss_function_type == constants.MSE:
return self._loss_function(prediction.flatten(), target.to(torch.float))
elif self._loss_function_type == constants.CROSS_ENTROPY:
return self._loss_function(prediction, target.to(torch.long))
def _compute_correct(self, prediction, target):
if self._loss_function_type == constants.MSE:
if self._labels == [-1, 1]:
correct = (torch.sign(prediction) == target).item()
elif self._labels == [0, 1]:
correct = ((prediction.flatten() > 0.5) == target).item()
elif self._loss_function_type == constants.CROSS_ENTROPY:
softmax_prediction = F.softmax(prediction, dim=1)
class_prediction = torch.argmax(softmax_prediction, dim=1)
correct = sum(class_prediction == target).item()
return correct
def train(self):
self._pre_train_logging()
for e in range(self._first_task_epochs):
self._train_test_loop(epoch=e, task_index=0)
if self._early_stopping:
# load 'best' model from first task
self._logger.info(
(
"Early stopping: loading model from epoch "
f"{self._first_task_best_loss_index}"
)
)
self._network.load(
load_path=os.path.join(
self._checkpoint_path,
f"network_{self._first_task_best_loss_index}.pt",
)
)
for e in range(self._first_task_epochs, self._total_epochs):
if self._ewc_importance is not None:
ewc_module = ewc.EWC(
device=self._device, importance=self._ewc_importance
)
ewc_module.compute_first_task_importance(
network=self._network,
previous_task_index=0,
loss_function=self._compute_loss,
dataloader=self._test_dataloaders[0],
)
else:
ewc_module = None
self._train_test_loop(epoch=e, task_index=1, ewc=ewc_module)
def _pre_train_logging(self):
node_norms = self._compute_node_norms()
node_norm_entropy = self._compute_norms_entropy(node_norms=node_norms)
node_fischers_0 = self._compute_node_fischers(task_index=0)
node_fischers_1 = self._compute_node_fischers(task_index=1)
second_layer_derivatives_0 = self._second_layer_derivative(task_index=0)
second_layer_derivatives_1 = self._second_layer_derivative(task_index=1)
(
node_dropout_loss_diffs_0,
node_dropout_acc_diffs_0,
) = self._dropout_node_metrics(task_index=0)
(
node_dropout_loss_diffs_1,
node_dropout_acc_diffs_1,
) = self._dropout_node_metrics(task_index=1)
base_logging_dict = {constants.NODE_NORM_ENTROPY: node_norm_entropy}
overlap_logging_dict = {
f"{constants.SELF_OVERLAP}_{i}": norm for i, norm in enumerate(node_norms)
}
fischer_0_logging_dict = {
f"{constants.NODE_FISCHER}_{0}_{i}": fischer
for i, fischer in enumerate(node_fischers_0)
}
fischer_1_logging_dict = {
f"{constants.NODE_FISCHER}_{1}_{i}": fischer
for i, fischer in enumerate(node_fischers_1)
}
second_layer_derivatives_0_logging_dict = {
f"{constants.SECOND_LAYER_DERIVATIVES}_{0}_{i}_{j}": derivative
for (i, j), derivative in np.ndenumerate(second_layer_derivatives_0)
}
second_layer_derivatives_1_logging_dict = {
f"{constants.SECOND_LAYER_DERIVATIVES}_{1}_{i}_{j}": derivative
for (i, j), derivative in np.ndenumerate(second_layer_derivatives_1)
}
node_dropout_loss_diffs_0_logging_dict = {
f"{constants.NODE_DROPOUT_LOSS_DIFFS}_0_{i}": diff
for i, diff in enumerate(node_dropout_loss_diffs_0)
}
node_dropout_loss_diffs_1_logging_dict = {
f"{constants.NODE_DROPOUT_LOSS_DIFFS}_1_{i}": diff
for i, diff in enumerate(node_dropout_loss_diffs_1)
}
node_dropout_acc_diffs_0_logging_dict = {
f"{constants.NODE_DROPOUT_ACC_DIFFS}_0_{i}": diff
for i, diff in enumerate(node_dropout_acc_diffs_0)
}
node_dropout_acc_diffs_1_logging_dict = {
f"{constants.NODE_DROPOUT_ACC_DIFFS}_1_{i}": diff
for i, diff in enumerate(node_dropout_acc_diffs_1)
}
logging_dict = {
**base_logging_dict,
**overlap_logging_dict,
**fischer_0_logging_dict,
**fischer_1_logging_dict,
**second_layer_derivatives_0_logging_dict,
**second_layer_derivatives_1_logging_dict,
**node_dropout_loss_diffs_0_logging_dict,
**node_dropout_loss_diffs_1_logging_dict,
**node_dropout_acc_diffs_0_logging_dict,
**node_dropout_acc_diffs_1_logging_dict,
}
self._epoch_log(logging_dict=logging_dict, epoch=0)
def _train_test_loop(
self, epoch: int, task_index: int, ewc: Optional[ewc.EWC] = None
):
train_epoch_loss = self._train_loop(task_index=task_index, ewc_module=ewc)
test_loss_0, test_accuracy_0 = self._test_loop(task_index=0)
test_loss_1, test_accuracy_1 = self._test_loop(task_index=1)
node_norms = self._compute_node_norms()
node_norm_entropy = self._compute_norms_entropy(node_norms=node_norms)
node_fischers_0 = self._compute_node_fischers(task_index=0)
node_fischers_1 = self._compute_node_fischers(task_index=1)
second_layer_derivatives_0 = self._second_layer_derivative(task_index=0)
second_layer_derivatives_1 = self._second_layer_derivative(task_index=1)
(
node_dropout_loss_diffs_0,
node_dropout_acc_diffs_0,
) = self._dropout_node_metrics(task_index=0)
(
node_dropout_loss_diffs_1,
node_dropout_acc_diffs_1,
) = self._dropout_node_metrics(task_index=1)
if task_index == 0:
if self._early_stopping:
if test_loss_0 < self._first_task_best_loss:
self._first_task_best_loss_index = epoch
self._first_task_best_loss = test_loss_0
self._network.checkpoint(
save_path=os.path.join(
self._checkpoint_path, f"network_{epoch}.pt"
)
)
base_logging_dict = {
constants.EPOCH_LOSS: train_epoch_loss,
f"{constants.TEST}_{constants.LOSS}_0": test_loss_0,
f"{constants.TEST}_{constants.LOSS}_1": test_loss_1,
f"{constants.TEST}_{constants.ACCURACY}_0": test_accuracy_0,
f"{constants.TEST}_{constants.ACCURACY}_1": test_accuracy_1,
constants.NODE_NORM_ENTROPY: node_norm_entropy,
}
overlap_logging_dict = {
f"{constants.SELF_OVERLAP}_{i}": norm for i, norm in enumerate(node_norms)
}
fischer_0_logging_dict = {
f"{constants.NODE_FISCHER}_{0}_{i}": fischer
for i, fischer in enumerate(node_fischers_0)
}
fischer_1_logging_dict = {
f"{constants.NODE_FISCHER}_{1}_{i}": fischer
for i, fischer in enumerate(node_fischers_1)
}
second_layer_derivatives_0_logging_dict = {
f"{constants.SECOND_LAYER_DERIVATIVES}_{0}_{i}_{j}": derivative
for (i, j), derivative in np.ndenumerate(second_layer_derivatives_0)
}
second_layer_derivatives_1_logging_dict = {
f"{constants.SECOND_LAYER_DERIVATIVES}_{1}_{i}_{j}": derivative
for (i, j), derivative in np.ndenumerate(second_layer_derivatives_1)
}
node_dropout_loss_diffs_0_logging_dict = {
f"{constants.NODE_DROPOUT_LOSS_DIFFS}_0_{i}": diff
for i, diff in enumerate(node_dropout_loss_diffs_0)
}
node_dropout_loss_diffs_1_logging_dict = {
f"{constants.NODE_DROPOUT_LOSS_DIFFS}_1_{i}": diff
for i, diff in enumerate(node_dropout_loss_diffs_1)
}
node_dropout_acc_diffs_0_logging_dict = {
f"{constants.NODE_DROPOUT_ACC_DIFFS}_0_{i}": diff
for i, diff in enumerate(node_dropout_acc_diffs_0)
}
node_dropout_acc_diffs_1_logging_dict = {
f"{constants.NODE_DROPOUT_ACC_DIFFS}_1_{i}": diff
for i, diff in enumerate(node_dropout_acc_diffs_1)
}
logging_dict = {
**base_logging_dict,
**overlap_logging_dict,
**fischer_0_logging_dict,
**fischer_1_logging_dict,
**second_layer_derivatives_0_logging_dict,
**second_layer_derivatives_1_logging_dict,
**node_dropout_loss_diffs_0_logging_dict,
**node_dropout_loss_diffs_1_logging_dict,
**node_dropout_acc_diffs_0_logging_dict,
**node_dropout_acc_diffs_1_logging_dict,
}
self._epoch_log(logging_dict=logging_dict, epoch=epoch)
self._logger.info(f"Epoch {epoch + 1} loss: {train_epoch_loss}")
self._data_logger.checkpoint()
def _epoch_log(self, logging_dict: Dict[str, float], epoch: int):
for tag, scalar in logging_dict.items():
self._data_logger.write_scalar(tag=tag, step=epoch, scalar=scalar)
def _train_loop(self, task_index: int, ewc_module: Optional[ewc.EWC] = None):
self._network.switch(new_task_index=task_index)
loader = self._train_dataloaders[task_index]
size = len(loader.dataset)
epoch_loss = 0
for batch, (x, y) in enumerate(loader):
self._optimiser.zero_grad()
prediction = self._network(x)
loss = self._compute_loss(prediction, y)
if ewc_module is not None:
regularisation_term = ewc_module.penalty(self._network)
loss += regularisation_term
loss.backward()
self._optimiser.step()
epoch_loss += loss.item()
return epoch_loss / size
def _test_loop(self, task_index: int):
epoch_loss = 0
correct_instances = 0
loader = self._test_dataloaders[task_index]
size = len(loader.dataset)
with torch.no_grad():
for batch, (x, y) in enumerate(loader):
prediction = self._network.test_forward(x=x, head_index=task_index)
loss = self._compute_loss(prediction, y)
epoch_loss += loss.item()
correct = self._compute_correct(prediction, y)
correct_instances += correct
return epoch_loss / size, correct_instances / size
def _compute_norms_entropy(self, node_norms: np.ndarray) -> float:
"""Compute and log 'entropy' over node norms.
This pseudo-entropy is computed by:
- normalising the array of node norms
- binning these normalised values
- computing entropy over this binned distribution
Args:
epoch: epoch count (for logging).
node_norms: magnitudes of hidden units.
Returns:
pseudo_entropy: pseudo measure of node norm entropy.
"""
normalised_norms = node_norms / np.max(node_norms)
binned_norms, _ = np.histogram(normalised_norms)
dist = binned_norms / np.max(binned_norms)
pseudo_entropy = -1 * np.sum(
[(d + constants.EPS) * np.log(d + constants.EPS) for d in dist]
)
return pseudo_entropy
def _compute_node_norms(self) -> None:
network_copy = copy.deepcopy(self._network)
layer = network_copy.layer_weights
sel_sim = torch.mm(layer, layer.t()).numpy() / self._input_dimension
norms = np.diagonal(sel_sim)
return norms
def _compute_node_fischers(self, task_index: int) -> List:
loader = self._test_dataloaders[task_index]
size = len(loader.dataset)
self._network.switch(new_task_index=task_index)
node_fischers = [0 for _ in range(self._hidden_dimension)]
for batch, (x, y) in enumerate(loader):
pre_activation = self._network.input_to_hidden(x=x)
post_activation = self._network.activate(x=pre_activation)
prediction = self._network.hidden_to_output(
x=post_activation, head_index=task_index
)
loss = self._compute_loss(prediction, y)
derivative = torch.autograd.grad(loss, post_activation)[0]
for node_index, node_derivative in enumerate(derivative[0]):
node_fischers[node_index] += node_derivative.detach().item() ** 2 / size
return node_fischers
def _dropout_node_metrics(self, task_index: int):
"""Evaluate node importance by computing drop in
accuracy/rise in loss from dropping out node.
"""
standard_test_loss = 0
standard_correct_instances = 0
masked_losses = np.zeros(self._hidden_dimension)
masked_correct_instances = np.zeros(self._hidden_dimension)
loader = self._test_dataloaders[task_index]
size = len(loader.dataset)
self._network.switch(new_task_index=task_index)
with torch.no_grad():
for batch, (x, y) in enumerate(loader):
prediction = self._network.test_forward(x=x, head_index=task_index)
loss = self._compute_loss(prediction, y)
standard_test_loss += loss.item()
correct = self._compute_correct(prediction, y)
standard_correct_instances += correct
for i in range(self._hidden_dimension):
pre_activation = self._network.input_to_hidden(x)
post_activation = self._network.activate(pre_activation)
# mask post activation
post_activation[:, i] = 0
masked_prediction = self._network.hidden_to_output(
post_activation, task_index
)
masked_loss = self._compute_loss(masked_prediction, y)
masked_losses[i] += masked_loss.item()
masked_correct = self._compute_correct(masked_prediction, y)
masked_correct_instances[i] += masked_correct
masked_loss_diffs = masked_losses / size - standard_test_loss / size
masked_acc_diffs = (
masked_correct_instances / size - standard_correct_instances / size
)
return masked_loss_diffs, masked_acc_diffs
def _second_layer_derivative(self, task_index: int) -> List:
loader = self._test_dataloaders[task_index]
size = len(loader.dataset)
self._network.switch(new_task_index=task_index)
second_layer_derivatives = [
[0 for i in range(self._hidden_dimension)]
for j in range(self._output_dimension)
]
for batch, (x, y) in enumerate(loader):
prediction = self._network(x)
loss = self._compute_loss(prediction, y)
loss.backward()
second_layer_derivative = [
p.grad for p in self._network._heads[task_index].parameters()
][0]
for i in range(self._output_dimension):
for j in range(self._hidden_dimension):
second_layer_derivatives[i][j] += (
second_layer_derivative[i][j].item() / size
)
return second_layer_derivatives
def post_process(self) -> None:
"""Solidify any data and make plots."""
self._plotter.load_data()
self._plotter.add_tag_groups(self._get_tag_groups())
self._plotter.plot_learning_curves()
def _get_tag_groups(self):
groups = [
(
f"{constants.NODE_FISCHER}_{i}",
[
f"{constants.NODE_FISCHER}_{0}_{i}",
f"{constants.NODE_FISCHER}_{1}_{i}",
],
)
for i in range(self._hidden_dimension)
]
groups.extend(
[
(
f"{constants.SECOND_LAYER_DERIVATIVES}_{i}",
[
f"{constants.SECOND_LAYER_DERIVATIVES}_{0}_{j}_{i}",
f"{constants.SECOND_LAYER_DERIVATIVES}_{1}_{j}_{i}",
],
)
for i, j in itertools.product(
range(self._hidden_dimension), range(self._output_dimension)
)
]
)
return groups
| import copy
import itertools
import os
from typing import Dict, List, Optional
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from rama import constants, dataset, network, ewc
from run_modes import base_runner
class Runner(base_runner.BaseRunner):
"""
Runner class for rama experiments.
Extends base runner from run_modes module.
"""
    def __init__(self, config, unique_id: str = "") -> None:
        """
        Class constructor.

        Args:
            config: configuration object.
            unique_id: optional unique runner id.
        """
        # Epoch at which training switches from task 0 to task 1.
        self._first_task_epochs = config.switch_epoch
        # Total number of training epochs across both tasks.
        self._total_epochs = config.total_epochs
        # If True, reload the best-loss task-0 checkpoint before task 1.
        self._early_stopping = config.early_stopping
        # Best test loss seen on task 0 so far (early-stopping bookkeeping).
        self._first_task_best_loss = np.inf
        # Epoch index of the best task-0 checkpoint; assigned during training.
        self._first_task_best_loss_index: int
        self._input_dimension = config.input_dimension
        self._hidden_dimension = config.hidden_dimension
        self._output_dimension = config.output_dimension
        # Class labels used by the datasets (e.g. [-1, 1] or [0, 1]).
        self._labels = config.labels
        self._network, self._optimiser = self._setup_network(config=config)
        self._train_dataloaders, self._test_dataloaders = self._setup_data(
            config=config
        )
        self._loss_function_type = config.loss_fn
        self._loss_function = self._setup_loss_function(config=config)
        # EWC regularisation strength; None disables EWC on the second task.
        self._ewc_importance = config.ewc_importance
        self._device = config.experiment_device
        super().__init__(config=config, unique_id=unique_id)
def _get_data_columns(self) -> List[str]:
"""Implements abstract method from parent base runner class.
Sets up logging columns.
Returns:
columns: list of strings denoting scalars to be logged.
"""
columns = [
constants.EPOCH_LOSS,
f"{constants.TEST}_{constants.LOSS}_0",
f"{constants.TEST}_{constants.LOSS}_1",
f"{constants.TEST}_{constants.ACCURACY}_0",
f"{constants.TEST}_{constants.ACCURACY}_1",
constants.NODE_NORM_ENTROPY,
]
columns.extend(
[f"{constants.SELF_OVERLAP}_{i}" for i in range(self._hidden_dimension)]
)
columns.extend(
[f"{constants.NODE_FISCHER}_{0}_{i}" for i in range(self._hidden_dimension)]
)
columns.extend(
[f"{constants.NODE_FISCHER}_{1}_{i}" for i in range(self._hidden_dimension)]
)
columns.extend(
[
f"{constants.SECOND_LAYER_DERIVATIVES}_{0}_{j}_{i}"
for i, j in itertools.product(
range(self._hidden_dimension), range(self._output_dimension)
)
]
)
columns.extend(
[
f"{constants.SECOND_LAYER_DERIVATIVES}_{1}_{j}_{i}"
for i, j in itertools.product(
range(self._hidden_dimension), range(self._output_dimension)
)
]
)
columns.extend(
[
f"{constants.NODE_DROPOUT_LOSS_DIFFS}_0_{i}"
for i in range(self._hidden_dimension)
]
)
columns.extend(
[
f"{constants.NODE_DROPOUT_LOSS_DIFFS}_1_{i}"
for i in range(self._hidden_dimension)
]
)
columns.extend(
[
f"{constants.NODE_DROPOUT_ACC_DIFFS}_0_{i}"
for i in range(self._hidden_dimension)
]
)
columns.extend(
[
f"{constants.NODE_DROPOUT_ACC_DIFFS}_1_{i}"
for i in range(self._hidden_dimension)
]
)
return columns
def _setup_data(self, config):
mixed_train_1, mixed_test_1 = dataset.FashionMNISTSplitter.get_mixed_dataloader(
config.indices[0],
config.indices[1],
mixing=config.mixing[0],
label_1=config.labels[0],
label_2=config.labels[1],
batch_size=config.batch_size,
shuffle=True,
whiten=config.whitening[0],
)
mixed_train_2, mixed_test_2 = dataset.FashionMNISTSplitter.get_mixed_dataloader(
config.indices[0],
config.indices[1],
mixing=config.mixing[1],
label_1=config.labels[0],
label_2=config.labels[1],
batch_size=config.batch_size,
shuffle=True,
whiten=config.whitening[1],
)
train_dataloaders = [mixed_train_1, mixed_train_2]
test_dataloaders = [mixed_test_1, mixed_test_2]
return train_dataloaders, test_dataloaders
    def _setup_network(self, config):
        """Build the two-layer multi-head network and its SGD optimiser.

        Args:
            config: configuration object with architecture and optimiser
                hyperparameters.

        Returns:
            Tuple of (network, optimiser).
        """
        net = network.TwoLayerRegressionNetwork(
            input_dim=config.input_dimension,
            hidden_dim=config.hidden_dimension,
            output_dim=config.output_dimension,
            nonlinearity=config.nonlinearity,
            num_heads=len(config.indices),  # one output head per task
            biases=config.biases,
        )
        # Plain SGD over all parameters (both heads included).
        optimiser = torch.optim.SGD(params=net.parameters(), lr=config.learning_rate)
        return net, optimiser
def _setup_loss_function(self, config):
if config.loss_fn == constants.MSE:
loss_fn = nn.MSELoss()
elif config.loss_fn == constants.CROSS_ENTROPY:
loss_fn = nn.CrossEntropyLoss()
return loss_fn
    def _obtain_target_mappings(self, config):
        """Collect dataset-index -> target and label mappings over all tasks.

        Args:
            config: configuration object whose ``indices`` holds one
                (index_1, index_2) pair per task.

        Returns:
            Tuple of (target_mappings, label_mappings), each merged across
            all tasks.
        """
        target_mappings = {}
        label_mappings = {}
        for task_index, dataset_indices in enumerate(config.indices):
            target_mapping, label_mapping = dataset.FashionMNISTSplitter.get_mapping(
                index_1=dataset_indices[0], index_2=dataset_indices[1]
            )
            # NOTE(review): dict merging lets later tasks overwrite earlier
            # entries on duplicate keys — confirm the per-task index sets
            # are disjoint.
            target_mappings = {**target_mappings, **target_mapping}
            label_mappings = {**label_mappings, **label_mapping}
        return target_mappings, label_mappings
def _compute_loss(self, prediction, target):
if self._loss_function_type == constants.MSE:
return self._loss_function(prediction.flatten(), target.to(torch.float))
elif self._loss_function_type == constants.CROSS_ENTROPY:
return self._loss_function(prediction, target.to(torch.long))
def _compute_correct(self, prediction, target):
if self._loss_function_type == constants.MSE:
if self._labels == [-1, 1]:
correct = (torch.sign(prediction) == target).item()
elif self._labels == [0, 1]:
correct = ((prediction.flatten() > 0.5) == target).item()
elif self._loss_function_type == constants.CROSS_ENTROPY:
softmax_prediction = F.softmax(prediction, dim=1)
class_prediction = torch.argmax(softmax_prediction, dim=1)
correct = sum(class_prediction == target).item()
return correct
    def train(self):
        """Run the full two-task curriculum.

        Trains on task 0 for ``_first_task_epochs`` epochs (optionally
        rewinding to the best-test-loss checkpoint when early stopping is
        enabled), then continues on task 1 for the remaining epochs,
        optionally with an EWC penalty anchored on task 0.
        """
        self._pre_train_logging()
        for e in range(self._first_task_epochs):
            self._train_test_loop(epoch=e, task_index=0)
        if self._early_stopping:
            # load 'best' model from first task
            self._logger.info(
                (
                    "Early stopping: loading model from epoch "
                    f"{self._first_task_best_loss_index}"
                )
            )
            self._network.load(
                load_path=os.path.join(
                    self._checkpoint_path,
                    f"network_{self._first_task_best_loss_index}.pt",
                )
            )
        for e in range(self._first_task_epochs, self._total_epochs):
            if self._ewc_importance is not None:
                # Re-estimate parameter importance on task 0 each epoch so
                # the quadratic EWC penalty anchors the task-0 solution.
                ewc_module = ewc.EWC(
                    device=self._device, importance=self._ewc_importance
                )
                ewc_module.compute_first_task_importance(
                    network=self._network,
                    previous_task_index=0,
                    loss_function=self._compute_loss,
                    dataloader=self._test_dataloaders[0],
                )
            else:
                ewc_module = None
            self._train_test_loop(epoch=e, task_index=1, ewc=ewc_module)
    def _pre_train_logging(self):
        """Log all node-level diagnostics once before training (as epoch 0).

        Mirrors the per-epoch diagnostics in ``_train_test_loop`` but omits
        the train/test loss and accuracy scalars, since no training has
        happened yet.
        """
        node_norms = self._compute_node_norms()
        node_norm_entropy = self._compute_norms_entropy(node_norms=node_norms)
        node_fischers_0 = self._compute_node_fischers(task_index=0)
        node_fischers_1 = self._compute_node_fischers(task_index=1)
        second_layer_derivatives_0 = self._second_layer_derivative(task_index=0)
        second_layer_derivatives_1 = self._second_layer_derivative(task_index=1)
        (
            node_dropout_loss_diffs_0,
            node_dropout_acc_diffs_0,
        ) = self._dropout_node_metrics(task_index=0)
        (
            node_dropout_loss_diffs_1,
            node_dropout_acc_diffs_1,
        ) = self._dropout_node_metrics(task_index=1)
        base_logging_dict = {constants.NODE_NORM_ENTROPY: node_norm_entropy}
        overlap_logging_dict = {
            f"{constants.SELF_OVERLAP}_{i}": norm for i, norm in enumerate(node_norms)
        }
        fischer_0_logging_dict = {
            f"{constants.NODE_FISCHER}_{0}_{i}": fischer
            for i, fischer in enumerate(node_fischers_0)
        }
        fischer_1_logging_dict = {
            f"{constants.NODE_FISCHER}_{1}_{i}": fischer
            for i, fischer in enumerate(node_fischers_1)
        }
        # np.ndenumerate yields (output index, hidden index) pairs here.
        second_layer_derivatives_0_logging_dict = {
            f"{constants.SECOND_LAYER_DERIVATIVES}_{0}_{i}_{j}": derivative
            for (i, j), derivative in np.ndenumerate(second_layer_derivatives_0)
        }
        second_layer_derivatives_1_logging_dict = {
            f"{constants.SECOND_LAYER_DERIVATIVES}_{1}_{i}_{j}": derivative
            for (i, j), derivative in np.ndenumerate(second_layer_derivatives_1)
        }
        node_dropout_loss_diffs_0_logging_dict = {
            f"{constants.NODE_DROPOUT_LOSS_DIFFS}_0_{i}": diff
            for i, diff in enumerate(node_dropout_loss_diffs_0)
        }
        node_dropout_loss_diffs_1_logging_dict = {
            f"{constants.NODE_DROPOUT_LOSS_DIFFS}_1_{i}": diff
            for i, diff in enumerate(node_dropout_loss_diffs_1)
        }
        node_dropout_acc_diffs_0_logging_dict = {
            f"{constants.NODE_DROPOUT_ACC_DIFFS}_0_{i}": diff
            for i, diff in enumerate(node_dropout_acc_diffs_0)
        }
        node_dropout_acc_diffs_1_logging_dict = {
            f"{constants.NODE_DROPOUT_ACC_DIFFS}_1_{i}": diff
            for i, diff in enumerate(node_dropout_acc_diffs_1)
        }
        # Merge all diagnostics into one flat tag -> scalar mapping.
        logging_dict = {
            **base_logging_dict,
            **overlap_logging_dict,
            **fischer_0_logging_dict,
            **fischer_1_logging_dict,
            **second_layer_derivatives_0_logging_dict,
            **second_layer_derivatives_1_logging_dict,
            **node_dropout_loss_diffs_0_logging_dict,
            **node_dropout_loss_diffs_1_logging_dict,
            **node_dropout_acc_diffs_0_logging_dict,
            **node_dropout_acc_diffs_1_logging_dict,
        }
        self._epoch_log(logging_dict=logging_dict, epoch=0)
    def _train_test_loop(
        self, epoch: int, task_index: int, ewc: Optional[ewc.EWC] = None
    ):
        """Run one training epoch plus full evaluation and diagnostics.

        Trains on ``task_index`` for one epoch, evaluates both tasks,
        computes all node-level diagnostics, optionally checkpoints the
        best task-0 model for early stopping, and logs everything.

        Args:
            epoch: current epoch (used for logging and checkpoint names).
            task_index: task to train on this epoch.
            ewc: optional EWC penalty module anchored on the previous task.
        """
        train_epoch_loss = self._train_loop(task_index=task_index, ewc_module=ewc)
        test_loss_0, test_accuracy_0 = self._test_loop(task_index=0)
        test_loss_1, test_accuracy_1 = self._test_loop(task_index=1)
        node_norms = self._compute_node_norms()
        node_norm_entropy = self._compute_norms_entropy(node_norms=node_norms)
        node_fischers_0 = self._compute_node_fischers(task_index=0)
        node_fischers_1 = self._compute_node_fischers(task_index=1)
        second_layer_derivatives_0 = self._second_layer_derivative(task_index=0)
        second_layer_derivatives_1 = self._second_layer_derivative(task_index=1)
        (
            node_dropout_loss_diffs_0,
            node_dropout_acc_diffs_0,
        ) = self._dropout_node_metrics(task_index=0)
        (
            node_dropout_loss_diffs_1,
            node_dropout_acc_diffs_1,
        ) = self._dropout_node_metrics(task_index=1)
        if task_index == 0:
            if self._early_stopping:
                # Track and checkpoint the best task-0 test loss so far;
                # ``train`` rewinds to this checkpoint before task 1.
                if test_loss_0 < self._first_task_best_loss:
                    self._first_task_best_loss_index = epoch
                    self._first_task_best_loss = test_loss_0
                    self._network.checkpoint(
                        save_path=os.path.join(
                            self._checkpoint_path, f"network_{epoch}.pt"
                        )
                    )
        base_logging_dict = {
            constants.EPOCH_LOSS: train_epoch_loss,
            f"{constants.TEST}_{constants.LOSS}_0": test_loss_0,
            f"{constants.TEST}_{constants.LOSS}_1": test_loss_1,
            f"{constants.TEST}_{constants.ACCURACY}_0": test_accuracy_0,
            f"{constants.TEST}_{constants.ACCURACY}_1": test_accuracy_1,
            constants.NODE_NORM_ENTROPY: node_norm_entropy,
        }
        overlap_logging_dict = {
            f"{constants.SELF_OVERLAP}_{i}": norm for i, norm in enumerate(node_norms)
        }
        fischer_0_logging_dict = {
            f"{constants.NODE_FISCHER}_{0}_{i}": fischer
            for i, fischer in enumerate(node_fischers_0)
        }
        fischer_1_logging_dict = {
            f"{constants.NODE_FISCHER}_{1}_{i}": fischer
            for i, fischer in enumerate(node_fischers_1)
        }
        # np.ndenumerate yields (output index, hidden index) pairs here.
        second_layer_derivatives_0_logging_dict = {
            f"{constants.SECOND_LAYER_DERIVATIVES}_{0}_{i}_{j}": derivative
            for (i, j), derivative in np.ndenumerate(second_layer_derivatives_0)
        }
        second_layer_derivatives_1_logging_dict = {
            f"{constants.SECOND_LAYER_DERIVATIVES}_{1}_{i}_{j}": derivative
            for (i, j), derivative in np.ndenumerate(second_layer_derivatives_1)
        }
        node_dropout_loss_diffs_0_logging_dict = {
            f"{constants.NODE_DROPOUT_LOSS_DIFFS}_0_{i}": diff
            for i, diff in enumerate(node_dropout_loss_diffs_0)
        }
        node_dropout_loss_diffs_1_logging_dict = {
            f"{constants.NODE_DROPOUT_LOSS_DIFFS}_1_{i}": diff
            for i, diff in enumerate(node_dropout_loss_diffs_1)
        }
        node_dropout_acc_diffs_0_logging_dict = {
            f"{constants.NODE_DROPOUT_ACC_DIFFS}_0_{i}": diff
            for i, diff in enumerate(node_dropout_acc_diffs_0)
        }
        node_dropout_acc_diffs_1_logging_dict = {
            f"{constants.NODE_DROPOUT_ACC_DIFFS}_1_{i}": diff
            for i, diff in enumerate(node_dropout_acc_diffs_1)
        }
        logging_dict = {
            **base_logging_dict,
            **overlap_logging_dict,
            **fischer_0_logging_dict,
            **fischer_1_logging_dict,
            **second_layer_derivatives_0_logging_dict,
            **second_layer_derivatives_1_logging_dict,
            **node_dropout_loss_diffs_0_logging_dict,
            **node_dropout_loss_diffs_1_logging_dict,
            **node_dropout_acc_diffs_0_logging_dict,
            **node_dropout_acc_diffs_1_logging_dict,
        }
        self._epoch_log(logging_dict=logging_dict, epoch=epoch)
        self._logger.info(f"Epoch {epoch + 1} loss: {train_epoch_loss}")
        self._data_logger.checkpoint()
def _epoch_log(self, logging_dict: Dict[str, float], epoch: int):
for tag, scalar in logging_dict.items():
self._data_logger.write_scalar(tag=tag, step=epoch, scalar=scalar)
    def _train_loop(self, task_index: int, ewc_module: Optional[ewc.EWC] = None):
        """Run one training epoch on the given task.

        Args:
            task_index: task whose head and train loader to use.
            ewc_module: optional EWC penalty anchored on the previous task.

        Returns:
            Accumulated batch loss divided by the dataset size.
        """
        self._network.switch(new_task_index=task_index)
        loader = self._train_dataloaders[task_index]
        size = len(loader.dataset)
        epoch_loss = 0
        for batch, (x, y) in enumerate(loader):
            self._optimiser.zero_grad()
            prediction = self._network(x)
            loss = self._compute_loss(prediction, y)
            if ewc_module is not None:
                # Quadratic penalty pulling parameters towards the task-0
                # solution, weighted by estimated importance.
                regularisation_term = ewc_module.penalty(self._network)
                loss += regularisation_term
            loss.backward()
            self._optimiser.step()
            epoch_loss += loss.item()
        # NOTE(review): batch losses are means, so dividing by the dataset
        # size (rather than the batch count) mixes scales — confirm this is
        # the intended normalisation for the logged epoch loss.
        return epoch_loss / size
def _test_loop(self, task_index: int):
epoch_loss = 0
correct_instances = 0
loader = self._test_dataloaders[task_index]
size = len(loader.dataset)
with torch.no_grad():
for batch, (x, y) in enumerate(loader):
prediction = self._network.test_forward(x=x, head_index=task_index)
loss = self._compute_loss(prediction, y)
epoch_loss += loss.item()
correct = self._compute_correct(prediction, y)
correct_instances += correct
return epoch_loss / size, correct_instances / size
def _compute_norms_entropy(self, node_norms: np.ndarray) -> float:
"""Compute and log 'entropy' over node norms.
This pseudo-entropy is computed by:
- normalising the array of node norms
- binning these normalised values
- computing entropy over this binned distribution
Args:
epoch: epoch count (for logging).
node_norms: magnitudes of hidden units.
Returns:
pseudo_entropy: pseudo measure of node norm entropy.
"""
normalised_norms = node_norms / np.max(node_norms)
binned_norms, _ = np.histogram(normalised_norms)
dist = binned_norms / np.max(binned_norms)
pseudo_entropy = -1 * np.sum(
[(d + constants.EPS) * np.log(d + constants.EPS) for d in dist]
)
return pseudo_entropy
    def _compute_node_norms(self) -> np.ndarray:
        """Per-node self-overlaps of the first-layer input weights.

        Returns:
            1-D array holding the diagonal of the weight self-similarity
            matrix, scaled by the input dimension.
        """
        # Deep copy so reading the weights cannot interact with the live
        # network; presumably ``layer_weights`` yields a grad-free tensor
        # (``.numpy()`` below would fail otherwise) — confirm.
        network_copy = copy.deepcopy(self._network)
        layer = network_copy.layer_weights
        sel_sim = torch.mm(layer, layer.t()).numpy() / self._input_dimension
        norms = np.diagonal(sel_sim)
        return norms
    def _compute_node_fischers(self, task_index: int) -> List:
        """Diagonal Fisher-style importance of each hidden node on a task.

        Accumulates the squared derivative of the loss with respect to each
        node's post-activation over the task's test set.

        Args:
            task_index: task whose head and test loader to use.

        Returns:
            List of per-node importance values, averaged over the dataset.
        """
        loader = self._test_dataloaders[task_index]
        size = len(loader.dataset)
        self._network.switch(new_task_index=task_index)
        node_fischers = [0 for _ in range(self._hidden_dimension)]
        for batch, (x, y) in enumerate(loader):
            pre_activation = self._network.input_to_hidden(x=x)
            post_activation = self._network.activate(x=pre_activation)
            prediction = self._network.hidden_to_output(
                x=post_activation, head_index=task_index
            )
            loss = self._compute_loss(prediction, y)
            derivative = torch.autograd.grad(loss, post_activation)[0]
            # NOTE(review): derivative[0] reads only the first sample of the
            # batch — with batch_size > 1 the remaining samples are ignored;
            # confirm the test loaders here use batch size 1.
            for node_index, node_derivative in enumerate(derivative[0]):
                node_fischers[node_index] += node_derivative.detach().item() ** 2 / size
        return node_fischers
def _dropout_node_metrics(self, task_index: int):
"""Evaluate node importance by computing drop in
accuracy/rise in loss from dropping out node.
"""
standard_test_loss = 0
standard_correct_instances = 0
masked_losses = np.zeros(self._hidden_dimension)
masked_correct_instances = np.zeros(self._hidden_dimension)
loader = self._test_dataloaders[task_index]
size = len(loader.dataset)
self._network.switch(new_task_index=task_index)
with torch.no_grad():
for batch, (x, y) in enumerate(loader):
prediction = self._network.test_forward(x=x, head_index=task_index)
loss = self._compute_loss(prediction, y)
standard_test_loss += loss.item()
correct = self._compute_correct(prediction, y)
standard_correct_instances += correct
for i in range(self._hidden_dimension):
pre_activation = self._network.input_to_hidden(x)
post_activation = self._network.activate(pre_activation)
# mask post activation
post_activation[:, i] = 0
masked_prediction = self._network.hidden_to_output(
post_activation, task_index
)
masked_loss = self._compute_loss(masked_prediction, y)
masked_losses[i] += masked_loss.item()
masked_correct = self._compute_correct(masked_prediction, y)
masked_correct_instances[i] += masked_correct
masked_loss_diffs = masked_losses / size - standard_test_loss / size
masked_acc_diffs = (
masked_correct_instances / size - standard_correct_instances / size
)
return masked_loss_diffs, masked_acc_diffs
def _second_layer_derivative(self, task_index: int) -> List:
loader = self._test_dataloaders[task_index]
size = len(loader.dataset)
self._network.switch(new_task_index=task_index)
second_layer_derivatives = [
[0 for i in range(self._hidden_dimension)]
for j in range(self._output_dimension)
]
for batch, (x, y) in enumerate(loader):
prediction = self._network(x)
loss = self._compute_loss(prediction, y)
loss.backward()
second_layer_derivative = [
p.grad for p in self._network._heads[task_index].parameters()
][0]
for i in range(self._output_dimension):
for j in range(self._hidden_dimension):
second_layer_derivatives[i][j] += (
second_layer_derivative[i][j].item() / size
)
return second_layer_derivatives
def post_process(self) -> None:
"""Solidify any data and make plots."""
self._plotter.load_data()
self._plotter.add_tag_groups(self._get_tag_groups())
self._plotter.plot_learning_curves()
def _get_tag_groups(self):
groups = [
(
f"{constants.NODE_FISCHER}_{i}",
[
f"{constants.NODE_FISCHER}_{0}_{i}",
f"{constants.NODE_FISCHER}_{1}_{i}",
],
)
for i in range(self._hidden_dimension)
]
groups.extend(
[
(
f"{constants.SECOND_LAYER_DERIVATIVES}_{i}",
[
f"{constants.SECOND_LAYER_DERIVATIVES}_{0}_{j}_{i}",
f"{constants.SECOND_LAYER_DERIVATIVES}_{1}_{j}_{i}",
],
)
for i, j in itertools.product(
range(self._hidden_dimension), range(self._output_dimension)
)
]
)
return groups | en | 0.703981 | Runner class for rama experiments. Extends base runner from run_modes module. Class constructor. Args: config: configuration object. unique_id: optional unique runner id. Implements abstract method from parent base runner class. Sets up logging columns. Returns: columns: list of strings denoting scalars to be logged. # load 'best' model from first task Compute and log 'entropy' over node norms. This pseudo-entropy is computed by: - normalising the array of node norms - binning these normalised values - computing entropy over this binned distribution Args: epoch: epoch count (for logging). node_norms: magnitudes of hidden units. Returns: pseudo_entropy: pseudo measure of node norm entropy. Evaluate node importance by computing drop in accuracy/rise in loss from dropping out node. # mask post activation Solidify any data and make plots. | 2.237275 | 2 |
tests/app/ltc_test.py | gilmourj/ltc-data-processing | 0 | 6617006 | <gh_stars>0
import pytest
import filecmp
import os
import tempfile
import shutil
import pandas as pd
import flask_server
from app.api.close_outbreaks import close_outbreaks
from app.api.fill_missing_dates import fill_missing_dates
from app.api.replace_no_data import replace_no_data
@pytest.fixture(scope="session", autouse=True)
def app_context():
    """Run every test in the session inside the Flask application context."""
    with flask_server.app.app_context():
        yield
def test_close_outbreaks():
    """close_outbreaks output should match the expected fixture CSV."""
    test_csv = "tests/app/fixtures/close_outbreak_nm_example.csv"
    df = pd.read_csv(test_csv)
    closed = close_outbreaks(df)
    temp_file = tempfile.NamedTemporaryFile(delete=False)
    try:
        closed.to_csv(temp_file.name, index=False)
        assert filecmp.cmp(temp_file.name, "tests/app/fixtures/expected_close_outbreak_nm_example.csv")
    finally:
        # Close and remove the temp file even when the assertion fails, so
        # repeated runs do not leak files (delete=False skips auto-cleanup).
        temp_file.close()
        os.remove(temp_file.name)
def test_fill_in_mising_dates_non_thursday():
    """fill_missing_dates output should match the expected fixture CSV."""
    test_csv = "tests/app/fixtures/state_with_missing_date_not_a_thursday.csv"
    df = pd.read_csv(test_csv)
    filled = fill_missing_dates(df)
    temp_file = tempfile.NamedTemporaryFile(delete=False)
    try:
        filled.to_csv(temp_file.name, index=False)
        assert filecmp.cmp(temp_file.name, "tests/app/fixtures/expected_state_with_missing_dates.csv")
    finally:
        # Close and remove the temp file even when the assertion fails, so
        # repeated runs do not leak files (delete=False skips auto-cleanup).
        temp_file.close()
        os.remove(temp_file.name)
def test_no_data():
    """replace_no_data output should match the expected fixture CSV."""
    test_csv = "tests/app/fixtures/test_no_data.csv"
    df = pd.read_csv(test_csv)
    replaced = replace_no_data(df)
    temp_file = tempfile.NamedTemporaryFile(delete=False)
    try:
        replaced.to_csv(temp_file.name, index=False)
        assert filecmp.cmp(temp_file.name, "tests/app/fixtures/expected_no_data.csv")
    finally:
        # Close and remove the temp file even when the assertion fails, so
        # repeated runs do not leak files (delete=False skips auto-cleanup).
        temp_file.close()
        os.remove(temp_file.name)
| import pytest
import filecmp
import os
import tempfile
import shutil
import pandas as pd
import flask_server
from app.api.close_outbreaks import close_outbreaks
from app.api.fill_missing_dates import fill_missing_dates
from app.api.replace_no_data import replace_no_data
@pytest.fixture(scope="session", autouse=True)
def app_context():
    """Run every test in the session inside the Flask application context."""
    with flask_server.app.app_context():
        yield
def test_close_outbreaks():
    """close_outbreaks output should match the expected fixture CSV."""
    test_csv = "tests/app/fixtures/close_outbreak_nm_example.csv"
    df = pd.read_csv(test_csv)
    closed = close_outbreaks(df)
    temp_file = tempfile.NamedTemporaryFile(delete=False)
    try:
        closed.to_csv(temp_file.name, index=False)
        assert filecmp.cmp(temp_file.name, "tests/app/fixtures/expected_close_outbreak_nm_example.csv")
    finally:
        # Close and remove the temp file even when the assertion fails, so
        # repeated runs do not leak files (delete=False skips auto-cleanup).
        temp_file.close()
        os.remove(temp_file.name)
def test_fill_in_mising_dates_non_thursday():
    """fill_missing_dates output should match the expected fixture CSV."""
    test_csv = "tests/app/fixtures/state_with_missing_date_not_a_thursday.csv"
    df = pd.read_csv(test_csv)
    filled = fill_missing_dates(df)
    temp_file = tempfile.NamedTemporaryFile(delete=False)
    try:
        filled.to_csv(temp_file.name, index=False)
        assert filecmp.cmp(temp_file.name, "tests/app/fixtures/expected_state_with_missing_dates.csv")
    finally:
        # Close and remove the temp file even when the assertion fails, so
        # repeated runs do not leak files (delete=False skips auto-cleanup).
        temp_file.close()
        os.remove(temp_file.name)
def test_no_data():
    """replace_no_data output should match the expected fixture CSV."""
    test_csv = "tests/app/fixtures/test_no_data.csv"
    df = pd.read_csv(test_csv)
    replaced = replace_no_data(df)
    temp_file = tempfile.NamedTemporaryFile(delete=False)
    try:
        replaced.to_csv(temp_file.name, index=False)
        assert filecmp.cmp(temp_file.name, "tests/app/fixtures/expected_no_data.csv")
    finally:
        # Close and remove the temp file even when the assertion fails, so
        # repeated runs do not leak files (delete=False skips auto-cleanup).
        temp_file.close()
        os.remove(temp_file.name)
client/verta/verta/_swagger/_public/modeldb/model/JobTypeEnumJobType.py | CaptEmulation/modeldb | 835 | 6617007 | # THIS FILE IS AUTO-GENERATED. DO NOT EDIT
from verta._swagger.base_type import BaseType
class JobTypeEnumJobType(BaseType):
    """Enum-style wrapper around the supported model-DB job type values."""

    _valid_values = [
        "KUBERNETES_JOB",
    ]

    def __init__(self, val):
        if val in JobTypeEnumJobType._valid_values:
            self.value = val
        else:
            raise ValueError('{} is not a valid value for JobTypeEnumJobType'.format(val))

    def to_json(self):
        """Serialise to the bare string value."""
        return self.value

    def from_json(v):
        """Deserialise from a raw string or an integer index into the enum."""
        if isinstance(v, str):
            return JobTypeEnumJobType(v)
        return JobTypeEnumJobType(JobTypeEnumJobType._valid_values[v])
| # THIS FILE IS AUTO-GENERATED. DO NOT EDIT
from verta._swagger.base_type import BaseType
class JobTypeEnumJobType(BaseType):
    """Auto-generated enum wrapper for model-DB job types."""

    # Allowed wire values for this enum.
    _valid_values = [
        "KUBERNETES_JOB",
    ]

    def __init__(self, val):
        # Reject anything outside the generated value set.
        if val not in JobTypeEnumJobType._valid_values:
            raise ValueError('{} is not a valid value for JobTypeEnumJobType'.format(val))
        self.value = val

    def to_json(self):
        # Serialise as the bare string value.
        return self.value

    def from_json(v):
        # NOTE(review): defined without @staticmethod; works when called as
        # JobTypeEnumJobType.from_json(v) in Python 3 — confirm no callers
        # invoke it on an instance.
        if isinstance(v, str):
            return JobTypeEnumJobType(v)
        else:
            # Integer input is treated as an index into _valid_values.
            return JobTypeEnumJobType(JobTypeEnumJobType._valid_values[v])
| en | 0.592941 | # THIS FILE IS AUTO-GENERATED. DO NOT EDIT | 2.253345 | 2 |
py/OPC/da/exceptions.py | mabotech/mabo.io | 0 | 6617008 | <gh_stars>0
# -*- coding: utf-8 -*-
class OPCException(Exception):
    """Base class for OPC-related errors.

    Previously declared without a base class, so ``raise OPCException()``
    failed with "exceptions must derive from BaseException". Deriving from
    Exception makes the whole hierarchy raisable and catchable.
    """
    pass
class GatewayException(OPCException):
    """Error originating from the OPC gateway."""
    pass
class OPCServerException(OPCException):
    """Error originating from the OPC server."""
    pass
| # -*- coding: utf-8 -*-
class OPCException(Exception):
    """Base class for OPC-related errors.

    Previously declared without a base class, so ``raise OPCException()``
    failed with "exceptions must derive from BaseException". Deriving from
    Exception makes the whole hierarchy raisable and catchable.
    """
    pass
class GatewayException(OPCException):
    """Error originating from the OPC gateway."""
    pass
class OPCServerException(OPCException):
    """Error originating from the OPC server."""
    pass
gb_dots_stock_pipelines/comps_calc_cos_similarity/comp_add_price_on_pattern.py | loopinf/vertex-ai-samples | 0 | 6617009 | from kfp.v2.dsl import (Dataset, Input, Output)
def add_price_on_pattern(
    date_ref: str,
    kernel_size: int):
    """Join OHLCV price windows onto pattern-match rows and upload to BigQuery.

    Loads the pattern rows sourced at ``date_ref`` for the given
    ``kernel_size``, attaches a 25-session window of adjusted prices
    (20 sessions before each match date, 5 after) to every (Code, date)
    row, and writes the combined table to ``red_lion.pattern_*_price_*``.

    Args:
        date_ref: reference date string used as the BigQuery table suffix.
        kernel_size: pattern kernel size; must be one of 3, 6 (v2 tables)
            or 10, 20 (oc_cc tables).

    Raises:
        ValueError: if ``kernel_size`` is not one of the supported sizes.
    """
    import logging
    FORMAT = "[%(filename)s->%(funcName)s():%(lineno)s]%(levelname)s: %(message)s"
    logging.basicConfig(format=FORMAT, level=logging.DEBUG)
    import pandas_gbq  # type: ignore
    from trading_calendars import get_calendar
    cal_krx = get_calendar('XKRX')
    import pandas as pd
    import numpy as np
    from pandas.tseries.offsets import CustomBusinessDay
    # Business-day offset that skips KRX ad-hoc holidays.
    cbday = CustomBusinessDay(holidays=cal_krx.adhoc_holidays)
    import multiprocessing
    from multiprocessing import Pool
    N_cpu = multiprocessing.cpu_count()
    logging.debug(f'cpu_count : {N_cpu}')
    import time
    import functools
    PROJECT_ID = 'dots-stock'
    from google.cloud import bigquery
    client = bigquery.Client(PROJECT_ID)
    # Price window: 20 sessions before the match date, 5 after.
    N_prev = 20
    N_next = 5
    N_all = N_prev + N_next

    def get_pattern(date_ref, kernel_size):
        """Load pattern rows for ``date_ref`` from the kernel-specific table."""
        logging.debug(f'{date_ref, kernel_size}')
        date_ref_ = pd.Timestamp(date_ref).strftime('%Y-%m-%d')
        if kernel_size in (3, 6):
            table_id = f"{PROJECT_ID}.red_lion.pattern_v2_{kernel_size}_{date_ref}"
        elif kernel_size in (10, 20):
            table_id = f"{PROJECT_ID}.red_lion.pattern_oc_cc_{kernel_size}_{date_ref}"
        else:
            # Was a bare ``raise`` with no active exception, which surfaced
            # as a cryptic RuntimeError; raise an explicit error instead.
            raise ValueError(f"unsupported kernel_size: {kernel_size}")
        sql = f'''
        SELECT
            *
        FROM
            `{table_id}`
        WHERE
            source_date = "{date_ref_}"
        ORDER BY
            date
        '''
        df = pandas_gbq.read_gbq(sql, project_id=PROJECT_ID, use_bqstorage_api=True)
        return df

    df_pattern_raw = get_pattern(date_ref, kernel_size)

    def get_price(date_ref):
        """Load the full adjusted-price table for ``date_ref``."""
        date_ref_ = pd.Timestamp(date_ref).strftime('%Y-%m-%d')
        table_id = f"{PROJECT_ID}.red_lion.adj_price_{date_ref}"
        logging.debug(f'{date_ref}: {table_id}')
        sql = f'''
        SELECT
            *
        FROM
            `{table_id}`
        # WHERE
        #   date = "{date_ref_}"
        ORDER BY
            date
        '''
        df = pandas_gbq.read_gbq(sql, project_id=PROJECT_ID, use_bqstorage_api=True)
        return df

    df_price = get_price(date_ref)
    # One row per (date, Code) match.
    df_pattern_dedup = (
        df_pattern_raw.drop_duplicates(subset=['date', 'Code'])
    )
    _df = df_pattern_dedup

    @functools.lru_cache(maxsize=None)
    def get_krx_range(x):
        """25-session KRX business-day range around match date ``x``."""
        return pd.bdate_range(
            pd.Timestamp(x) - (N_prev - 1) * cbday,
            periods=N_all,
            freq='C',
            holidays=cal_krx.adhoc_holidays
        )

    # Attach the per-row list of window dates.
    _df1 = _df.assign(
        date_ohlcv=_df[['date']].applymap(
            get_krx_range
        )
    )[['Code', 'date', 'date_ohlcv']]

    t1 = time.time()
    l_df_spl = np.array_split(_df1, N_cpu)

    # mp_split must live at module scope to be picklable by Pool workers.
    global mp_split

    def mp_split(_df1_spl):
        """Expand each row's date range into one column per session."""
        _df1_spl = _df1_spl.copy()
        _df1_split = (_df1_spl
            .date_ohlcv
            .apply(
                pd.Series
            ))
        return _df1_split

    with Pool(N_cpu) as pool:
        result = pool.map(mp_split, l_df_spl)
    dt = time.time() - t1
    logging.debug(f'multiprocessing takes {dt} seconds, previous it takes 1m 17s')

    _df1_split = \
        (pd.concat(result)
         .rename(columns=lambda x: f'd{x:02d}')
         )
    _df11 = pd.concat(
        [_df1.loc[:, ['Code', 'date']], _df1_split],
        axis=1
    )
    # Long format: one row per (Code, date, window step).
    _df2 = \
        (_df11
         .set_index(['Code', 'date'], drop=True)
         .stack()
         .reset_index()
         .rename(columns={0: 'date_price', 'level_2': 'n_step'})
         )
    # Join prices onto every window date; missing sessions become 0.
    _df3 = \
        (_df2
         .merge(df_price,
                left_on=['Code', 'date_price'],
                right_on=['code', 'Date'],
                how='left'
                )
         .drop(['Date', 'Change'], axis=1)
         .assign(date_price=
                 lambda df: df.date_price.dt.strftime('%Y-%m-%d'))
         .fillna(0)
         )
    # Re-aggregate each OHLCV field into one list per (Code, date).
    df_Open = (_df3
               .groupby(['Code', 'date'])
               ['Open']
               .apply(list)
               ).to_frame()
    df_Close = (_df3
                .groupby(['Code', 'date'])
                ['Close']
                .apply(list)
                ).to_frame()
    df_High = (_df3
               .groupby(['Code', 'date'])
               ['High']
               .apply(list)
               ).to_frame()
    df_Low = (_df3
              .groupby(['Code', 'date'])
              ['Low']
              .apply(list)
              ).to_frame()
    df_Volume = (_df3
                 .groupby(['Code', 'date'])
                 ['Volume']
                 .apply(list)
                 ).to_frame()
    df_date_price = (_df3
                     .groupby(['Code', 'date'])
                     ['date_price']
                     .apply(list)
                     ).to_frame()
    _df4 = pd.concat([
        df_Open,
        df_High,
        df_Low,
        df_Close,
        df_Volume,
        df_date_price],
        axis=1
    ).reset_index()
    _df5 = \
        (pd.merge(
            df_pattern_raw,
            _df4,
            how='left',
            left_on=['date', 'Code'],
            right_on=['date', 'Code'])
         )
    _df6 = \
        (_df5.assign(
            date=lambda df: df.date.dt.strftime('%Y-%m-%d'),
            source_date=lambda df: df.source_date.dt.strftime('%Y-%m-%d'),
        ))

    def to_gbq_table_pattern(df, date_ref):
        """Create (if needed) the destination table and stream ``df`` into it."""
        schema = [
            bigquery.SchemaField(name="date", field_type="DATE"),
            bigquery.SchemaField(name="source_date", field_type="DATE"),
            bigquery.SchemaField(name="similarity", field_type="FLOAT"),
            bigquery.SchemaField(name="source_code", field_type="STRING"),
            bigquery.SchemaField(name="Code", field_type="STRING"),
            bigquery.SchemaField(name="Open", field_type="INT64", mode="REPEATED"),
            bigquery.SchemaField(name="High", field_type="INT64", mode="REPEATED"),
            bigquery.SchemaField(name="Low", field_type="INT64", mode="REPEATED"),
            bigquery.SchemaField(name="Close", field_type="INT64", mode="REPEATED"),
            bigquery.SchemaField(name="Volume", field_type="INT64", mode="REPEATED"),
            bigquery.SchemaField(name="date_price", field_type="STRING", mode="REPEATED")
        ]
        if kernel_size in (3, 6):
            table_id = f"{PROJECT_ID}.red_lion.pattern_v2_price_{kernel_size}_{date_ref}"
        elif kernel_size in (10, 20):
            table_id = f"{PROJECT_ID}.red_lion.pattern_oc_cc_price_{kernel_size}_{date_ref}"
        else:
            # Previously this only logged and then crashed with a NameError
            # on the unbound table_id; raise explicitly instead.
            raise ValueError(f'Check kernel_size: {kernel_size}')
        table = bigquery.Table(
            table_id,
            schema=schema
        )
        table.clustering_fields = ["source_code"]
        print(table)
        try:
            client.create_table(table)
        except Exception as e:
            print(e)
            if ('Already Exists' in e.args[0]):
                table = client.get_table(table_id)
            else:
                raise
        errors = client.insert_rows_from_dataframe(table, df)
        for chunk in errors:
            print(f"encountered {len(chunk)} errors: {chunk}")
        print(len(errors))

    to_gbq_table_pattern(_df6, date_ref)
def add_price_on_pattern(
    date_ref: str,
    kernel_size: int):
    """Join OHLCV price windows onto pattern-match rows and upload to BigQuery.

    Loads the pattern rows sourced at ``date_ref`` for the given
    ``kernel_size``, attaches a 25-session window of adjusted prices
    (20 sessions before each match date, 5 after) to every (Code, date)
    row, and writes the combined table to ``red_lion.pattern_*_price_*``.

    Args:
        date_ref: reference date string used as the BigQuery table suffix.
        kernel_size: pattern kernel size; must be one of 3, 6 (v2 tables)
            or 10, 20 (oc_cc tables).

    Raises:
        ValueError: if ``kernel_size`` is not one of the supported sizes.
    """
    import logging
    FORMAT = "[%(filename)s->%(funcName)s():%(lineno)s]%(levelname)s: %(message)s"
    logging.basicConfig(format=FORMAT, level=logging.DEBUG)
    import pandas_gbq  # type: ignore
    from trading_calendars import get_calendar
    cal_krx = get_calendar('XKRX')
    import pandas as pd
    import numpy as np
    from pandas.tseries.offsets import CustomBusinessDay
    # Business-day offset that skips KRX ad-hoc holidays.
    cbday = CustomBusinessDay(holidays=cal_krx.adhoc_holidays)
    import multiprocessing
    from multiprocessing import Pool
    N_cpu = multiprocessing.cpu_count()
    logging.debug(f'cpu_count : {N_cpu}')
    import time
    import functools
    PROJECT_ID = 'dots-stock'
    from google.cloud import bigquery
    client = bigquery.Client(PROJECT_ID)
    # Price window: 20 sessions before the match date, 5 after.
    N_prev = 20
    N_next = 5
    N_all = N_prev + N_next

    def get_pattern(date_ref, kernel_size):
        """Load pattern rows for ``date_ref`` from the kernel-specific table."""
        logging.debug(f'{date_ref, kernel_size}')
        date_ref_ = pd.Timestamp(date_ref).strftime('%Y-%m-%d')
        if kernel_size in (3, 6):
            table_id = f"{PROJECT_ID}.red_lion.pattern_v2_{kernel_size}_{date_ref}"
        elif kernel_size in (10, 20):
            table_id = f"{PROJECT_ID}.red_lion.pattern_oc_cc_{kernel_size}_{date_ref}"
        else:
            # Was a bare ``raise`` with no active exception, which surfaced
            # as a cryptic RuntimeError; raise an explicit error instead.
            raise ValueError(f"unsupported kernel_size: {kernel_size}")
        sql = f'''
        SELECT
            *
        FROM
            `{table_id}`
        WHERE
            source_date = "{date_ref_}"
        ORDER BY
            date
        '''
        df = pandas_gbq.read_gbq(sql, project_id=PROJECT_ID, use_bqstorage_api=True)
        return df

    df_pattern_raw = get_pattern(date_ref, kernel_size)

    def get_price(date_ref):
        """Load the full adjusted-price table for ``date_ref``."""
        date_ref_ = pd.Timestamp(date_ref).strftime('%Y-%m-%d')
        table_id = f"{PROJECT_ID}.red_lion.adj_price_{date_ref}"
        logging.debug(f'{date_ref}: {table_id}')
        sql = f'''
        SELECT
            *
        FROM
            `{table_id}`
        # WHERE
        #   date = "{date_ref_}"
        ORDER BY
            date
        '''
        df = pandas_gbq.read_gbq(sql, project_id=PROJECT_ID, use_bqstorage_api=True)
        return df

    df_price = get_price(date_ref)
    # One row per (date, Code) match.
    df_pattern_dedup = (
        df_pattern_raw.drop_duplicates(subset=['date', 'Code'])
    )
    _df = df_pattern_dedup

    @functools.lru_cache(maxsize=None)
    def get_krx_range(x):
        """25-session KRX business-day range around match date ``x``."""
        return pd.bdate_range(
            pd.Timestamp(x) - (N_prev - 1) * cbday,
            periods=N_all,
            freq='C',
            holidays=cal_krx.adhoc_holidays
        )

    # Attach the per-row list of window dates.
    _df1 = _df.assign(
        date_ohlcv=_df[['date']].applymap(
            get_krx_range
        )
    )[['Code', 'date', 'date_ohlcv']]

    t1 = time.time()
    l_df_spl = np.array_split(_df1, N_cpu)

    # mp_split must live at module scope to be picklable by Pool workers.
    global mp_split

    def mp_split(_df1_spl):
        """Expand each row's date range into one column per session."""
        _df1_spl = _df1_spl.copy()
        _df1_split = (_df1_spl
            .date_ohlcv
            .apply(
                pd.Series
            ))
        return _df1_split

    with Pool(N_cpu) as pool:
        result = pool.map(mp_split, l_df_spl)
    dt = time.time() - t1
    logging.debug(f'multiprocessing takes {dt} seconds, previous it takes 1m 17s')

    _df1_split = \
        (pd.concat(result)
         .rename(columns=lambda x: f'd{x:02d}')
         )
    _df11 = pd.concat(
        [_df1.loc[:, ['Code', 'date']], _df1_split],
        axis=1
    )
    # Long format: one row per (Code, date, window step).
    _df2 = \
        (_df11
         .set_index(['Code', 'date'], drop=True)
         .stack()
         .reset_index()
         .rename(columns={0: 'date_price', 'level_2': 'n_step'})
         )
    # Join prices onto every window date; missing sessions become 0.
    _df3 = \
        (_df2
         .merge(df_price,
                left_on=['Code', 'date_price'],
                right_on=['code', 'Date'],
                how='left'
                )
         .drop(['Date', 'Change'], axis=1)
         .assign(date_price=
                 lambda df: df.date_price.dt.strftime('%Y-%m-%d'))
         .fillna(0)
         )
    # Re-aggregate each OHLCV field into one list per (Code, date).
    df_Open = (_df3
               .groupby(['Code', 'date'])
               ['Open']
               .apply(list)
               ).to_frame()
    df_Close = (_df3
                .groupby(['Code', 'date'])
                ['Close']
                .apply(list)
                ).to_frame()
    df_High = (_df3
               .groupby(['Code', 'date'])
               ['High']
               .apply(list)
               ).to_frame()
    df_Low = (_df3
              .groupby(['Code', 'date'])
              ['Low']
              .apply(list)
              ).to_frame()
    df_Volume = (_df3
                 .groupby(['Code', 'date'])
                 ['Volume']
                 .apply(list)
                 ).to_frame()
    df_date_price = (_df3
                     .groupby(['Code', 'date'])
                     ['date_price']
                     .apply(list)
                     ).to_frame()
    _df4 = pd.concat([
        df_Open,
        df_High,
        df_Low,
        df_Close,
        df_Volume,
        df_date_price],
        axis=1
    ).reset_index()
    _df5 = \
        (pd.merge(
            df_pattern_raw,
            _df4,
            how='left',
            left_on=['date', 'Code'],
            right_on=['date', 'Code'])
         )
    _df6 = \
        (_df5.assign(
            date=lambda df: df.date.dt.strftime('%Y-%m-%d'),
            source_date=lambda df: df.source_date.dt.strftime('%Y-%m-%d'),
        ))

    def to_gbq_table_pattern(df, date_ref):
        """Create (if needed) the destination table and stream ``df`` into it."""
        schema = [
            bigquery.SchemaField(name="date", field_type="DATE"),
            bigquery.SchemaField(name="source_date", field_type="DATE"),
            bigquery.SchemaField(name="similarity", field_type="FLOAT"),
            bigquery.SchemaField(name="source_code", field_type="STRING"),
            bigquery.SchemaField(name="Code", field_type="STRING"),
            bigquery.SchemaField(name="Open", field_type="INT64", mode="REPEATED"),
            bigquery.SchemaField(name="High", field_type="INT64", mode="REPEATED"),
            bigquery.SchemaField(name="Low", field_type="INT64", mode="REPEATED"),
            bigquery.SchemaField(name="Close", field_type="INT64", mode="REPEATED"),
            bigquery.SchemaField(name="Volume", field_type="INT64", mode="REPEATED"),
            bigquery.SchemaField(name="date_price", field_type="STRING", mode="REPEATED")
        ]
        if kernel_size in (3, 6):
            table_id = f"{PROJECT_ID}.red_lion.pattern_v2_price_{kernel_size}_{date_ref}"
        elif kernel_size in (10, 20):
            table_id = f"{PROJECT_ID}.red_lion.pattern_oc_cc_price_{kernel_size}_{date_ref}"
        else:
            # Previously this only logged and then crashed with a NameError
            # on the unbound table_id; raise explicitly instead.
            raise ValueError(f'Check kernel_size: {kernel_size}')
        table = bigquery.Table(
            table_id,
            schema=schema
        )
        table.clustering_fields = ["source_code"]
        print(table)
        try:
            client.create_table(table)
        except Exception as e:
            print(e)
            if ('Already Exists' in e.args[0]):
                table = client.get_table(table_id)
            else:
                raise
        errors = client.insert_rows_from_dataframe(table, df)
        for chunk in errors:
            print(f"encountered {len(chunk)} errors: {chunk}")
        print(len(errors))

    to_gbq_table_pattern(_df6, date_ref)
examples/all.py | KSanthanam/RaspberryPiMovementDetector | 0 | 6617010 | <reponame>KSanthanam/RaspberryPiMovementDetector
# -*- coding: utf-8 -*-
# python imports
import os
import sys
import time
import fake_rpi
# adjust the path to import RaspberryPiMovementDetector
base = os.path.normpath(os.path.join(os.path.abspath(__file__), "../.."))
sys.path.insert(0, base)
# Make it work on non RaspberryPi device
sys.modules['RPi'] = fake_rpi.RPi
try:
from RPi.GPIO import GPIO
except:
import RPi as RPi
GPIO = RPi.GPIO
# create an Watch instance
from MovementDetector import Watch
TRIG = 23
ECHO = 24
def func_moved_in(arg):
print("process for object entering field")
def func_moved_out(arg):
print("process for object exiting field")
OFFSET = 200 # 2m
watch = Watch(gpio=GPIO, trig=TRIG, echo=ECHO, func_in=func_moved_in, func_out=func_moved_out, offset=OFFSET)
watch.observe()
time.sleep(10) # Sleep
watch.stop()
| # -*- coding: utf-8 -*-
# python imports
import os
import sys
import time
import fake_rpi
# adjust the path to import RaspberryPiMovementDetector
base = os.path.normpath(os.path.join(os.path.abspath(__file__), "../.."))
sys.path.insert(0, base)
# Make it work on non RaspberryPi device
sys.modules['RPi'] = fake_rpi.RPi
try:
from RPi.GPIO import GPIO
except:
import RPi as RPi
GPIO = RPi.GPIO
# create an Watch instance
from MovementDetector import Watch
TRIG = 23
ECHO = 24
def func_moved_in(arg):
print("process for object entering field")
def func_moved_out(arg):
print("process for object exiting field")
OFFSET = 200 # 2m
watch = Watch(gpio=GPIO, trig=TRIG, echo=ECHO, func_in=func_moved_in, func_out=func_moved_out, offset=OFFSET)
watch.observe()
time.sleep(10) # Sleep
watch.stop() | en | 0.582792 | # -*- coding: utf-8 -*- # python imports # adjust the path to import RaspberryPiMovementDetector # Make it work on non RaspberryPi device # create an Watch instance # 2m # Sleep | 2.490207 | 2 |
keras/checkpoint.py | junho-m/rain | 0 | 6617011 | <filename>keras/checkpoint.py
#!/usr/bin/env python
# coding: utf-8
# ##### Copyright 2018 The TensorFlow Authors.
# In[ ]:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# # 체크포인트 훈련하기
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/guide/checkpoint"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />TensorFlow.org에서 보기</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/guide/checkpoint.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩(Google Colab)에서 실행하기</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ko/guide/checkpoint.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />깃헙(GitHub) 소스 보기</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ko/guide/checkpoint.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# Note: 이 문서는 텐서플로 커뮤니티에서 번역했습니다. 커뮤니티 번역 활동의 특성상 정확한 번역과 최신 내용을 반영하기 위해 노력함에도
# 불구하고 [공식 영문 문서](https://www.tensorflow.org/?hl=en)의 내용과 일치하지 않을 수 있습니다.
# 이 번역에 개선할 부분이 있다면
# [tensorflow/docs-l10n](https://github.com/tensorflow/docs-l10n/) 깃헙 저장소로 풀 리퀘스트를 보내주시기 바랍니다.
# 문서 번역이나 리뷰에 참여하려면
# [<EMAIL>](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-ko)로
# 메일을 보내주시기 바랍니다.
# "텐서플로 모델 저장하기" 라는 문구는 보통 둘중 하나를 의미합니다:
#
# 1. Checkpoints, 혹은
# 2. SavedModel.
#
# Checkpoint는 모델이 사용한 모든 매개변수(`tf.Variable` 객체들)의 정확한 값을 캡처합니다. Chekcpoint는 모델에 의해 정의된 연산에 대한 설명을 포함하지 않으므로 일반적으로 저장된 매개변수 값을 사용할 소스 코드를 사용할 수 있을 때만 유용합니다.
#
# 반면 SavedModel 형식은 매개변수 값(체크포인트) 외에 모델에 의해 정의된 연산에 대한 일련화된 설명을 포함합니다. 이 형식의 모델은 모델을 만든 소스 코드와 독립적입니다. 따라서 TensorFlow Serving, TensorFlow Lite, TensorFlow.js 또는 다른 프로그래밍 언어(C, C++, Java, Go, Rust, C# 등. TensorFlow APIs)로 배포하기에 적합합니다.
#
# 이 가이드는 체크포인트 쓰기 및 읽기를 위한 API들을 다룹니다.
# ## 설치
# In[ ]:
import tensorflow as tf
# In[ ]:
class Net(tf.keras.Model):
"""A simple linear model."""
def __init__(self):
super(Net, self).__init__()
self.l1 = tf.keras.layers.Dense(5)
def call(self, x):
return self.l1(x)
# In[ ]:
net = Net()
# ## `tf.keras` 훈련 API들로부터 저장하기
#
# [`tf.keras` 저장하고 복구하는
# 가이드](./keras/overview.ipynb#save_and_restore)를 읽어봅시다.
#
# `tf.keras.Model.save_weights` 가 텐서플로 CheckPoint를 저장합니다.
# In[ ]:
net.save_weights('easy_checkpoint')
# ## Checkpoints 작성하기
#
# 텐서플로 모델의 지속적인 상태는 `tf.Variable` 객체에 저장되어 있습니다. 이들은 직접으로 구성할 수 있지만, `tf.keras.layers` 혹은 `tf.keras.Model`와 같은 고수준 API들로 만들어 지기도 합니다.
#
# 변수를 관리하는 가장 쉬운 방법은 Python 객체에 변수를 연결한 다음 해당 객체를 참조하는 것입니다.
#
# `tf.train.Checkpoint`, `tf.keras.layers.Layer`, and `tf.keras.Model`의 하위클래스들은 해당 속성에 할당된 변수를 자동 추적합니다. 다음 예시는 간단한 선형 model을 구성하고, 모든 model 변수의 값을 포합하는 checkpoint를 씁니다.
# `Model.save_weights`를 사용해 손쉽게 model-checkpoint를 저장할 수 있습니다.
# ### 직접 Checkpoint작성하기
# #### 설치
# `tf.train.Checkpoint`의 모든 특성을 입증하기 위해서 toy dataset과 optimization step을 정의해야 합니다.
# In[ ]:
def toy_dataset():
inputs = tf.range(10.)[:, None]
labels = inputs * 5. + tf.range(5.)[None, :]
return tf.data.Dataset.from_tensor_slices(
dict(x=inputs, y=labels)).repeat(10).batch(2)
# In[ ]:
def train_step(net, example, optimizer):
"""Trains `net` on `example` using `optimizer`."""
with tf.GradientTape() as tape:
output = net(example['x'])
loss = tf.reduce_mean(tf.abs(output - example['y']))
variables = net.trainable_variables
gradients = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(gradients, variables))
return loss
# #### Checkpoint객체 생성
#
# 인위적으로 checkpoint를 만드려면 `tf.train.Checkpoint` 객체가 필요합니다. Checkpoint하고 싶은 객체의 위치는 객체의 특성으로 설정이 되어 있습니다.
#
# `tf.train.CheckpointManager`도 다수의 checkpoint를 관리할때 도움이 됩니다
# In[ ]:
opt = tf.keras.optimizers.Adam(0.1)
ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=opt, net=net)
manager = tf.train.CheckpointManager(ckpt, './tf_ckpts', max_to_keep=3)
# #### 훈련하고 model checkpoint작성하기
# 다음 훈련 루프는 model과 optimizer의 인스턴스를 만든 후 `tf.train.Checkpoint` 객체에 수집합니다. 이것은 각 데이터 배치에 있는 루프의 훈련 단계를 호출하고, 주기적으로 디스크에 checkpoint를 작성합니다.
# In[ ]:
def train_and_checkpoint(net, manager):
ckpt.restore(manager.latest_checkpoint)
if manager.latest_checkpoint:
print("Restored from {}".format(manager.latest_checkpoint))
else:
print("Initializing from scratch.")
for example in toy_dataset():
loss = train_step(net, example, opt)
ckpt.step.assign_add(1)
if int(ckpt.step) % 10 == 0:
save_path = manager.save()
print("Saved checkpoint for step {}: {}".format(int(ckpt.step), save_path))
print("loss {:1.2f}".format(loss.numpy()))
# In[ ]:
train_and_checkpoint(net, manager)
# #### 복구하고 훈련 계속하기
# 첫 번째 과정 이후 새로운 model과 매니저를 전달할 수 있지만, 일을 마무리 한 정확한 지점에서 훈련을 가져와야 합니다:
# In[ ]:
opt = tf.keras.optimizers.Adam(0.1)
net = Net()
ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=opt, net=net)
manager = tf.train.CheckpointManager(ckpt, './tf_ckpts', max_to_keep=3)
train_and_checkpoint(net, manager)
# `tf.train.CheckpointManager` 객체가 이전 checkpoint들을 제거합니다. 위는 가장 최근의 3개 checkpoint만 유지하도록 구성되어 있습니다.
# In[ ]:
print(manager.checkpoints) # 남은 checkpoint들 나열
# 예를 들어, `'./tf_ckpts/ckpt-10'`같은 경로들은 디스크에 있는 파일이 아닙니다. 대신에 이 경로들은 `index` 파일과 변수 값들을 담고있는 파일들의 전위 표기입니다. 이 전위 표기들은 `CheckpointManager` 가 상태를 저장하는 하나의 checkpoint 파일 (`'./tf_ckpts/checkpoint'`) 에 그룹으로 묶여있습니다.
# In[ ]:
get_ipython().system('ls ./tf_ckpts')
# <a id="loading_mechanics"/>
#
# ## 작동 원리
#
# 텐서플로는 로드되는 객체에서 시작하여 명명된 엣지가 있는 방향 그래프를 통과시켜 변수를 checkpoint된 값과 일치시킵니다. 엣지의 이름들은 특히 기여한 객체의 이름에서 따왔습니다. 예를들면, `self.l1 = tf.keras.layers.Dense(5)`안의 `"l1"`. `tf.train.Checkpoint` 이것의 키워드 전달인자 이름을 사용했습니다, 여기에서는 `"step"` in `tf.train.Checkpoint(step=...)`.
#
# 위의 예에서 나온 종속성 그래프는 다음과 같습니다.:
#
# 
#
# optimizer는 빨간색으로, regular 변수는 파란색으로, optimizer 슬롯 변수는 주황색으로 표시합니다. 다른 nodes는, 예를 들면 `tf.train.Checkpoint`, 이 검은색임을 나타냅니다.
#
# 슬롯 변수는 optimizer의 일부지만 특정 변수에 대해 생성됩니다. `'m'` 위의 엣지는 모멘텀에 해당하며, 아담 optimizer는 각 변수에 대해 추적합니다. 슬롯 변수는 변수와 optimizer가 모두 저장될 경우에만 checkpoint에 저장되며, 따라서 파선 엣지가 됩니다.
# `tf.train.Checkpoint`로 불러온 `restore()` 오브젝트 큐는그`Checkpoint` 개체에서 일치하는 방법이 있습니다. 변수 값 복원을 요청한 복원 작업 대기 행렬로 정리합니다. 예를 들어, 우리는 네트워크와 계층을 통해 그것에 대한 하나의 경로를 재구성함으로서 위에서 정의한 모델에서 커널만 로드할 수 있습니다.
# In[ ]:
to_restore = tf.Variable(tf.zeros([5]))
print(to_restore.numpy()) # 모두 0입니다.
fake_layer = tf.train.Checkpoint(bias=to_restore)
fake_net = tf.train.Checkpoint(l1=fake_layer)
new_root = tf.train.Checkpoint(net=fake_net)
status = new_root.restore(tf.train.latest_checkpoint('./tf_ckpts/'))
print(to_restore.numpy()) # 우리는 복구된 변수를 이제 얻었습니다.
# 이 새로운 개체에 대한 의존도 그래프는 우리가 위에 적은 더 큰 checkpoint보다 작은 하위 그래프입니다. 이것은 오직 `tf.train.Checkpoint`에서 checkpoints 셀때 편향과 저장 카운터만 포함합니다.
#
# 
#
# `restore()` 함수는 선택적으로 확인을 거친 객체의 상태를 반환합니다. 새로 만든 checkpoint에서 우리가 만든 모든 개체가 복원되어 status.assert_existing_objects_match()가 통과합니다.
# In[ ]:
status.assert_existing_objects_matched()
# checkpoint에는 계층의 커널과 optimizer의 변수를 포함하여 일치하지 않는 많은 개체가 있습니다. status.assert_consumed()는 checkpoint와 프로그램이 정확히 일치할 경우에만 통과하고 여기에 예외를 둘 것입니다.
# ### 복구 지연
# 텐서플로우의 Layer 객체는 입력 형상을 이용할 수 있을 때 변수 생성을 첫 번째 호출로 지연시킬 수 있습니다. 예를 들어, 'Dense' 층의 커널의 모양은 계층의 입력과 출력 형태 모두에 따라 달라지기 때문에, 생성자 인수로 필요한 출력 형태는 그 자체로 변수를 만들기에 충분한 정보가 아닙니다. 예를 들어, 'Dense' 층의 커널의 모양은 계층의 입력과 출력 형태 모두에 따라 달라지기 때문에, 생성자 인수로 필요한 출력 형태는 그 자체로 변수를 만들기에 충분한 정보가 아닙니다.
#
# 이 관용구를 지지하려면 `tf.train.Checkpoint` queues는 일치하는 변수가 없는 것들을 복원합니다.
# In[ ]:
delayed_restore = tf.Variable(tf.zeros([1, 5]))
print(delayed_restore.numpy()) # 아직 복원이 안되어 값이 0입니다.
fake_layer.kernel = delayed_restore
print(delayed_restore.numpy()) # 복원되었습니다.
# ### checkpoints 수동 검사
#
# `tf.train.list_variables`에는 checkpoint 키와 변수 형태가 나열돼있습니다. Checkpoint의 키들은 위에 있는 그래프의 경로입니다.
# In[ ]:
tf.train.list_variables(tf.train.latest_checkpoint('./tf_ckpts/'))
# ### 목록 및 딕셔너리 추적
#
# `self.l1 = tf.keras.layer.Dense(5)`,와 같은 직접적인 속성 할당은 목록과 사전적 속성에 할당하면 내용이 추적됩니다.
# In[ ]:
save = tf.train.Checkpoint()
save.listed = [tf.Variable(1.)]
save.listed.append(tf.Variable(2.))
save.mapped = {'one': save.listed[0]}
save.mapped['two'] = save.listed[1]
save_path = save.save('./tf_list_example')
restore = tf.train.Checkpoint()
v2 = tf.Variable(0.)
assert 0. == v2.numpy() # 아직 복구되지 않았습니다.
restore.mapped = {'two': v2}
restore.restore(save_path)
assert 2. == v2.numpy()
# 당신은 래퍼(wrapper) 객체를 목록과 사전에 있음을 알아차릴겁니다. 이러한 래퍼는 기본 데이터 구조의 checkpoint 가능한 버전입니다. 속성 기반 로딩과 마찬가지로, 이러한 래퍼들은 변수의 값이 용기에 추가되는 즉시 복원됩니다.
# In[ ]:
restore.listed = []
print(restore.listed) # 리스트래퍼([])
v1 = tf.Variable(0.)
restore.listed.append(v1) # 이전 셀의 restore()에서 v1 복원합니다.
assert 1. == v1.numpy()
# f.keras의 하위 클래스에 동일한 추적이 자동으로 적용되고 예를 들어 레이어 목록을 추적하는 데 사용할 수 있는 모델입니다.
# ##Estimator를 사용하여 객체 기반 checkpoint를 저장하기
#
# [Estimator 가이드](https://www.tensorflow.org/guide/estimator)를 보십시오.
#
# Estimators는 기본적으로 이전 섹션에서 설명한 개체 그래프 대신 변수 이름을 가진 체크포인트를 저장합니다. tf.train.Checkpoint는 이름 기반 체크포인트를 사용할 수 있지만, 모델의 일부를 Estimator's model_fn 외부로 이동할 때 변수 이름이 변경될 수 있습니다. 객체 기반 checkpoints를 저장하면 Estimator 내에서 모델을 훈련시킨 후 외부에서 쉽게 사용할 수 있습니다.
# In[ ]:
import tensorflow.compat.v1 as tf_compat
# In[ ]:
def model_fn(features, labels, mode):
net = Net()
opt = tf.keras.optimizers.Adam(0.1)
ckpt = tf.train.Checkpoint(step=tf_compat.train.get_global_step(),
optimizer=opt, net=net)
with tf.GradientTape() as tape:
output = net(features['x'])
loss = tf.reduce_mean(tf.abs(output - features['y']))
variables = net.trainable_variables
gradients = tape.gradient(loss, variables)
return tf.estimator.EstimatorSpec(
mode,
loss=loss,
train_op=tf.group(opt.apply_gradients(zip(gradients, variables)),
ckpt.step.assign_add(1)),
# Estimator가 "ckpt"를 객체 기반의 꼴로 저장하게 합니다.
scaffold=tf_compat.train.Scaffold(saver=ckpt))
tf.keras.backend.clear_session()
est = tf.estimator.Estimator(model_fn, './tf_estimator_example/')
est.train(toy_dataset, steps=10)
# `tf.train.Checkpoint`는 그런 다음 `model_dir`에서 Estimator의 checkpoints를 로드할 수 있습니다.
# In[ ]:
opt = tf.keras.optimizers.Adam(0.1)
net = Net()
ckpt = tf.train.Checkpoint(
step=tf.Variable(1, dtype=tf.int64), optimizer=opt, net=net)
ckpt.restore(tf.train.latest_checkpoint('./tf_estimator_example/'))
ckpt.step.numpy() # est.train(..., steps=10)부터
# ## 요약
#
# 텐서프로우 객체는 사용하는 변수의 값을 저장하고 복원할 수 있는 쉬운 자동 메커니즘을 제공합니다.
#
| <filename>keras/checkpoint.py
#!/usr/bin/env python
# coding: utf-8
# ##### Copyright 2018 The TensorFlow Authors.
# In[ ]:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# # 체크포인트 훈련하기
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/guide/checkpoint"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />TensorFlow.org에서 보기</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/guide/checkpoint.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩(Google Colab)에서 실행하기</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ko/guide/checkpoint.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />깃헙(GitHub) 소스 보기</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ko/guide/checkpoint.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# Note: 이 문서는 텐서플로 커뮤니티에서 번역했습니다. 커뮤니티 번역 활동의 특성상 정확한 번역과 최신 내용을 반영하기 위해 노력함에도
# 불구하고 [공식 영문 문서](https://www.tensorflow.org/?hl=en)의 내용과 일치하지 않을 수 있습니다.
# 이 번역에 개선할 부분이 있다면
# [tensorflow/docs-l10n](https://github.com/tensorflow/docs-l10n/) 깃헙 저장소로 풀 리퀘스트를 보내주시기 바랍니다.
# 문서 번역이나 리뷰에 참여하려면
# [<EMAIL>](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-ko)로
# 메일을 보내주시기 바랍니다.
# "텐서플로 모델 저장하기" 라는 문구는 보통 둘중 하나를 의미합니다:
#
# 1. Checkpoints, 혹은
# 2. SavedModel.
#
# Checkpoint는 모델이 사용한 모든 매개변수(`tf.Variable` 객체들)의 정확한 값을 캡처합니다. Chekcpoint는 모델에 의해 정의된 연산에 대한 설명을 포함하지 않으므로 일반적으로 저장된 매개변수 값을 사용할 소스 코드를 사용할 수 있을 때만 유용합니다.
#
# 반면 SavedModel 형식은 매개변수 값(체크포인트) 외에 모델에 의해 정의된 연산에 대한 일련화된 설명을 포함합니다. 이 형식의 모델은 모델을 만든 소스 코드와 독립적입니다. 따라서 TensorFlow Serving, TensorFlow Lite, TensorFlow.js 또는 다른 프로그래밍 언어(C, C++, Java, Go, Rust, C# 등. TensorFlow APIs)로 배포하기에 적합합니다.
#
# 이 가이드는 체크포인트 쓰기 및 읽기를 위한 API들을 다룹니다.
# ## 설치
# In[ ]:
import tensorflow as tf
# In[ ]:
class Net(tf.keras.Model):
"""A simple linear model."""
def __init__(self):
super(Net, self).__init__()
self.l1 = tf.keras.layers.Dense(5)
def call(self, x):
return self.l1(x)
# In[ ]:
net = Net()
# ## `tf.keras` 훈련 API들로부터 저장하기
#
# [`tf.keras` 저장하고 복구하는
# 가이드](./keras/overview.ipynb#save_and_restore)를 읽어봅시다.
#
# `tf.keras.Model.save_weights` 가 텐서플로 CheckPoint를 저장합니다.
# In[ ]:
net.save_weights('easy_checkpoint')
# ## Checkpoints 작성하기
#
# 텐서플로 모델의 지속적인 상태는 `tf.Variable` 객체에 저장되어 있습니다. 이들은 직접으로 구성할 수 있지만, `tf.keras.layers` 혹은 `tf.keras.Model`와 같은 고수준 API들로 만들어 지기도 합니다.
#
# 변수를 관리하는 가장 쉬운 방법은 Python 객체에 변수를 연결한 다음 해당 객체를 참조하는 것입니다.
#
# `tf.train.Checkpoint`, `tf.keras.layers.Layer`, and `tf.keras.Model`의 하위클래스들은 해당 속성에 할당된 변수를 자동 추적합니다. 다음 예시는 간단한 선형 model을 구성하고, 모든 model 변수의 값을 포합하는 checkpoint를 씁니다.
# `Model.save_weights`를 사용해 손쉽게 model-checkpoint를 저장할 수 있습니다.
# ### 직접 Checkpoint작성하기
# #### 설치
# `tf.train.Checkpoint`의 모든 특성을 입증하기 위해서 toy dataset과 optimization step을 정의해야 합니다.
# In[ ]:
def toy_dataset():
inputs = tf.range(10.)[:, None]
labels = inputs * 5. + tf.range(5.)[None, :]
return tf.data.Dataset.from_tensor_slices(
dict(x=inputs, y=labels)).repeat(10).batch(2)
# In[ ]:
def train_step(net, example, optimizer):
"""Trains `net` on `example` using `optimizer`."""
with tf.GradientTape() as tape:
output = net(example['x'])
loss = tf.reduce_mean(tf.abs(output - example['y']))
variables = net.trainable_variables
gradients = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(gradients, variables))
return loss
# #### Checkpoint객체 생성
#
# 인위적으로 checkpoint를 만드려면 `tf.train.Checkpoint` 객체가 필요합니다. Checkpoint하고 싶은 객체의 위치는 객체의 특성으로 설정이 되어 있습니다.
#
# `tf.train.CheckpointManager`도 다수의 checkpoint를 관리할때 도움이 됩니다
# In[ ]:
opt = tf.keras.optimizers.Adam(0.1)
ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=opt, net=net)
manager = tf.train.CheckpointManager(ckpt, './tf_ckpts', max_to_keep=3)
# #### 훈련하고 model checkpoint작성하기
# 다음 훈련 루프는 model과 optimizer의 인스턴스를 만든 후 `tf.train.Checkpoint` 객체에 수집합니다. 이것은 각 데이터 배치에 있는 루프의 훈련 단계를 호출하고, 주기적으로 디스크에 checkpoint를 작성합니다.
# In[ ]:
def train_and_checkpoint(net, manager):
ckpt.restore(manager.latest_checkpoint)
if manager.latest_checkpoint:
print("Restored from {}".format(manager.latest_checkpoint))
else:
print("Initializing from scratch.")
for example in toy_dataset():
loss = train_step(net, example, opt)
ckpt.step.assign_add(1)
if int(ckpt.step) % 10 == 0:
save_path = manager.save()
print("Saved checkpoint for step {}: {}".format(int(ckpt.step), save_path))
print("loss {:1.2f}".format(loss.numpy()))
# In[ ]:
train_and_checkpoint(net, manager)
# #### 복구하고 훈련 계속하기
# 첫 번째 과정 이후 새로운 model과 매니저를 전달할 수 있지만, 일을 마무리 한 정확한 지점에서 훈련을 가져와야 합니다:
# In[ ]:
opt = tf.keras.optimizers.Adam(0.1)
net = Net()
ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=opt, net=net)
manager = tf.train.CheckpointManager(ckpt, './tf_ckpts', max_to_keep=3)
train_and_checkpoint(net, manager)
# `tf.train.CheckpointManager` 객체가 이전 checkpoint들을 제거합니다. 위는 가장 최근의 3개 checkpoint만 유지하도록 구성되어 있습니다.
# In[ ]:
print(manager.checkpoints) # 남은 checkpoint들 나열
# 예를 들어, `'./tf_ckpts/ckpt-10'`같은 경로들은 디스크에 있는 파일이 아닙니다. 대신에 이 경로들은 `index` 파일과 변수 값들을 담고있는 파일들의 전위 표기입니다. 이 전위 표기들은 `CheckpointManager` 가 상태를 저장하는 하나의 checkpoint 파일 (`'./tf_ckpts/checkpoint'`) 에 그룹으로 묶여있습니다.
# In[ ]:
get_ipython().system('ls ./tf_ckpts')
# <a id="loading_mechanics"/>
#
# ## 작동 원리
#
# 텐서플로는 로드되는 객체에서 시작하여 명명된 엣지가 있는 방향 그래프를 통과시켜 변수를 checkpoint된 값과 일치시킵니다. 엣지의 이름들은 특히 기여한 객체의 이름에서 따왔습니다. 예를들면, `self.l1 = tf.keras.layers.Dense(5)`안의 `"l1"`. `tf.train.Checkpoint` 이것의 키워드 전달인자 이름을 사용했습니다, 여기에서는 `"step"` in `tf.train.Checkpoint(step=...)`.
#
# 위의 예에서 나온 종속성 그래프는 다음과 같습니다.:
#
# 
#
# optimizer는 빨간색으로, regular 변수는 파란색으로, optimizer 슬롯 변수는 주황색으로 표시합니다. 다른 nodes는, 예를 들면 `tf.train.Checkpoint`, 이 검은색임을 나타냅니다.
#
# 슬롯 변수는 optimizer의 일부지만 특정 변수에 대해 생성됩니다. `'m'` 위의 엣지는 모멘텀에 해당하며, 아담 optimizer는 각 변수에 대해 추적합니다. 슬롯 변수는 변수와 optimizer가 모두 저장될 경우에만 checkpoint에 저장되며, 따라서 파선 엣지가 됩니다.
# `tf.train.Checkpoint`로 불러온 `restore()` 오브젝트 큐는그`Checkpoint` 개체에서 일치하는 방법이 있습니다. 변수 값 복원을 요청한 복원 작업 대기 행렬로 정리합니다. 예를 들어, 우리는 네트워크와 계층을 통해 그것에 대한 하나의 경로를 재구성함으로서 위에서 정의한 모델에서 커널만 로드할 수 있습니다.
# In[ ]:
to_restore = tf.Variable(tf.zeros([5]))
print(to_restore.numpy()) # 모두 0입니다.
fake_layer = tf.train.Checkpoint(bias=to_restore)
fake_net = tf.train.Checkpoint(l1=fake_layer)
new_root = tf.train.Checkpoint(net=fake_net)
status = new_root.restore(tf.train.latest_checkpoint('./tf_ckpts/'))
print(to_restore.numpy()) # 우리는 복구된 변수를 이제 얻었습니다.
# 이 새로운 개체에 대한 의존도 그래프는 우리가 위에 적은 더 큰 checkpoint보다 작은 하위 그래프입니다. 이것은 오직 `tf.train.Checkpoint`에서 checkpoints 셀때 편향과 저장 카운터만 포함합니다.
#
# 
#
# `restore()` 함수는 선택적으로 확인을 거친 객체의 상태를 반환합니다. 새로 만든 checkpoint에서 우리가 만든 모든 개체가 복원되어 status.assert_existing_objects_match()가 통과합니다.
# In[ ]:
status.assert_existing_objects_matched()
# checkpoint에는 계층의 커널과 optimizer의 변수를 포함하여 일치하지 않는 많은 개체가 있습니다. status.assert_consumed()는 checkpoint와 프로그램이 정확히 일치할 경우에만 통과하고 여기에 예외를 둘 것입니다.
# ### 복구 지연
# 텐서플로우의 Layer 객체는 입력 형상을 이용할 수 있을 때 변수 생성을 첫 번째 호출로 지연시킬 수 있습니다. 예를 들어, 'Dense' 층의 커널의 모양은 계층의 입력과 출력 형태 모두에 따라 달라지기 때문에, 생성자 인수로 필요한 출력 형태는 그 자체로 변수를 만들기에 충분한 정보가 아닙니다. 예를 들어, 'Dense' 층의 커널의 모양은 계층의 입력과 출력 형태 모두에 따라 달라지기 때문에, 생성자 인수로 필요한 출력 형태는 그 자체로 변수를 만들기에 충분한 정보가 아닙니다.
#
# 이 관용구를 지지하려면 `tf.train.Checkpoint` queues는 일치하는 변수가 없는 것들을 복원합니다.
# In[ ]:
delayed_restore = tf.Variable(tf.zeros([1, 5]))
print(delayed_restore.numpy()) # 아직 복원이 안되어 값이 0입니다.
fake_layer.kernel = delayed_restore
print(delayed_restore.numpy()) # 복원되었습니다.
# ### checkpoints 수동 검사
#
# `tf.train.list_variables`에는 checkpoint 키와 변수 형태가 나열돼있습니다. Checkpoint의 키들은 위에 있는 그래프의 경로입니다.
# In[ ]:
tf.train.list_variables(tf.train.latest_checkpoint('./tf_ckpts/'))
# ### 목록 및 딕셔너리 추적
#
# `self.l1 = tf.keras.layer.Dense(5)`,와 같은 직접적인 속성 할당은 목록과 사전적 속성에 할당하면 내용이 추적됩니다.
# In[ ]:
save = tf.train.Checkpoint()
save.listed = [tf.Variable(1.)]
save.listed.append(tf.Variable(2.))
save.mapped = {'one': save.listed[0]}
save.mapped['two'] = save.listed[1]
save_path = save.save('./tf_list_example')
restore = tf.train.Checkpoint()
v2 = tf.Variable(0.)
assert 0. == v2.numpy() # 아직 복구되지 않았습니다.
restore.mapped = {'two': v2}
restore.restore(save_path)
assert 2. == v2.numpy()
# 당신은 래퍼(wrapper) 객체를 목록과 사전에 있음을 알아차릴겁니다. 이러한 래퍼는 기본 데이터 구조의 checkpoint 가능한 버전입니다. 속성 기반 로딩과 마찬가지로, 이러한 래퍼들은 변수의 값이 용기에 추가되는 즉시 복원됩니다.
# In[ ]:
restore.listed = []
print(restore.listed) # 리스트래퍼([])
v1 = tf.Variable(0.)
restore.listed.append(v1) # 이전 셀의 restore()에서 v1 복원합니다.
assert 1. == v1.numpy()
# f.keras의 하위 클래스에 동일한 추적이 자동으로 적용되고 예를 들어 레이어 목록을 추적하는 데 사용할 수 있는 모델입니다.
# ##Estimator를 사용하여 객체 기반 checkpoint를 저장하기
#
# [Estimator 가이드](https://www.tensorflow.org/guide/estimator)를 보십시오.
#
# Estimators는 기본적으로 이전 섹션에서 설명한 개체 그래프 대신 변수 이름을 가진 체크포인트를 저장합니다. tf.train.Checkpoint는 이름 기반 체크포인트를 사용할 수 있지만, 모델의 일부를 Estimator's model_fn 외부로 이동할 때 변수 이름이 변경될 수 있습니다. 객체 기반 checkpoints를 저장하면 Estimator 내에서 모델을 훈련시킨 후 외부에서 쉽게 사용할 수 있습니다.
# In[ ]:
import tensorflow.compat.v1 as tf_compat
# In[ ]:
def model_fn(features, labels, mode):
net = Net()
opt = tf.keras.optimizers.Adam(0.1)
ckpt = tf.train.Checkpoint(step=tf_compat.train.get_global_step(),
optimizer=opt, net=net)
with tf.GradientTape() as tape:
output = net(features['x'])
loss = tf.reduce_mean(tf.abs(output - features['y']))
variables = net.trainable_variables
gradients = tape.gradient(loss, variables)
return tf.estimator.EstimatorSpec(
mode,
loss=loss,
train_op=tf.group(opt.apply_gradients(zip(gradients, variables)),
ckpt.step.assign_add(1)),
# Estimator가 "ckpt"를 객체 기반의 꼴로 저장하게 합니다.
scaffold=tf_compat.train.Scaffold(saver=ckpt))
tf.keras.backend.clear_session()
est = tf.estimator.Estimator(model_fn, './tf_estimator_example/')
est.train(toy_dataset, steps=10)
# `tf.train.Checkpoint`는 그런 다음 `model_dir`에서 Estimator의 checkpoints를 로드할 수 있습니다.
# In[ ]:
opt = tf.keras.optimizers.Adam(0.1)
net = Net()
ckpt = tf.train.Checkpoint(
step=tf.Variable(1, dtype=tf.int64), optimizer=opt, net=net)
ckpt.restore(tf.train.latest_checkpoint('./tf_estimator_example/'))
ckpt.step.numpy() # est.train(..., steps=10)부터
# ## 요약
#
# 텐서프로우 객체는 사용하는 변수의 값을 저장하고 복원할 수 있는 쉬운 자동 메커니즘을 제공합니다.
#
| ko | 0.997576 | #!/usr/bin/env python # coding: utf-8 # ##### Copyright 2018 The TensorFlow Authors. # In[ ]: #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # 체크포인트 훈련하기 # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/guide/checkpoint"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />TensorFlow.org에서 보기</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/guide/checkpoint.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩(Google Colab)에서 실행하기</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ko/guide/checkpoint.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />깃헙(GitHub) 소스 보기</a> # </td> # <td> # <a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ko/guide/checkpoint.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> # </td> # </table> # Note: 이 문서는 텐서플로 커뮤니티에서 번역했습니다. 커뮤니티 번역 활동의 특성상 정확한 번역과 최신 내용을 반영하기 위해 노력함에도 # 불구하고 [공식 영문 문서](https://www.tensorflow.org/?hl=en)의 내용과 일치하지 않을 수 있습니다. # 이 번역에 개선할 부분이 있다면 # [tensorflow/docs-l10n](https://github.com/tensorflow/docs-l10n/) 깃헙 저장소로 풀 리퀘스트를 보내주시기 바랍니다. # 문서 번역이나 리뷰에 참여하려면 # [<EMAIL>](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-ko)로 # 메일을 보내주시기 바랍니다. 
# "텐서플로 모델 저장하기" 라는 문구는 보통 둘중 하나를 의미합니다: # # 1. Checkpoints, 혹은 # 2. SavedModel. # # Checkpoint는 모델이 사용한 모든 매개변수(`tf.Variable` 객체들)의 정확한 값을 캡처합니다. Chekcpoint는 모델에 의해 정의된 연산에 대한 설명을 포함하지 않으므로 일반적으로 저장된 매개변수 값을 사용할 소스 코드를 사용할 수 있을 때만 유용합니다. # # 반면 SavedModel 형식은 매개변수 값(체크포인트) 외에 모델에 의해 정의된 연산에 대한 일련화된 설명을 포함합니다. 이 형식의 모델은 모델을 만든 소스 코드와 독립적입니다. 따라서 TensorFlow Serving, TensorFlow Lite, TensorFlow.js 또는 다른 프로그래밍 언어(C, C++, Java, Go, Rust, C# 등. TensorFlow APIs)로 배포하기에 적합합니다. # # 이 가이드는 체크포인트 쓰기 및 읽기를 위한 API들을 다룹니다. # ## 설치 # In[ ]: # In[ ]: A simple linear model. # In[ ]: # ## `tf.keras` 훈련 API들로부터 저장하기 # # [`tf.keras` 저장하고 복구하는 # 가이드](./keras/overview.ipynb#save_and_restore)를 읽어봅시다. # # `tf.keras.Model.save_weights` 가 텐서플로 CheckPoint를 저장합니다. # In[ ]: # ## Checkpoints 작성하기 # # 텐서플로 모델의 지속적인 상태는 `tf.Variable` 객체에 저장되어 있습니다. 이들은 직접으로 구성할 수 있지만, `tf.keras.layers` 혹은 `tf.keras.Model`와 같은 고수준 API들로 만들어 지기도 합니다. # # 변수를 관리하는 가장 쉬운 방법은 Python 객체에 변수를 연결한 다음 해당 객체를 참조하는 것입니다. # # `tf.train.Checkpoint`, `tf.keras.layers.Layer`, and `tf.keras.Model`의 하위클래스들은 해당 속성에 할당된 변수를 자동 추적합니다. 다음 예시는 간단한 선형 model을 구성하고, 모든 model 변수의 값을 포합하는 checkpoint를 씁니다. # `Model.save_weights`를 사용해 손쉽게 model-checkpoint를 저장할 수 있습니다. # ### 직접 Checkpoint작성하기 # #### 설치 # `tf.train.Checkpoint`의 모든 특성을 입증하기 위해서 toy dataset과 optimization step을 정의해야 합니다. # In[ ]: # In[ ]: Trains `net` on `example` using `optimizer`. # #### Checkpoint객체 생성 # # 인위적으로 checkpoint를 만드려면 `tf.train.Checkpoint` 객체가 필요합니다. Checkpoint하고 싶은 객체의 위치는 객체의 특성으로 설정이 되어 있습니다. # # `tf.train.CheckpointManager`도 다수의 checkpoint를 관리할때 도움이 됩니다 # In[ ]: # #### 훈련하고 model checkpoint작성하기 # 다음 훈련 루프는 model과 optimizer의 인스턴스를 만든 후 `tf.train.Checkpoint` 객체에 수집합니다. 이것은 각 데이터 배치에 있는 루프의 훈련 단계를 호출하고, 주기적으로 디스크에 checkpoint를 작성합니다. # In[ ]: # In[ ]: # #### 복구하고 훈련 계속하기 # 첫 번째 과정 이후 새로운 model과 매니저를 전달할 수 있지만, 일을 마무리 한 정확한 지점에서 훈련을 가져와야 합니다: # In[ ]: # `tf.train.CheckpointManager` 객체가 이전 checkpoint들을 제거합니다. 위는 가장 최근의 3개 checkpoint만 유지하도록 구성되어 있습니다. 
# In[ ]: # 남은 checkpoint들 나열 # 예를 들어, `'./tf_ckpts/ckpt-10'`같은 경로들은 디스크에 있는 파일이 아닙니다. 대신에 이 경로들은 `index` 파일과 변수 값들을 담고있는 파일들의 전위 표기입니다. 이 전위 표기들은 `CheckpointManager` 가 상태를 저장하는 하나의 checkpoint 파일 (`'./tf_ckpts/checkpoint'`) 에 그룹으로 묶여있습니다. # In[ ]: # <a id="loading_mechanics"/> # # ## 작동 원리 # # 텐서플로는 로드되는 객체에서 시작하여 명명된 엣지가 있는 방향 그래프를 통과시켜 변수를 checkpoint된 값과 일치시킵니다. 엣지의 이름들은 특히 기여한 객체의 이름에서 따왔습니다. 예를들면, `self.l1 = tf.keras.layers.Dense(5)`안의 `"l1"`. `tf.train.Checkpoint` 이것의 키워드 전달인자 이름을 사용했습니다, 여기에서는 `"step"` in `tf.train.Checkpoint(step=...)`. # # 위의 예에서 나온 종속성 그래프는 다음과 같습니다.: # #  # # optimizer는 빨간색으로, regular 변수는 파란색으로, optimizer 슬롯 변수는 주황색으로 표시합니다. 다른 nodes는, 예를 들면 `tf.train.Checkpoint`, 이 검은색임을 나타냅니다. # # 슬롯 변수는 optimizer의 일부지만 특정 변수에 대해 생성됩니다. `'m'` 위의 엣지는 모멘텀에 해당하며, 아담 optimizer는 각 변수에 대해 추적합니다. 슬롯 변수는 변수와 optimizer가 모두 저장될 경우에만 checkpoint에 저장되며, 따라서 파선 엣지가 됩니다. # `tf.train.Checkpoint`로 불러온 `restore()` 오브젝트 큐는그`Checkpoint` 개체에서 일치하는 방법이 있습니다. 변수 값 복원을 요청한 복원 작업 대기 행렬로 정리합니다. 예를 들어, 우리는 네트워크와 계층을 통해 그것에 대한 하나의 경로를 재구성함으로서 위에서 정의한 모델에서 커널만 로드할 수 있습니다. # In[ ]: # 모두 0입니다. # 우리는 복구된 변수를 이제 얻었습니다. # 이 새로운 개체에 대한 의존도 그래프는 우리가 위에 적은 더 큰 checkpoint보다 작은 하위 그래프입니다. 이것은 오직 `tf.train.Checkpoint`에서 checkpoints 셀때 편향과 저장 카운터만 포함합니다. # #  # # `restore()` 함수는 선택적으로 확인을 거친 객체의 상태를 반환합니다. 새로 만든 checkpoint에서 우리가 만든 모든 개체가 복원되어 status.assert_existing_objects_match()가 통과합니다. # In[ ]: # checkpoint에는 계층의 커널과 optimizer의 변수를 포함하여 일치하지 않는 많은 개체가 있습니다. status.assert_consumed()는 checkpoint와 프로그램이 정확히 일치할 경우에만 통과하고 여기에 예외를 둘 것입니다. # ### 복구 지연 # 텐서플로우의 Layer 객체는 입력 형상을 이용할 수 있을 때 변수 생성을 첫 번째 호출로 지연시킬 수 있습니다. 예를 들어, 'Dense' 층의 커널의 모양은 계층의 입력과 출력 형태 모두에 따라 달라지기 때문에, 생성자 인수로 필요한 출력 형태는 그 자체로 변수를 만들기에 충분한 정보가 아닙니다. 예를 들어, 'Dense' 층의 커널의 모양은 계층의 입력과 출력 형태 모두에 따라 달라지기 때문에, 생성자 인수로 필요한 출력 형태는 그 자체로 변수를 만들기에 충분한 정보가 아닙니다. # # 이 관용구를 지지하려면 `tf.train.Checkpoint` queues는 일치하는 변수가 없는 것들을 복원합니다. # In[ ]: # 아직 복원이 안되어 값이 0입니다. # 복원되었습니다. 
# ### checkpoints 수동 검사 # # `tf.train.list_variables`에는 checkpoint 키와 변수 형태가 나열돼있습니다. Checkpoint의 키들은 위에 있는 그래프의 경로입니다. # In[ ]: # ### 목록 및 딕셔너리 추적 # # `self.l1 = tf.keras.layer.Dense(5)`,와 같은 직접적인 속성 할당은 목록과 사전적 속성에 할당하면 내용이 추적됩니다. # In[ ]: # 아직 복구되지 않았습니다. # 당신은 래퍼(wrapper) 객체를 목록과 사전에 있음을 알아차릴겁니다. 이러한 래퍼는 기본 데이터 구조의 checkpoint 가능한 버전입니다. 속성 기반 로딩과 마찬가지로, 이러한 래퍼들은 변수의 값이 용기에 추가되는 즉시 복원됩니다. # In[ ]: # 리스트래퍼([]) # 이전 셀의 restore()에서 v1 복원합니다. # f.keras의 하위 클래스에 동일한 추적이 자동으로 적용되고 예를 들어 레이어 목록을 추적하는 데 사용할 수 있는 모델입니다. # ##Estimator를 사용하여 객체 기반 checkpoint를 저장하기 # # [Estimator 가이드](https://www.tensorflow.org/guide/estimator)를 보십시오. # # Estimators는 기본적으로 이전 섹션에서 설명한 개체 그래프 대신 변수 이름을 가진 체크포인트를 저장합니다. tf.train.Checkpoint는 이름 기반 체크포인트를 사용할 수 있지만, 모델의 일부를 Estimator's model_fn 외부로 이동할 때 변수 이름이 변경될 수 있습니다. 객체 기반 checkpoints를 저장하면 Estimator 내에서 모델을 훈련시킨 후 외부에서 쉽게 사용할 수 있습니다. # In[ ]: # In[ ]: # Estimator가 "ckpt"를 객체 기반의 꼴로 저장하게 합니다. # `tf.train.Checkpoint`는 그런 다음 `model_dir`에서 Estimator의 checkpoints를 로드할 수 있습니다. # In[ ]: # est.train(..., steps=10)부터 # ## 요약 # # 텐서프로우 객체는 사용하는 변수의 값을 저장하고 복원할 수 있는 쉬운 자동 메커니즘을 제공합니다. # | 1.772429 | 2 |
zerovl/models/backbones/mml/huggingface_builder.py | zerovl/ZeroVL | 14 | 6617012 | import torch.nn as nn
from transformers import AutoModel, AutoConfig
from ..builder import BACKBONE
class HuggingFaceModel(nn.Module):
def __init__(self, cfg, **kwargs):
super(HuggingFaceModel, self).__init__()
if cfg.model.text_encoder.pretrained:
self.model = AutoModel.from_pretrained(cfg.model.text_encoder.tag,
cache_dir=f'/home/tiger/.cache/torch/hub/checkpoints/{cfg.model.text_encoder.tag}',
add_pooling_layer=False)
else:
config = AutoConfig.from_pretrained(cfg.model.text_encoder.tag)
self.model = AutoModel.from_config(config)
def forward(self, input_ids, attention_mask, **kwargs):
return self.model(input_ids=input_ids, attention_mask=attention_mask)
@BACKBONE.register_obj
def huggingface_modelzoo(cfg, **kwargs):
model = HuggingFaceModel(cfg, **kwargs)
return model
| import torch.nn as nn
from transformers import AutoModel, AutoConfig
from ..builder import BACKBONE
class HuggingFaceModel(nn.Module):
def __init__(self, cfg, **kwargs):
super(HuggingFaceModel, self).__init__()
if cfg.model.text_encoder.pretrained:
self.model = AutoModel.from_pretrained(cfg.model.text_encoder.tag,
cache_dir=f'/home/tiger/.cache/torch/hub/checkpoints/{cfg.model.text_encoder.tag}',
add_pooling_layer=False)
else:
config = AutoConfig.from_pretrained(cfg.model.text_encoder.tag)
self.model = AutoModel.from_config(config)
def forward(self, input_ids, attention_mask, **kwargs):
return self.model(input_ids=input_ids, attention_mask=attention_mask)
@BACKBONE.register_obj
def huggingface_modelzoo(cfg, **kwargs):
    """Registered backbone factory: build a :class:`HuggingFaceModel` from cfg."""
    model = HuggingFaceModel(cfg, **kwargs)
    return model
| none | 1 | 2.231316 | 2 | |
seed_detector/defaults.py | TankredO/seed_detector | 1 | 6617013 | <reponame>TankredO/seed_detector<filename>seed_detector/defaults.py
# Default number of vertices when representing a detection outline as a
# polygon (presumably used for contour resampling -- confirm at call sites).
DEFAULT_N_POLYGON_VERTICES = 500
# Area threshold in pixels; detections below this are presumably discarded.
DEFAULT_AREA_THRESHOLD = 1024
# File extensions recognised as images when scanning input directories.
IMAGE_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.gif')
| DEFAULT_N_POLYGON_VERTICES = 500
DEFAULT_AREA_THRESHOLD = 1024
IMAGE_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.gif') | none | 1 | 1.074914 | 1 | |
gammapy/utils/modeling.py | mealworm/gammapy | 0 | 6617014 | <filename>gammapy/utils/modeling.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Model parameter handling
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import abc
import numpy as np
import copy
from ..extern import six
from astropy import units as u
from astropy.table import Table, Column, vstack
from ..extern import xmltodict
from .scripts import make_path
__all__ = [
'Parameter',
'ParameterList',
]
class Parameter(object):
    """
    Class representing model parameters.

    Parameters
    ----------
    name : str
        Name of the parameter
    value : float or `~astropy.units.Quantity`
        Value of the parameter. Quantities and strings (e.g. ``'1 TeV'``)
        carry their own unit and override ``unit``.
    unit : str, optional
        Unit of the parameter (if value is given as float)
    parmin : float, optional
        Parameter value minimum. Used as lower boundary value in a model
        fit. ``NaN`` (the default) means "no boundary".
    parmax : float, optional
        Parameter value maximum. Used as upper boundary value in a model
        fit. ``NaN`` (the default) means "no boundary".
    frozen : bool, optional
        Whether the parameter is kept fixed (not varied) in a model fit
    """

    def __init__(self, name, value, unit='', parmin=None, parmax=None, frozen=False):
        self.name = name
        if isinstance(value, u.Quantity) or isinstance(value, six.string_types):
            # The quantity setter splits a Quantity (or parseable string)
            # into a plain float value plus a unit string.
            self.quantity = value
        else:
            self.value = value
            self.unit = unit
        # Use explicit ``is None`` checks: the previous ``parmin or np.nan``
        # idiom silently replaced a perfectly valid boundary of 0 with NaN.
        self.parmin = np.nan if parmin is None else parmin
        self.parmax = np.nan if parmax is None else parmax
        self.frozen = frozen

    @property
    def quantity(self):
        """Parameter value as an `~astropy.units.Quantity`."""
        retval = self.value * u.Unit(self.unit)
        return retval

    @quantity.setter
    def quantity(self, par):
        par = u.Quantity(par)
        self.value = par.value
        self.unit = str(par.unit)

    def __str__(self):
        ss = 'Parameter(name={name!r}, value={value!r}, unit={unit!r}, '
        ss += 'min={parmin!r}, max={parmax!r}, frozen={frozen!r})'
        return ss.format(**self.__dict__)

    def to_dict(self):
        """Serialise to a dict of plain Python types (JSON/YAML friendly)."""
        return dict(name=self.name,
                    value=float(self.value),
                    unit=str(self.unit),
                    frozen=self.frozen,
                    min=float(self.parmin),
                    max=float(self.parmax))

    def to_sherpa(self, modelname='Default'):
        """Convert to sherpa parameter"""
        from sherpa.models import Parameter
        # Sherpa has no notion of "no boundary"; substitute the float32
        # extremes for NaN limits.
        parmin = np.finfo(np.float32).min if np.isnan(self.parmin) else self.parmin
        parmax = np.finfo(np.float32).max if np.isnan(self.parmax) else self.parmax
        par = Parameter(modelname=modelname, name=self.name,
                        val=self.value, units=self.unit,
                        min=parmin, max=parmax,
                        frozen=self.frozen)
        return par
class ParameterList(object):
    """List of `~gammapy.spectrum.models.Parameter`.

    Also holds the (optional) covariance matrix of the parameters, with rows
    and columns ordered like ``parameters``.

    Parameters
    ----------
    parameters : list of `Parameter`
        List of parameters
    covariance : `~numpy.ndarray`
        Parameters covariance matrix. Order of values as specified by
        `parameters`.
    """

    def __init__(self, parameters, covariance=None):
        self.parameters = parameters
        self.covariance = covariance

    def __str__(self):
        ss = self.__class__.__name__
        for par in self.parameters:
            ss += '\n{}'.format(par)
        ss += '\n\nCovariance: \n{}'.format(self.covariance)
        return ss

    def __getitem__(self, name):
        """Access parameter by name (linear scan; raises IndexError if absent)."""
        for par in self.parameters:
            if name == par.name:
                return par
        raise IndexError('Parameter {} not found for : {}'.format(name, self))

    def to_dict(self):
        # Serialise parameters (and covariance, when set) to plain Python types.
        retval = dict(parameters=list(), covariance=None)
        for par in self.parameters:
            retval['parameters'].append(par.to_dict())
        if self.covariance is not None:
            retval['covariance'] = self.covariance.tolist()
        return retval

    def to_list_of_dict(self):
        # Like ``to_dict`` but flattens each parameter together with its
        # 1-sigma error (NaN when no covariance matrix is available).
        result = []
        for parameter in self.parameters:
            vals = parameter.to_dict()
            if self.covariance is None:
                vals['error'] = np.nan
            else:
                vals['error'] = self.error(parameter.name)
            result.append(vals)
        return result

    def to_table(self):
        """
        Serialize parameter list into `~astropy.table.Table`
        """
        names = ['name', 'value', 'error', 'unit', 'min', 'max', 'frozen']
        # Scientific notation for the numeric columns.
        formats = {'value': '.3e',
                   'error': '.3e',
                   'min': '.3e',
                   'max': '.3e'}
        table = Table(self.to_list_of_dict(), names=names)
        for name in formats:
            table[name].format = formats[name]
        return table

    @classmethod
    def from_dict(cls, val):
        """Inverse of ``to_dict``: rebuild the list from plain Python types."""
        pars = list()
        for par in val['parameters']:
            pars.append(Parameter(name=par['name'],
                                  value=float(par['value']),
                                  unit=par['unit'],
                                  parmin=float(par['min']),
                                  parmax=float(par['max']),
                                  frozen=par['frozen']))
        try:
            covariance = np.array(val['covariance'])
        except KeyError:
            covariance = None
        return cls(parameters=pars, covariance=covariance)

    # TODO: this is a temporary solution until we have a better way
    # to handle covariance matrices via a class
    def covariance_to_table(self):
        """
        Serialize parameter covariance into `~astropy.table.Table`
        """
        # Keep only columns (and, below, rows) of free parameters.
        t = Table(self.covariance, names=self.names)[self.free]
        for name in t.colnames:
            t[name].format = '.3'
        col = Column(name='name/name', data=self.names)
        t.add_column(col, index=0)
        rows = [row for row in t if row['name/name'] in self.free]
        return vstack(rows)

    @property
    def names(self):
        """List of parameter names"""
        return [par.name for par in self.parameters]

    @property
    def _ufloats(self):
        """
        Return dict of ufloats with covariance
        """
        from uncertainties import correlated_values
        values = [_.value for _ in self.parameters]
        try:
            # convert existing parameters to ufloats
            uarray = correlated_values(values, self.covariance)
        except np.linalg.LinAlgError:
            # correlated_values fails on a missing/degenerate matrix.
            raise ValueError('Covariance matrix not set.')
        upars = {}
        for par, upar in zip(self.parameters, uarray):
            upars[par.name] = upar
        return upars

    @property
    def free(self):
        """
        Return list of free parameters names.
        """
        free_pars = [par.name for par in self.parameters if not par.frozen]
        return free_pars

    @property
    def frozen(self):
        """
        Return list of frozen parameters names.
        """
        frozen_pars = [par.name for par in self.parameters if par.frozen]
        return frozen_pars

    # TODO: this is a temporary solution until we have a better way
    # to handle covariance matrices via a class
    def set_parameter_errors(self, errors):
        """
        Set uncorrelated parameters errors.

        Parameters
        ----------
        errors : dict of `~astropy.units.Quantity`
            Dict of parameter errors.
        """
        values = []
        for par in self.parameters:
            # Missing entries default to zero error in the parameter's unit.
            quantity = errors.get(par.name, 0 * u.Unit(par.unit))
            values.append(u.Quantity(quantity, par.unit).value)
        # Diagonal covariance: squared errors, no cross terms.
        self.covariance = np.diag(values) ** 2

    # TODO: this is a temporary solution until we have a better way
    # to handle covariance matrices via a class
    def set_parameter_covariance(self, covariance, covar_axis):
        """
        Set full correlated parameters errors.

        Parameters
        ----------
        covariance : array-like
            Covariance matrix
        covar_axis : list
            List of strings defining the parameter order in covariance
        """
        # Reorder the given matrix into this list's parameter order.
        shape = (len(self.parameters), len(self.parameters))
        covariance_new = np.zeros(shape)
        idx_lookup = dict([(par.name, idx) for idx, par in enumerate(self.parameters)])
        # TODO: make use of covariance matrix symmetry
        for i, par in enumerate(covar_axis):
            i_new = idx_lookup[par]
            for j, par_other in enumerate(covar_axis):
                j_new = idx_lookup[par_other]
                covariance_new[i_new, j_new] = covariance[i, j]
        self.covariance = covariance_new

    # TODO: this is a temporary solution until we have a better way
    # to handle covariance matrices via a class
    def error(self, parname):
        """
        Return error on a given parameter

        Parameters
        ----------
        parname : str
            Parameter
        """
        if self.covariance is None:
            raise ValueError('Covariance matrix not set.')
        # 1-sigma error is the square root of the diagonal covariance entry.
        for i, parameter in enumerate(self.parameters):
            if parameter.name == parname:
                return np.sqrt(self.covariance[i, i])
        raise ValueError('Could not find parameter {}'.format(parname))

    def copy(self):
        """A deep copy"""
        return copy.deepcopy(self)
| <filename>gammapy/utils/modeling.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Model parameter handling
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import abc
import numpy as np
import copy
from ..extern import six
from astropy import units as u
from astropy.table import Table, Column, vstack
from ..extern import xmltodict
from .scripts import make_path
__all__ = [
'Parameter',
'ParameterList',
]
class Parameter(object):
    """
    Class representing model parameters.

    Parameters
    ----------
    name : str
        Name of the parameter
    value : float or `~astropy.units.Quantity`
        Value of the parameter. Quantities and strings (e.g. ``'1 TeV'``)
        carry their own unit and override ``unit``.
    unit : str, optional
        Unit of the parameter (if value is given as float)
    parmin : float, optional
        Parameter value minimum. Used as lower boundary value in a model
        fit. ``NaN`` (the default) means "no boundary".
    parmax : float, optional
        Parameter value maximum. Used as upper boundary value in a model
        fit. ``NaN`` (the default) means "no boundary".
    frozen : bool, optional
        Whether the parameter is kept fixed (not varied) in a model fit
    """

    def __init__(self, name, value, unit='', parmin=None, parmax=None, frozen=False):
        self.name = name
        if isinstance(value, u.Quantity) or isinstance(value, six.string_types):
            # The quantity setter splits a Quantity (or parseable string)
            # into a plain float value plus a unit string.
            self.quantity = value
        else:
            self.value = value
            self.unit = unit
        # Use explicit ``is None`` checks: the previous ``parmin or np.nan``
        # idiom silently replaced a perfectly valid boundary of 0 with NaN.
        self.parmin = np.nan if parmin is None else parmin
        self.parmax = np.nan if parmax is None else parmax
        self.frozen = frozen

    @property
    def quantity(self):
        """Parameter value as an `~astropy.units.Quantity`."""
        retval = self.value * u.Unit(self.unit)
        return retval

    @quantity.setter
    def quantity(self, par):
        par = u.Quantity(par)
        self.value = par.value
        self.unit = str(par.unit)

    def __str__(self):
        ss = 'Parameter(name={name!r}, value={value!r}, unit={unit!r}, '
        ss += 'min={parmin!r}, max={parmax!r}, frozen={frozen!r})'
        return ss.format(**self.__dict__)

    def to_dict(self):
        """Serialise to a dict of plain Python types (JSON/YAML friendly)."""
        return dict(name=self.name,
                    value=float(self.value),
                    unit=str(self.unit),
                    frozen=self.frozen,
                    min=float(self.parmin),
                    max=float(self.parmax))

    def to_sherpa(self, modelname='Default'):
        """Convert to sherpa parameter"""
        from sherpa.models import Parameter
        # Sherpa has no notion of "no boundary"; substitute the float32
        # extremes for NaN limits.
        parmin = np.finfo(np.float32).min if np.isnan(self.parmin) else self.parmin
        parmax = np.finfo(np.float32).max if np.isnan(self.parmax) else self.parmax
        par = Parameter(modelname=modelname, name=self.name,
                        val=self.value, units=self.unit,
                        min=parmin, max=parmax,
                        frozen=self.frozen)
        return par
class ParameterList(object):
"""List of `~gammapy.spectrum.models.Parameter`
Holds covariance matrix
Parameters
----------
parameters : list of `Parameter`
List of parameters
covariance : `~numpy.ndarray`
Parameters covariance matrix. Order of values as specified by
`parameters`.
"""
def __init__(self, parameters, covariance=None):
self.parameters = parameters
self.covariance = covariance
def __str__(self):
ss = self.__class__.__name__
for par in self.parameters:
ss += '\n{}'.format(par)
ss += '\n\nCovariance: \n{}'.format(self.covariance)
return ss
def __getitem__(self, name):
"""Access parameter by name"""
for par in self.parameters:
if name == par.name:
return par
raise IndexError('Parameter {} not found for : {}'.format(name, self))
def to_dict(self):
retval = dict(parameters=list(), covariance=None)
for par in self.parameters:
retval['parameters'].append(par.to_dict())
if self.covariance is not None:
retval['covariance'] = self.covariance.tolist()
return retval
def to_list_of_dict(self):
result = []
for parameter in self.parameters:
vals = parameter.to_dict()
if self.covariance is None:
vals['error'] = np.nan
else:
vals['error'] = self.error(parameter.name)
result.append(vals)
return result
def to_table(self):
"""
Serialize parameter list into `~astropy.table.Table`
"""
names = ['name', 'value', 'error', 'unit', 'min', 'max', 'frozen']
formats = {'value': '.3e',
'error': '.3e',
'min': '.3e',
'max': '.3e'}
table = Table(self.to_list_of_dict(), names=names)
for name in formats:
table[name].format = formats[name]
return table
@classmethod
def from_dict(cls, val):
pars = list()
for par in val['parameters']:
pars.append(Parameter(name=par['name'],
value=float(par['value']),
unit=par['unit'],
parmin=float(par['min']),
parmax=float(par['max']),
frozen=par['frozen']))
try:
covariance = np.array(val['covariance'])
except KeyError:
covariance = None
return cls(parameters=pars, covariance=covariance)
# TODO: this is a temporary solution until we have a better way
# to handle covariance matrices via a class
def covariance_to_table(self):
"""
Serialize parameter covariance into `~astropy.table.Table`
"""
t = Table(self.covariance, names=self.names)[self.free]
for name in t.colnames:
t[name].format = '.3'
col = Column(name='name/name', data=self.names)
t.add_column(col, index=0)
rows = [row for row in t if row['name/name'] in self.free]
return vstack(rows)
@property
def names(self):
"""List of parameter names"""
return [par.name for par in self.parameters]
@property
def _ufloats(self):
"""
Return dict of ufloats with covariance
"""
from uncertainties import correlated_values
values = [_.value for _ in self.parameters]
try:
# convert existing parameters to ufloats
uarray = correlated_values(values, self.covariance)
except np.linalg.LinAlgError:
raise ValueError('Covariance matrix not set.')
upars = {}
for par, upar in zip(self.parameters, uarray):
upars[par.name] = upar
return upars
@property
def free(self):
"""
Return list of free parameters names.
"""
free_pars = [par.name for par in self.parameters if not par.frozen]
return free_pars
@property
def frozen(self):
"""
Return list of frozen parameters names.
"""
frozen_pars = [par.name for par in self.parameters if par.frozen]
return frozen_pars
# TODO: this is a temporary solution until we have a better way
# to handle covariance matrices via a class
def set_parameter_errors(self, errors):
"""
Set uncorrelated parameters errors.
Parameters
----------
errors : dict of `~astropy.units.Quantity`
Dict of parameter errors.
"""
values = []
for par in self.parameters:
quantity = errors.get(par.name, 0 * u.Unit(par.unit))
values.append(u.Quantity(quantity, par.unit).value)
self.covariance = np.diag(values) ** 2
# TODO: this is a temporary solution until we have a better way
# to handle covariance matrices via a class
def set_parameter_covariance(self, covariance, covar_axis):
"""
Set full correlated parameters errors.
Parameters
----------
covariance : array-like
Covariance matrix
covar_axis : list
List of strings defining the parameter order in covariance
"""
shape = (len(self.parameters), len(self.parameters))
covariance_new = np.zeros(shape)
idx_lookup = dict([(par.name, idx) for idx, par in enumerate(self.parameters)])
# TODO: make use of covariance matrix symmetry
for i, par in enumerate(covar_axis):
i_new = idx_lookup[par]
for j, par_other in enumerate(covar_axis):
j_new = idx_lookup[par_other]
covariance_new[i_new, j_new] = covariance[i, j]
self.covariance = covariance_new
# TODO: this is a temporary solution until we have a better way
# to handle covariance matrices via a class
def error(self, parname):
"""
Return error on a given parameter
Parameters
----------
parname : str
Parameter
"""
if self.covariance is None:
raise ValueError('Covariance matrix not set.')
for i, parameter in enumerate(self.parameters):
if parameter.name == parname:
return np.sqrt(self.covariance[i, i])
raise ValueError('Could not find parameter {}'.format(parname))
def copy(self):
"""A deep copy"""
return copy.deepcopy(self)
| en | 0.332711 | # Licensed under a 3-clause BSD style license - see LICENSE.rst Model parameter handling Class representing model parameters. Parameters ---------- name : str Name of the parameter value : float or `~astropy.units.Quantity` Value of the parameter unit : str, optional Unit of the parameter (if value is given as float) parmin : float, optional Parameter value minimum. Used as minimum boundary value in a model fit parmax : float, optional Parameter value maximum. Used as minimum boundary value in a model fit frozen : bool, optional Whether the parameter is free to be varied in a model fit Convert to sherpa parameter List of `~gammapy.spectrum.models.Parameter` Holds covariance matrix Parameters ---------- parameters : list of `Parameter` List of parameters covariance : `~numpy.ndarray` Parameters covariance matrix. Order of values as specified by `parameters`. Access parameter by name Serialize parameter list into `~astropy.table.Table` # TODO: this is a temporary solution until we have a better way # to handle covariance matrices via a class Serialize parameter covariance into `~astropy.table.Table` List of parameter names Return dict of ufloats with covariance # convert existing parameters to ufloats Return list of free parameters names. Return list of frozen parameters names. # TODO: this is a temporary solution until we have a better way # to handle covariance matrices via a class Set uncorrelated parameters errors. Parameters ---------- errors : dict of `~astropy.units.Quantity` Dict of parameter errors. # TODO: this is a temporary solution until we have a better way # to handle covariance matrices via a class Set full correlated parameters errors. 
Parameters ---------- covariance : array-like Covariance matrix covar_axis : list List of strings defining the parameter order in covariance # TODO: make use of covariance matrix symmetry # TODO: this is a temporary solution until we have a better way # to handle covariance matrices via a class Return error on a given parameter Parameters ---------- parname : str Parameter A deep copy | 2.117154 | 2 |
organizers/forms.py | sujitnoronha/cresendo | 0 | 6617015 | <gh_stars>0
from django import forms
from clientpage.models import *
class OrganizerForm(forms.ModelForm):
    """ModelForm for creating/editing a ``Locations`` record."""

    class Meta:
        model = Locations
        # Fields exposed to the organizer; everything else on the model is
        # excluded from the form.
        fields = ['name','phonenumber','image', 'latitude', 'longitude', 'address','description']
| from django import forms
from clientpage.models import *
class OrganizerForm(forms.ModelForm):
class Meta:
model = Locations
fields = ['name','phonenumber','image', 'latitude', 'longitude', 'address','description'] | none | 1 | 1.927122 | 2 | |
setup.py | myh1000/MusicRepair-A-Pop | 0 | 6617016 | <filename>setup.py
from setuptools import setup, find_packages
# Packaging configuration for the musicrepair command-line tool.
setup(
    name='musicrepair',
    version='6.0.4',
    description='Lets you repair your music files by adding metadata and album art',
    url='https://github.com/lakshaykalbhor/musicrepair',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    packages=find_packages(),
    # Runtime dependencies installed alongside the package.
    install_requires=[
        'bs4',
        'colorama',
        'mutagen',
        'spotipy',
        'six',
        'requests',
        'configparser',
    ],
    # Installs a ``musicrepair`` console script that calls command_line.main().
    entry_points={
        'console_scripts': ['musicrepair=musicrepair.command_line:main'],
    },
    # Ship the default config file inside the installed package.
    package_data={'musicrepair':['config.ini']},
)
| <filename>setup.py
from setuptools import setup, find_packages
# Packaging configuration for the musicrepair command-line tool.
setup(
    name='musicrepair',
    version='6.0.4',
    description='Lets you repair your music files by adding metadata and album art',
    url='https://github.com/lakshaykalbhor/musicrepair',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    packages=find_packages(),
    # Runtime dependencies installed alongside the package.
    install_requires=[
        'bs4',
        'colorama',
        'mutagen',
        'spotipy',
        'six',
        'requests',
        'configparser',
    ],
    # Installs a ``musicrepair`` console script that calls command_line.main().
    entry_points={
        'console_scripts': ['musicrepair=musicrepair.command_line:main'],
    },
    # Ship the default config file inside the installed package.
    package_data={'musicrepair':['config.ini']},
)
| none | 1 | 1.512711 | 2 | |
punkin/fields/__init__.py | artPlusPlus/punkin | 1 | 6617017 | from ._float_range import FloatRange
from ._formatted_string import FormattedString
from ._simple import Simple
from ._field import get_field_type | from ._float_range import FloatRange
from ._formatted_string import FormattedString
from ._simple import Simple
from ._field import get_field_type | none | 1 | 1.180761 | 1 | |
domain/useCases/ports/__init__.py | JVGC/MyFinancesPython | 0 | 6617018 | <gh_stars>0
from .DebtId import *
from .StartDate import *
| from .DebtId import *
from .StartDate import * | none | 1 | 1.130359 | 1 | |
tests/makePicsFromData.py | yaukwankiu/armor | 1 | 6617019 | <reponame>yaukwankiu/armor<filename>tests/makePicsFromData.py
import time
import os
from armor import objects4 as ob
comprefsList = [ob.monsoon, ob.may2014,ob.soulik,ob.march2014, ob.kongrey, ]
for ds in comprefsList:
outputFolder=ds.dataFolder+'.pics/'
if not os.path.exists(outputFolder):
os.makedirs(outputFolder)
ds.setImageFolder(outputFolder)
ds.saveImages(dpi=200, verbose=True, toLoad=True, drawCoast=True) | import time
import os
from armor import objects4 as ob
comprefsList = [ob.monsoon, ob.may2014,ob.soulik,ob.march2014, ob.kongrey, ]
for ds in comprefsList:
outputFolder=ds.dataFolder+'.pics/'
if not os.path.exists(outputFolder):
os.makedirs(outputFolder)
ds.setImageFolder(outputFolder)
ds.saveImages(dpi=200, verbose=True, toLoad=True, drawCoast=True) | none | 1 | 2.534002 | 3 | |
flask_together/views.py | jtcourtemarche/youtube-de-locke | 1 | 6617020 | <reponame>jtcourtemarche/youtube-de-locke<gh_stars>1-10
#!/usr/bin/python
from flask import Blueprint
from flask import g
from flask import redirect
from flask import render_template
from flask import request
from flask_login import current_user
from flask_login import login_required
from flask_login import login_user
from flask_login import logout_user
import flask_together.models as models
from config import LASTFM_KEY
from extensions import fm
from extensions import login_manager
# Register these views with app
urls = Blueprint('urls', __name__)
# Login manager user handler
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: resolve a session user id to a User row."""
    return models.User.query.get(int(user_id))
# Make current_user a global variable
@urls.before_request
def before_request():
    """Expose the current user as ``g.user`` for views on this blueprint."""
    g.user = current_user
# Index page
@urls.route('/')
def lobby():
    """Index page: room lists for authenticated users, login form otherwise."""
    if g.user.is_authenticated:
        return render_template(
            'lobby.html',
            user_rooms=models.Room.query.with_parent(g.user),
            public_rooms=models.Room.query.filter_by(public=True).all()
        )
    return render_template('login.html')
@urls.route('/create/room', methods=['POST'])
@login_required
def create_room():
    """Create a room from the posted form and join the current user to it."""
    if request.form:
        # TODO: use Flask-WTF forms for this
        room = models.Room(
            name=request.form['room_name']
        )
        models.db.session.add(room)
        models.db.session.commit()
        g.user.join_room(room)
    # Always return to the lobby, even when no form data was posted.
    return redirect('/')
# Standard viewing page
@urls.route('/watch/<int:room_id>')
@login_required
def room(room_id):
    """Render the watch page for a public room, joining the user on first visit."""
    target = models.Room.query.filter_by(id=room_id).first()
    if target is None or not target.public:
        return 'Room doesn\'t exist.'
    if g.user not in target.users:
        g.user.join_room(target)
    return render_template('room.html', room=target, fm_enabled=fm.enabled)
# User history
@urls.route('/~<string:name>/history/<int:index>')
@urls.route('/~<string:name>/history')
@login_required
def user_history(name, index=1):
    """Render one page (25 entries) of a user's playback history."""
    user = models.User.query.filter_by(name=name).first()
    if user is None:
        return 'User ' + name + ' does not exist.'
    page_start = 25 * (index - 1)
    page_end = 25 * index
    return render_template(
        'history.html',
        history=user.videos[page_start:page_end]
    )
# User profiles
@urls.route('/~<string:name>')
@login_required
def user_profile(name):
    """Render a user's profile page, including LastFM data when connected."""
    user = models.User.query.filter_by(name=name).first()
    if user:
        # NOTE(review): ``lastfm_connected`` is read as an attribute here but
        # invoked as a method in ``auth_lastfm`` -- confirm which is correct.
        if fm.enabled and user.lastfm_connected:
            lastfm_data = fm.get_user(user.fm_name)
        else:
            lastfm_data = None
        return render_template(
            'profile.html',
            user=user,
            total_plays=len(user.videos),
            most_played=user.most_played_video,
            lastfm=lastfm_data
        )
    return 'User ' + name + ' does not exist.'
# Login view
# TODO: next redirect
@urls.route('/login', methods=['POST'])
def login():
    """Authenticate a user from the posted credentials and start a session."""
    if g.user.is_authenticated:
        return redirect('/')
    # Guard-clause form: each failure path exits immediately.
    user = models.User.query.filter_by(
        name=request.form['username']).first()
    if user is None:
        return render_template('login.html', error='Invalid username')
    if not user.checkpass(request.form['password']):
        return render_template('login.html', error='Invalid password')
    login_user(user)
    return redirect('/')
# Logout view
@urls.route('/logout')
@login_required
def logout():
    """End the current session and return to the index page."""
    logout_user()
    return redirect('/')
# Redirect user to LastFM authentication page
@urls.route('/auth/lastfm')
@login_required
def auth_lastfm():
    """Send the user to LastFM's authorisation page unless already connected."""
    if not current_user.lastfm_connected():
        return redirect(f'http://www.last.fm/api/auth/?api_key={LASTFM_KEY}')
    return f'Your account {current_user.fm_name} is already connected'
# Register LastFM credentials into ytdl database
@urls.route('/register', methods=['GET'])
@login_required
def register():
    """LastFM callback: exchange the auth token for a session key and store it."""
    # The length check (32) matches the token format this code expects --
    # anything else is rejected before calling the LastFM API.
    if 'token' in request.args and len(request.args['token']) == 32:
        token = request.args['token']
        resp = fm.get_session(token)
        if resp[0]:
            # Register LastFM in DB
            current_user.fm_name = resp[1]['name']
            current_user.fm_token = token
            current_user.fm_sk = resp[1]['key']
            models.db.session.commit()
            return '<span>Registered {}</span><br/><a href="/">Take me back</a>'.format(resp[1]['name'])
        else:
            return 'Error connecting to your LastFM account: {}'.format(resp[1]['message'])
    else:
        return 'Failed to connect to your LastFM'
# Page errors
@urls.errorhandler(404)
def page_not_found(error):
    """Send unknown URLs back to the index page."""
    return redirect('/')
@urls.errorhandler(401)
def unauthorized(error):
    """Send unauthorised requests back to the index (login) page."""
    return redirect('/')
| #!/usr/bin/python
from flask import Blueprint
from flask import g
from flask import redirect
from flask import render_template
from flask import request
from flask_login import current_user
from flask_login import login_required
from flask_login import login_user
from flask_login import logout_user
import flask_together.models as models
from config import LASTFM_KEY
from extensions import fm
from extensions import login_manager
# Register these views with app
urls = Blueprint('urls', __name__)
# Login manager user handler
@login_manager.user_loader
def load_user(user_id):
return models.User.query.get(int(user_id))
# Make current_user a global variable
@urls.before_request
def before_request():
g.user = current_user
# Index page
@urls.route('/')
def lobby():
if g.user.is_authenticated:
return render_template(
'lobby.html',
user_rooms=models.Room.query.with_parent(g.user),
public_rooms=models.Room.query.filter_by(public=True).all()
)
return render_template('login.html')
@urls.route('/create/room', methods=['POST'])
@login_required
def create_room():
if request.form:
# TODO: use Flask-WTF forms for this
room = models.Room(
name=request.form['room_name']
)
models.db.session.add(room)
models.db.session.commit()
g.user.join_room(room)
return redirect('/')
# Standard viewing page
@urls.route('/watch/<int:room_id>')
@login_required
def room(room_id):
room = models.Room.query.filter_by(id=room_id).first()
if room and room.public:
if g.user not in room.users:
g.user.join_room(room)
return render_template('room.html', room=room, fm_enabled=fm.enabled)
return 'Room doesn\'t exist.'
# User history
@urls.route('/~<string:name>/history/<int:index>')
@urls.route('/~<string:name>/history')
@login_required
def user_history(name, index=1):
user = models.User.query.filter_by(name=name).first()
if user:
return render_template(
'history.html',
history=user.videos[25*(index-1):25*index]
)
else:
return 'User ' + name + ' does not exist.'
# User profiles
@urls.route('/~<string:name>')
@login_required
def user_profile(name):
user = models.User.query.filter_by(name=name).first()
if user:
if fm.enabled and user.lastfm_connected:
lastfm_data = fm.get_user(user.fm_name)
else:
lastfm_data = None
return render_template(
'profile.html',
user=user,
total_plays=len(user.videos),
most_played=user.most_played_video,
lastfm=lastfm_data
)
return 'User ' + name + ' does not exist.'
# Login view
# TODO: next redirect
@urls.route('/login', methods=['POST'])
def login():
if g.user.is_authenticated:
return redirect('/')
else:
username = models.User.query.filter_by(
name=request.form['username']).first()
if username:
if username.checkpass(request.form['password']):
login_user(username)
return redirect('/')
else:
return render_template('login.html', error='Invalid password')
else:
return render_template('login.html', error='Invalid username')
# Logout view
@urls.route('/logout')
@login_required
def logout():
logout_user()
return redirect('/')
# Redirect user to LastFM authentication page
@urls.route('/auth/lastfm')
@login_required
def auth_lastfm():
if not current_user.lastfm_connected():
return redirect(f'http://www.last.fm/api/auth/?api_key={LASTFM_KEY}')
return f'Your account {current_user.fm_name} is already connected'
# Register LastFM credentials into ytdl database
@urls.route('/register', methods=['GET'])
@login_required
def register():
if 'token' in request.args and len(request.args['token']) == 32:
token = request.args['token']
resp = fm.get_session(token)
if resp[0]:
# Register LastFM in DB
current_user.fm_name = resp[1]['name']
current_user.fm_token = token
current_user.fm_sk = resp[1]['key']
models.db.session.commit()
return '<span>Registered {}</span><br/><a href="/">Take me back</a>'.format(resp[1]['name'])
else:
return 'Error connecting to your LastFM account: {}'.format(resp[1]['message'])
else:
return 'Failed to connect to your LastFM'
# Page errors
@urls.errorhandler(404)
def page_not_found(error):
return redirect('/')
@urls.errorhandler(401)
def unauthorized(error):
return redirect('/') | en | 0.691013 | #!/usr/bin/python # Register these views with app # Login manager user handler # Make current_user a global variable # Index page # TODO: use Flask-WTF forms for this # Standard viewing page # User history # User profiles # Login view # TODO: next redirect # Logout view # Redirect user to LastFM authentication page # Register LastFM credentials into ytdl database # Register LastFM in DB # Page errors | 2.22148 | 2 |
dev/06_21_2018/UPS_Error.py | npwebste/UPS_Controller | 0 | 6617021 | <gh_stars>0
# Universal Power Supply Controller
# USAID Middle East Water Security Initiative
#
# Developed by: <NAME>
# Primary Investigator: <NAME>
#
# Version History (mm_dd_yyyy)
# 1.00 03_24_2018_NW
#
######################################################
def UPS_Error(ErrorCode):
    """Print the operator-facing diagnostic message for a UPS error code.

    Parameters
    ----------
    ErrorCode : str
        One of the ``Error_*`` identifiers used by the controller.
        Unknown codes are ignored (nothing is printed), matching the
        original if/elif chain's behaviour.
    """
    # Dict dispatch replaces the long if/elif chain.  This also removes the
    # previously unreachable duplicate 'Error_VFD_Power' branch, fixes the
    # 'votlage' typo, and gives 'Error_Duty_Cycle' its own message instead
    # of the copy-pasted 'Incorrect power calculation'.
    messages = {
        'Error_VFD_Freq': 'VFD frequency set above maximum, shutting down motor',
        'Error_VFD_Volt': 'VFD voltage set above maximum, shutting down motor',
        'Error_VFD_Amps': 'VFD current set above maximum, shutting down motor',
        'Error_VFD_Power': 'VFD power set above maximum, shutting down motor',
        'Error_VFD_BusVolt': 'VFD bus voltage set above maximum, shutting down motor',
        'Error_VFD_Temp': 'VFD temperature set above maximum, shutting down motor',
        'Error_Solar_Voltage': 'Solar voltage set above maximum, shutting down converter and motor',
        'Error_DC_Link_Voltage': 'DC link voltage set above maximum, shutting down converter and motor',
        'Error_Voltage_Measurement': 'Incorrect voltage measurement input',
        'Error_Transfer_Switch': 'Incorrect transfer switch input',
        'Error_Duty_Cycle': 'Incorrect duty cycle calculation',
    }
    message = messages.get(ErrorCode)
    if message is not None:
        print(message)
    return
# USAID Middle East Water Security Initiative
#
# Developed by: <NAME>
# Primary Investigator: <NAME>
#
# Version History (mm_dd_yyyy)
# 1.00 03_24_2018_NW
#
######################################################
def UPS_Error(ErrorCode):
    """Print the operator-facing diagnostic message for a UPS error code.

    Parameters
    ----------
    ErrorCode : str
        One of the ``Error_*`` identifiers used by the controller.
        Unknown codes are ignored (nothing is printed), matching the
        original if/elif chain's behaviour.
    """
    # Dict dispatch replaces the long if/elif chain.  This also removes the
    # previously unreachable duplicate 'Error_VFD_Power' branch, fixes the
    # 'votlage' typo, and gives 'Error_Duty_Cycle' its own message instead
    # of the copy-pasted 'Incorrect power calculation'.
    messages = {
        'Error_VFD_Freq': 'VFD frequency set above maximum, shutting down motor',
        'Error_VFD_Volt': 'VFD voltage set above maximum, shutting down motor',
        'Error_VFD_Amps': 'VFD current set above maximum, shutting down motor',
        'Error_VFD_Power': 'VFD power set above maximum, shutting down motor',
        'Error_VFD_BusVolt': 'VFD bus voltage set above maximum, shutting down motor',
        'Error_VFD_Temp': 'VFD temperature set above maximum, shutting down motor',
        'Error_Solar_Voltage': 'Solar voltage set above maximum, shutting down converter and motor',
        'Error_DC_Link_Voltage': 'DC link voltage set above maximum, shutting down converter and motor',
        'Error_Voltage_Measurement': 'Incorrect voltage measurement input',
        'Error_Transfer_Switch': 'Incorrect transfer switch input',
        'Error_Duty_Cycle': 'Incorrect duty cycle calculation',
    }
    message = messages.get(ErrorCode)
    if message is not None:
        print(message)
    return
online_pharmacy/customer/admin.py | geekyJock8/online_pharmacy | 5 | 6617022 | <filename>online_pharmacy/customer/admin.py<gh_stars>1-10
from django.contrib import admin
from .models import customer,address_list,contact_customer,customer_notifications
# Register the customer-facing models so they can be browsed and edited
# from the Django admin interface (default ModelAdmin options).
admin.site.register(customer)
admin.site.register(address_list)
admin.site.register(contact_customer)
admin.site.register(customer_notifications)
| <filename>online_pharmacy/customer/admin.py<gh_stars>1-10
from django.contrib import admin
from .models import customer,address_list,contact_customer,customer_notifications
# Expose every customer-related model in the Django admin interface,
# registered in the same order as before with default ModelAdmin options.
for _model in (customer, address_list, contact_customer, customer_notifications):
    admin.site.register(_model)
| none | 1 | 1.425082 | 1 | |
elastic_transport/_transport.py | elastic/elastic-transport-python | 7 | 6617023 | <reponame>elastic/elastic-transport-python
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import dataclasses
import time
import warnings
from platform import python_version
from typing import (
Any,
Callable,
Collection,
Dict,
List,
Mapping,
Optional,
Tuple,
Type,
Union,
cast,
)
from ._compat import Lock, warn_stacklevel
from ._exceptions import (
ConnectionError,
ConnectionTimeout,
SniffingError,
TransportError,
TransportWarning,
)
from ._models import (
DEFAULT,
ApiResponseMeta,
DefaultType,
HttpHeaders,
NodeConfig,
SniffOptions,
)
from ._node import AiohttpHttpNode, BaseNode, RequestsHttpNode, Urllib3HttpNode
from ._node_pool import NodePool, NodeSelector
from ._serializer import DEFAULT_SERIALIZERS, Serializer, SerializerCollection
from ._version import __version__
from .client_utils import client_meta_version, resolve_default
# Allows for using a node_class by name rather than import.
NODE_CLASS_NAMES: Dict[str, Type[BaseNode]] = {
    "urllib3": Urllib3HttpNode,
    "requests": RequestsHttpNode,
    "aiohttp": AiohttpHttpNode,
}
# These are HTTP status errors that shouldn't be considered
# 'errors' for marking a node as dead. These errors typically
# mean everything is fine server-wise and instead the API call
# in question responded successfully.
# NOTE(review): 'None' presumably covers responses that carry no
# HTTP status at all -- confirm against ApiResponseMeta producers.
NOT_DEAD_NODE_HTTP_STATUSES = {None, 400, 402, 401, 403, 404}
# Default (service, service-version) pair advertised in the
# 'x-elastic-client-meta' HTTP header.
DEFAULT_CLIENT_META_SERVICE = ("et", client_meta_version(__version__))
class Transport:
    """
    Encapsulation of transport-related logic. Handles instantiation of the
    individual nodes as well as creating a node pool to hold them.
    Main interface is the :meth:`elastic_transport.Transport.perform_request` method.
    """
    def __init__(
        self,
        node_configs: List[NodeConfig],
        node_class: Union[str, Type[BaseNode]] = Urllib3HttpNode,
        node_pool_class: Type[NodePool] = NodePool,
        randomize_nodes_in_pool: bool = True,
        node_selector_class: Optional[Union[str, Type[NodeSelector]]] = None,
        dead_node_backoff_factor: Optional[float] = None,
        max_dead_node_backoff: Optional[float] = None,
        serializers: Optional[Mapping[str, Serializer]] = None,
        default_mimetype: str = "application/json",
        max_retries: int = 3,
        retry_on_status: Collection[int] = (429, 502, 503, 504),
        retry_on_timeout: bool = False,
        sniff_on_start: bool = False,
        sniff_before_requests: bool = False,
        sniff_on_node_failure: bool = False,
        sniff_timeout: Optional[float] = 0.5,
        min_delay_between_sniffing: float = 10.0,
        sniff_callback: Optional[
            Callable[
                ["Transport", "SniffOptions"],
                List[NodeConfig],
            ]
        ] = None,
        meta_header: bool = True,
        client_meta_service: Tuple[str, str] = DEFAULT_CLIENT_META_SERVICE,
    ) -> None:
        """
        :arg node_configs: List of 'NodeConfig' instances to create initial set of nodes.
        :arg node_class: subclass of :class:`~elastic_transport.BaseNode` to use
            or the name of the Connection (ie 'urllib3', 'requests')
        :arg node_pool_class: subclass of :class:`~elastic_transport.NodePool` to use
        :arg randomize_nodes_in_pool: Set to false to not randomize nodes within the pool.
            Defaults to true.
        :arg node_selector_class: Class to be used to select nodes within
            the :class:`~elastic_transport.NodePool`.
        :arg dead_node_backoff_factor: Exponential backoff factor to calculate the amount
            of time to timeout a node after an unsuccessful API call.
        :arg max_dead_node_backoff: Maximum amount of time to timeout a node after an
            unsuccessful API call.
        :arg serializers: optional dict of serializer instances that will be
            used for deserializing data coming from the server. (key is the mimetype)
        :arg max_retries: Maximum number of retries for an API call.
            Set to 0 to disable retries. Defaults to ``3``.
        :arg retry_on_status: set of HTTP status codes on which we should retry
            on a different node. defaults to ``(429, 502, 503, 504)``
        :arg retry_on_timeout: should timeout trigger a retry on different
            node? (default ``False``)
        :arg sniff_on_start: If ``True`` will sniff for additional nodes as soon
            as possible, guaranteed before the first request.
        :arg sniff_on_node_failure: If ``True`` will sniff for additional nodes
            after a node is marked as dead in the pool.
        :arg sniff_before_requests: If ``True`` will occasionally sniff for additional
            nodes as requests are sent.
        :arg sniff_timeout: Timeout value in seconds to use for sniffing requests.
            Defaults to ``0.5`` seconds.
        :arg min_delay_between_sniffing: Number of seconds to wait between calls to
            :meth:`elastic_transport.Transport.sniff` to avoid sniffing too frequently.
            Defaults to 10 seconds.
        :arg sniff_callback: Function that is passed a :class:`elastic_transport.Transport` and
            :class:`elastic_transport.SniffOptions` and should do node discovery and
            return a list of :class:`elastic_transport.NodeConfig` instances.
        :arg meta_header: If set to False the ``X-Elastic-Client-Meta`` HTTP header won't be sent.
            Defaults to True.
        :arg client_meta_service: Key-value pair for the service field of the client metadata header.
            Defaults to the service key-value for Elastic Transport.
        """
        if isinstance(node_class, str):
            if node_class not in NODE_CLASS_NAMES:
                options = "', '".join(sorted(NODE_CLASS_NAMES.keys()))
                raise ValueError(
                    f"Unknown option for node_class: '{node_class}'. "
                    f"Available options are: '{options}'"
                )
            node_class = NODE_CLASS_NAMES[node_class]
        # Fails fast if the sniffing flags and callback are inconsistent.
        validate_sniffing_options(
            node_configs=node_configs,
            sniff_on_start=sniff_on_start,
            sniff_before_requests=sniff_before_requests,
            sniff_on_node_failure=sniff_on_node_failure,
            sniff_callback=sniff_callback,
        )
        # Create the default metadata for the x-elastic-client-meta
        # HTTP header. Only requires adding the (service, service_version)
        # tuple to the beginning of the client_meta
        self._transport_client_meta: Tuple[Tuple[str, str], ...] = (
            client_meta_service,
            ("py", client_meta_version(python_version())),
            ("t", client_meta_version(__version__)),
        )
        # Grab the 'HTTP_CLIENT_META' property from the node class
        http_client_meta = cast(
            Optional[Tuple[str, str]],
            getattr(node_class, "_CLIENT_META_HTTP_CLIENT", None),
        )
        if http_client_meta:
            self._transport_client_meta += (http_client_meta,)
        if not isinstance(meta_header, bool):
            raise TypeError("'meta_header' must be of type bool")
        self.meta_header = meta_header
        # serialization config
        _serializers = DEFAULT_SERIALIZERS.copy()
        # if custom serializers map has been supplied, override the defaults with it
        if serializers:
            _serializers.update(serializers)
        # Create our collection of serializers
        self.serializers = SerializerCollection(
            _serializers, default_mimetype=default_mimetype
        )
        # Set of default request options
        self.max_retries = max_retries
        self.retry_on_status = retry_on_status
        self.retry_on_timeout = retry_on_timeout
        # Build the NodePool from all the options
        node_pool_kwargs: Dict[str, Any] = {}
        if node_selector_class is not None:
            node_pool_kwargs["node_selector_class"] = node_selector_class
        if dead_node_backoff_factor is not None:
            node_pool_kwargs["dead_node_backoff_factor"] = dead_node_backoff_factor
        if max_dead_node_backoff is not None:
            node_pool_kwargs["max_dead_node_backoff"] = max_dead_node_backoff
        self.node_pool: NodePool = node_pool_class(
            node_configs,
            node_class=node_class,
            randomize_nodes=randomize_nodes_in_pool,
            **node_pool_kwargs,
        )
        self._sniff_on_start = sniff_on_start
        self._sniff_before_requests = sniff_before_requests
        self._sniff_on_node_failure = sniff_on_node_failure
        self._sniff_timeout = sniff_timeout
        self._sniff_callback = sniff_callback
        self._sniffing_lock = Lock()  # Used to track whether we're currently sniffing.
        self._min_delay_between_sniffing = min_delay_between_sniffing
        self._last_sniffed_at = 0.0  # Epoch seconds of the last sniff attempt.
        if sniff_on_start:
            self.sniff(True)
    def perform_request(  # type: ignore[override,return]
        self,
        method: str,
        target: str,
        *,
        body: Optional[Any] = None,
        headers: Union[Mapping[str, Any], DefaultType] = DEFAULT,
        max_retries: Union[int, DefaultType] = DEFAULT,
        retry_on_status: Union[Collection[int], DefaultType] = DEFAULT,
        retry_on_timeout: Union[bool, DefaultType] = DEFAULT,
        request_timeout: Union[Optional[float], DefaultType] = DEFAULT,
        client_meta: Union[Tuple[Tuple[str, str], ...], DefaultType] = DEFAULT,
    ) -> Tuple[ApiResponseMeta, Any]:
        """
        Perform the actual request. Retrieve a node from the node
        pool, pass all the information to its perform_request method and
        return the data.
        If an exception was raised, mark the node as failed and retry (up
        to ``max_retries`` times).
        If the operation was successful and the node used was previously
        marked as dead, mark it as live, resetting its failure count.
        :arg method: HTTP method to use
        :arg target: HTTP request target
        :arg body: body of the request, will be serialized using serializer and
            passed to the node
        :arg headers: Additional headers to send with the request.
        :arg max_retries: Maximum number of retries before giving up on a request.
            Set to ``0`` to disable retries.
        :arg retry_on_status: Collection of HTTP status codes to retry.
        :arg retry_on_timeout: Set to true to retry after timeout errors.
        :arg request_timeout: Amount of time to wait for a response to fail with a timeout error.
        :arg client_meta: Extra client metadata key-value pairs to send in the client meta header.
        :returns: Tuple of the :class:`elastic_transport.ApiResponseMeta` with the deserialized response.
        """
        if headers is DEFAULT:
            request_headers = HttpHeaders()
        else:
            request_headers = HttpHeaders(headers)
        # Per-call options fall back to the transport-wide defaults.
        max_retries = resolve_default(max_retries, self.max_retries)
        retry_on_timeout = resolve_default(retry_on_timeout, self.retry_on_timeout)
        retry_on_status = resolve_default(retry_on_status, self.retry_on_status)
        if self.meta_header:
            request_headers["x-elastic-client-meta"] = ",".join(
                f"{k}={v}"
                for k, v in self._transport_client_meta
                + resolve_default(client_meta, ())
            )
        # Serialize the request body to bytes based on the given mimetype.
        request_body: Optional[bytes]
        if body is not None:
            if "content-type" not in request_headers:
                raise ValueError(
                    "Must provide a 'Content-Type' header to requests with bodies"
                )
            request_body = self.serializers.dumps(
                body, mimetype=request_headers["content-type"]
            )
        else:
            request_body = None
        # Errors are stored from (oldest->newest)
        errors: List[Exception] = []
        for attempt in range(max_retries + 1):
            # If we sniff before requests are made we want to do so before
            # 'node_pool.get()' is called so our sniffed nodes show up in the pool.
            if self._sniff_before_requests:
                self.sniff(False)
            retry = False
            node_failure = False
            # NOTE(review): 'last_response' is reset to None every iteration, so
            # the "forward the last response" branch in the except-handler below
            # can never see a response from a *previous* attempt -- confirm
            # whether this was meant to be initialized before the loop.
            last_response: Optional[Tuple[ApiResponseMeta, Any]] = None
            node = self.node_pool.get()
            try:
                meta, raw_data = node.perform_request(
                    method,
                    target,
                    body=request_body,
                    headers=request_headers,
                    request_timeout=request_timeout,
                )
                if raw_data not in (None, b""):
                    data = self.serializers.loads(raw_data, meta.mimetype)
                else:
                    data = None
                if meta.status in retry_on_status:
                    retry = True
                    # Keep track of the last response we see so we can return
                    # it in case the retried request returns with a transport error.
                    last_response = (meta, data)
            except TransportError as e:
                if isinstance(e, ConnectionTimeout):
                    retry = retry_on_timeout
                    node_failure = True
                elif isinstance(e, ConnectionError):
                    retry = True
                    node_failure = True
                # If the error was determined to be a node failure
                # we mark it dead in the node pool to allow for
                # other nodes to be retried.
                if node_failure:
                    self.node_pool.mark_dead(node)
                    if self._sniff_on_node_failure:
                        try:
                            self.sniff(False)
                        except TransportError:
                            # If sniffing on failure, it could fail too. Catch the
                            # exception not to interrupt the retries.
                            pass
                if not retry or attempt >= max_retries:
                    # Since we're exhausted but we have previously
                    # received some sort of response from the API
                    # we should forward that along instead of the
                    # transport error. Likely to be more actionable.
                    if last_response is not None:
                        return last_response
                    e.errors = tuple(errors)
                    raise
                else:
                    errors.append(e)
            else:
                # If we got back a response we need to check if that status
                # is indicative of a healthy node even if it's a non-2XX status
                if (
                    200 <= meta.status < 299
                    or meta.status in NOT_DEAD_NODE_HTTP_STATUSES
                ):
                    self.node_pool.mark_live(node)
                else:
                    self.node_pool.mark_dead(node)
                    if self._sniff_on_node_failure:
                        try:
                            self.sniff(False)
                        except TransportError:
                            # If sniffing on failure, it could fail too. Catch the
                            # exception not to interrupt the retries.
                            pass
                # We either got a response we're happy with or
                # we've exhausted all of our retries so we return it.
                if not retry or attempt >= max_retries:
                    return meta, data
    def sniff(self, is_initial_sniff: bool = False) -> None:
        """Run the configured ``sniff_callback`` and add discovered nodes to the pool.

        Only one sniff runs at a time (guarded by ``_sniffing_lock``) and, except
        for the initial sniff, repeat sniffs within ``min_delay_between_sniffing``
        seconds of the previous one are skipped.
        """
        previously_sniffed_at = self._last_sniffed_at
        should_sniff = self._should_sniff(is_initial_sniff)
        try:
            if should_sniff:
                self._last_sniffed_at = time.time()
                options = SniffOptions(
                    is_initial_sniff=is_initial_sniff, sniff_timeout=self._sniff_timeout
                )
                assert self._sniff_callback is not None
                node_configs = self._sniff_callback(self, options)
                if not node_configs and is_initial_sniff:
                    raise SniffingError(
                        "No viable nodes were discovered on the initial sniff attempt"
                    )
                for node_config in node_configs:
                    self.node_pool.add(node_config)
        # If sniffing failed for any reason we
        # want to allow retrying immediately.
        except Exception:
            self._last_sniffed_at = previously_sniffed_at
            raise
        # If we started a sniff we need to release the lock.
        finally:
            if should_sniff:
                self._sniffing_lock.release()
    def close(self) -> None:
        """
        Explicitly closes all nodes in the transport's pool
        """
        for node in self.node_pool.all():
            node.close()
    def _should_sniff(self, is_initial_sniff: bool) -> bool:
        """Decide if we should sniff or not. If we return ``True`` from this
        method the caller has a responsibility to unlock the ``_sniffing_lock``
        """
        if not is_initial_sniff and (
            time.time() - self._last_sniffed_at < self._min_delay_between_sniffing
        ):
            return False
        # Non-blocking acquire: False means another sniff is already running.
        return self._sniffing_lock.acquire(False)
def validate_sniffing_options(
    *,
    node_configs: List[NodeConfig],
    sniff_before_requests: bool,
    sniff_on_start: bool,
    sniff_on_node_failure: bool,
    sniff_callback: Optional[Any],
) -> None:
    """Check that the sniffing-related constructor options are consistent.

    A callback must be supplied whenever any sniffing trigger is enabled,
    and must not be supplied when none of them is.
    """
    wants_sniffing = any(
        (sniff_before_requests, sniff_on_start, sniff_on_node_failure)
    )
    has_callback = bool(sniff_callback)
    if wants_sniffing and not has_callback:
        raise ValueError("Enabling sniffing requires specifying a 'sniff_callback'")
    if has_callback and not wants_sniffing:
        raise ValueError(
            "Using 'sniff_callback' requires enabling sniffing via 'sniff_on_start', "
            "'sniff_before_requests' or 'sniff_on_node_failure'"
        )
    # Sniffed nodes inherit options from the existing configs, so warn
    # when multiple configs disagree on anything that would be copied.
    if wants_sniffing and len(node_configs) > 1:
        warn_if_varying_node_config_options(node_configs)
def warn_if_varying_node_config_options(node_configs: List[NodeConfig]) -> None:
    """Warn when the given node configs differ in options sniffing would copy.

    Sniffed nodes are built from the options of the configured nodes, so
    everything other than the connection coordinates should be uniform for
    sniffing to produce sensible configurations.
    """
    # Attributes that are allowed (and expected) to vary between nodes.
    ignored_attrs = {"host", "port", "connections_per_node", "_extras"}

    def comparable_options(config):
        # Project a config down to only the options that must match.
        return {
            key: value
            for key, value in dataclasses.asdict(config).items()
            if key not in ignored_attrs
        }

    baseline = None
    for config in node_configs:
        options = comparable_options(config)
        if baseline is None:
            baseline = options
        elif options != baseline:
            # Warn for each config that disagrees with the first one seen.
            warnings.warn(
                "Detected NodeConfig instances with different options. "
                "It's recommended to keep all options except for "
                "'host' and 'port' the same for sniffing to work reliably.",
                category=TransportWarning,
                stacklevel=warn_stacklevel(),
            )
| # Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import dataclasses
import time
import warnings
from platform import python_version
from typing import (
Any,
Callable,
Collection,
Dict,
List,
Mapping,
Optional,
Tuple,
Type,
Union,
cast,
)
from ._compat import Lock, warn_stacklevel
from ._exceptions import (
ConnectionError,
ConnectionTimeout,
SniffingError,
TransportError,
TransportWarning,
)
from ._models import (
DEFAULT,
ApiResponseMeta,
DefaultType,
HttpHeaders,
NodeConfig,
SniffOptions,
)
from ._node import AiohttpHttpNode, BaseNode, RequestsHttpNode, Urllib3HttpNode
from ._node_pool import NodePool, NodeSelector
from ._serializer import DEFAULT_SERIALIZERS, Serializer, SerializerCollection
from ._version import __version__
from .client_utils import client_meta_version, resolve_default
# Allows for using a node_class by name rather than import.
NODE_CLASS_NAMES: Dict[str, Type[BaseNode]] = {
    "urllib3": Urllib3HttpNode,
    "requests": RequestsHttpNode,
    "aiohttp": AiohttpHttpNode,
}
# These are HTTP status errors that shouldn't be considered
# 'errors' for marking a node as dead. These errors typically
# mean everything is fine server-wise and instead the API call
# in question responded successfully.
# NOTE(review): 'None' presumably covers responses that carry no
# HTTP status at all -- confirm against ApiResponseMeta producers.
NOT_DEAD_NODE_HTTP_STATUSES = {None, 400, 402, 401, 403, 404}
# Default (service, service-version) pair advertised in the
# 'x-elastic-client-meta' HTTP header.
DEFAULT_CLIENT_META_SERVICE = ("et", client_meta_version(__version__))
class Transport:
    """
    Encapsulation of transport-related logic. Handles instantiation of the
    individual nodes as well as creating a node pool to hold them.
    Main interface is the :meth:`elastic_transport.Transport.perform_request` method.
    """
    def __init__(
        self,
        node_configs: List[NodeConfig],
        node_class: Union[str, Type[BaseNode]] = Urllib3HttpNode,
        node_pool_class: Type[NodePool] = NodePool,
        randomize_nodes_in_pool: bool = True,
        node_selector_class: Optional[Union[str, Type[NodeSelector]]] = None,
        dead_node_backoff_factor: Optional[float] = None,
        max_dead_node_backoff: Optional[float] = None,
        serializers: Optional[Mapping[str, Serializer]] = None,
        default_mimetype: str = "application/json",
        max_retries: int = 3,
        retry_on_status: Collection[int] = (429, 502, 503, 504),
        retry_on_timeout: bool = False,
        sniff_on_start: bool = False,
        sniff_before_requests: bool = False,
        sniff_on_node_failure: bool = False,
        sniff_timeout: Optional[float] = 0.5,
        min_delay_between_sniffing: float = 10.0,
        sniff_callback: Optional[
            Callable[
                ["Transport", "SniffOptions"],
                List[NodeConfig],
            ]
        ] = None,
        meta_header: bool = True,
        client_meta_service: Tuple[str, str] = DEFAULT_CLIENT_META_SERVICE,
    ) -> None:
        """
        :arg node_configs: List of 'NodeConfig' instances to create initial set of nodes.
        :arg node_class: subclass of :class:`~elastic_transport.BaseNode` to use
            or the name of the Connection (ie 'urllib3', 'requests')
        :arg node_pool_class: subclass of :class:`~elastic_transport.NodePool` to use
        :arg randomize_nodes_in_pool: Set to false to not randomize nodes within the pool.
            Defaults to true.
        :arg node_selector_class: Class to be used to select nodes within
            the :class:`~elastic_transport.NodePool`.
        :arg dead_node_backoff_factor: Exponential backoff factor to calculate the amount
            of time to timeout a node after an unsuccessful API call.
        :arg max_dead_node_backoff: Maximum amount of time to timeout a node after an
            unsuccessful API call.
        :arg serializers: optional dict of serializer instances that will be
            used for deserializing data coming from the server. (key is the mimetype)
        :arg max_retries: Maximum number of retries for an API call.
            Set to 0 to disable retries. Defaults to ``3``.
        :arg retry_on_status: set of HTTP status codes on which we should retry
            on a different node. defaults to ``(429, 502, 503, 504)``
        :arg retry_on_timeout: should timeout trigger a retry on different
            node? (default ``False``)
        :arg sniff_on_start: If ``True`` will sniff for additional nodes as soon
            as possible, guaranteed before the first request.
        :arg sniff_on_node_failure: If ``True`` will sniff for additional nodes
            after a node is marked as dead in the pool.
        :arg sniff_before_requests: If ``True`` will occasionally sniff for additional
            nodes as requests are sent.
        :arg sniff_timeout: Timeout value in seconds to use for sniffing requests.
            Defaults to ``0.5`` seconds.
        :arg min_delay_between_sniffing: Number of seconds to wait between calls to
            :meth:`elastic_transport.Transport.sniff` to avoid sniffing too frequently.
            Defaults to 10 seconds.
        :arg sniff_callback: Function that is passed a :class:`elastic_transport.Transport` and
            :class:`elastic_transport.SniffOptions` and should do node discovery and
            return a list of :class:`elastic_transport.NodeConfig` instances.
        :arg meta_header: If set to False the ``X-Elastic-Client-Meta`` HTTP header won't be sent.
            Defaults to True.
        :arg client_meta_service: Key-value pair for the service field of the client metadata header.
            Defaults to the service key-value for Elastic Transport.
        """
        if isinstance(node_class, str):
            if node_class not in NODE_CLASS_NAMES:
                options = "', '".join(sorted(NODE_CLASS_NAMES.keys()))
                raise ValueError(
                    f"Unknown option for node_class: '{node_class}'. "
                    f"Available options are: '{options}'"
                )
            node_class = NODE_CLASS_NAMES[node_class]
        # Fails fast if the sniffing flags and callback are inconsistent.
        validate_sniffing_options(
            node_configs=node_configs,
            sniff_on_start=sniff_on_start,
            sniff_before_requests=sniff_before_requests,
            sniff_on_node_failure=sniff_on_node_failure,
            sniff_callback=sniff_callback,
        )
        # Create the default metadata for the x-elastic-client-meta
        # HTTP header. Only requires adding the (service, service_version)
        # tuple to the beginning of the client_meta
        self._transport_client_meta: Tuple[Tuple[str, str], ...] = (
            client_meta_service,
            ("py", client_meta_version(python_version())),
            ("t", client_meta_version(__version__)),
        )
        # Grab the 'HTTP_CLIENT_META' property from the node class
        http_client_meta = cast(
            Optional[Tuple[str, str]],
            getattr(node_class, "_CLIENT_META_HTTP_CLIENT", None),
        )
        if http_client_meta:
            self._transport_client_meta += (http_client_meta,)
        if not isinstance(meta_header, bool):
            raise TypeError("'meta_header' must be of type bool")
        self.meta_header = meta_header
        # serialization config
        _serializers = DEFAULT_SERIALIZERS.copy()
        # if custom serializers map has been supplied, override the defaults with it
        if serializers:
            _serializers.update(serializers)
        # Create our collection of serializers
        self.serializers = SerializerCollection(
            _serializers, default_mimetype=default_mimetype
        )
        # Set of default request options
        self.max_retries = max_retries
        self.retry_on_status = retry_on_status
        self.retry_on_timeout = retry_on_timeout
        # Build the NodePool from all the options
        node_pool_kwargs: Dict[str, Any] = {}
        if node_selector_class is not None:
            node_pool_kwargs["node_selector_class"] = node_selector_class
        if dead_node_backoff_factor is not None:
            node_pool_kwargs["dead_node_backoff_factor"] = dead_node_backoff_factor
        if max_dead_node_backoff is not None:
            node_pool_kwargs["max_dead_node_backoff"] = max_dead_node_backoff
        self.node_pool: NodePool = node_pool_class(
            node_configs,
            node_class=node_class,
            randomize_nodes=randomize_nodes_in_pool,
            **node_pool_kwargs,
        )
        self._sniff_on_start = sniff_on_start
        self._sniff_before_requests = sniff_before_requests
        self._sniff_on_node_failure = sniff_on_node_failure
        self._sniff_timeout = sniff_timeout
        self._sniff_callback = sniff_callback
        self._sniffing_lock = Lock()  # Used to track whether we're currently sniffing.
        self._min_delay_between_sniffing = min_delay_between_sniffing
        self._last_sniffed_at = 0.0  # Epoch seconds of the last sniff attempt.
        if sniff_on_start:
            self.sniff(True)
    def perform_request(  # type: ignore[override,return]
        self,
        method: str,
        target: str,
        *,
        body: Optional[Any] = None,
        headers: Union[Mapping[str, Any], DefaultType] = DEFAULT,
        max_retries: Union[int, DefaultType] = DEFAULT,
        retry_on_status: Union[Collection[int], DefaultType] = DEFAULT,
        retry_on_timeout: Union[bool, DefaultType] = DEFAULT,
        request_timeout: Union[Optional[float], DefaultType] = DEFAULT,
        client_meta: Union[Tuple[Tuple[str, str], ...], DefaultType] = DEFAULT,
    ) -> Tuple[ApiResponseMeta, Any]:
        """
        Perform the actual request. Retrieve a node from the node
        pool, pass all the information to its perform_request method and
        return the data.
        If an exception was raised, mark the node as failed and retry (up
        to ``max_retries`` times).
        If the operation was successful and the node used was previously
        marked as dead, mark it as live, resetting its failure count.
        :arg method: HTTP method to use
        :arg target: HTTP request target
        :arg body: body of the request, will be serialized using serializer and
            passed to the node
        :arg headers: Additional headers to send with the request.
        :arg max_retries: Maximum number of retries before giving up on a request.
            Set to ``0`` to disable retries.
        :arg retry_on_status: Collection of HTTP status codes to retry.
        :arg retry_on_timeout: Set to true to retry after timeout errors.
        :arg request_timeout: Amount of time to wait for a response to fail with a timeout error.
        :arg client_meta: Extra client metadata key-value pairs to send in the client meta header.
        :returns: Tuple of the :class:`elastic_transport.ApiResponseMeta` with the deserialized response.
        """
        if headers is DEFAULT:
            request_headers = HttpHeaders()
        else:
            request_headers = HttpHeaders(headers)
        # Per-call options fall back to the transport-wide defaults.
        max_retries = resolve_default(max_retries, self.max_retries)
        retry_on_timeout = resolve_default(retry_on_timeout, self.retry_on_timeout)
        retry_on_status = resolve_default(retry_on_status, self.retry_on_status)
        if self.meta_header:
            request_headers["x-elastic-client-meta"] = ",".join(
                f"{k}={v}"
                for k, v in self._transport_client_meta
                + resolve_default(client_meta, ())
            )
        # Serialize the request body to bytes based on the given mimetype.
        request_body: Optional[bytes]
        if body is not None:
            if "content-type" not in request_headers:
                raise ValueError(
                    "Must provide a 'Content-Type' header to requests with bodies"
                )
            request_body = self.serializers.dumps(
                body, mimetype=request_headers["content-type"]
            )
        else:
            request_body = None
        # Errors are stored from (oldest->newest)
        errors: List[Exception] = []
        for attempt in range(max_retries + 1):
            # If we sniff before requests are made we want to do so before
            # 'node_pool.get()' is called so our sniffed nodes show up in the pool.
            if self._sniff_before_requests:
                self.sniff(False)
            retry = False
            node_failure = False
            # NOTE(review): 'last_response' is reset to None every iteration, so
            # the "forward the last response" branch in the except-handler below
            # can never see a response from a *previous* attempt -- confirm
            # whether this was meant to be initialized before the loop.
            last_response: Optional[Tuple[ApiResponseMeta, Any]] = None
            node = self.node_pool.get()
            try:
                meta, raw_data = node.perform_request(
                    method,
                    target,
                    body=request_body,
                    headers=request_headers,
                    request_timeout=request_timeout,
                )
                if raw_data not in (None, b""):
                    data = self.serializers.loads(raw_data, meta.mimetype)
                else:
                    data = None
                if meta.status in retry_on_status:
                    retry = True
                    # Keep track of the last response we see so we can return
                    # it in case the retried request returns with a transport error.
                    last_response = (meta, data)
            except TransportError as e:
                if isinstance(e, ConnectionTimeout):
                    retry = retry_on_timeout
                    node_failure = True
                elif isinstance(e, ConnectionError):
                    retry = True
                    node_failure = True
                # If the error was determined to be a node failure
                # we mark it dead in the node pool to allow for
                # other nodes to be retried.
                if node_failure:
                    self.node_pool.mark_dead(node)
                    if self._sniff_on_node_failure:
                        try:
                            self.sniff(False)
                        except TransportError:
                            # If sniffing on failure, it could fail too. Catch the
                            # exception not to interrupt the retries.
                            pass
                if not retry or attempt >= max_retries:
                    # Since we're exhausted but we have previously
                    # received some sort of response from the API
                    # we should forward that along instead of the
                    # transport error. Likely to be more actionable.
                    if last_response is not None:
                        return last_response
                    e.errors = tuple(errors)
                    raise
                else:
                    errors.append(e)
            else:
                # If we got back a response we need to check if that status
                # is indicative of a healthy node even if it's a non-2XX status
                if (
                    200 <= meta.status < 299
                    or meta.status in NOT_DEAD_NODE_HTTP_STATUSES
                ):
                    self.node_pool.mark_live(node)
                else:
                    self.node_pool.mark_dead(node)
                    if self._sniff_on_node_failure:
                        try:
                            self.sniff(False)
                        except TransportError:
                            # If sniffing on failure, it could fail too. Catch the
                            # exception not to interrupt the retries.
                            pass
                # We either got a response we're happy with or
                # we've exhausted all of our retries so we return it.
                if not retry or attempt >= max_retries:
                    return meta, data
    def sniff(self, is_initial_sniff: bool = False) -> None:
        """Run the configured ``sniff_callback`` and add discovered nodes to the pool.

        Only one sniff runs at a time (guarded by ``_sniffing_lock``) and, except
        for the initial sniff, repeat sniffs within ``min_delay_between_sniffing``
        seconds of the previous one are skipped.
        """
        previously_sniffed_at = self._last_sniffed_at
        should_sniff = self._should_sniff(is_initial_sniff)
        try:
            if should_sniff:
                self._last_sniffed_at = time.time()
                options = SniffOptions(
                    is_initial_sniff=is_initial_sniff, sniff_timeout=self._sniff_timeout
                )
                assert self._sniff_callback is not None
                node_configs = self._sniff_callback(self, options)
                if not node_configs and is_initial_sniff:
                    raise SniffingError(
                        "No viable nodes were discovered on the initial sniff attempt"
                    )
                for node_config in node_configs:
                    self.node_pool.add(node_config)
        # If sniffing failed for any reason we
        # want to allow retrying immediately.
        except Exception:
            self._last_sniffed_at = previously_sniffed_at
            raise
        # If we started a sniff we need to release the lock.
        finally:
            if should_sniff:
                self._sniffing_lock.release()
    def close(self) -> None:
        """
        Explicitly closes all nodes in the transport's pool
        """
        for node in self.node_pool.all():
            node.close()
    def _should_sniff(self, is_initial_sniff: bool) -> bool:
        """Decide if we should sniff or not. If we return ``True`` from this
        method the caller has a responsibility to unlock the ``_sniffing_lock``
        """
        if not is_initial_sniff and (
            time.time() - self._last_sniffed_at < self._min_delay_between_sniffing
        ):
            return False
        # Non-blocking acquire: False means another sniff is already running.
        return self._sniffing_lock.acquire(False)
def validate_sniffing_options(
    *,
    node_configs: List[NodeConfig],
    sniff_before_requests: bool,
    sniff_on_start: bool,
    sniff_on_node_failure: bool,
    sniff_callback: Optional[Any],
) -> None:
    """Check that the sniffing-related constructor options are consistent.

    A callback must be supplied whenever any sniffing trigger is enabled,
    and must not be supplied when none of them is.
    """
    wants_sniffing = any(
        (sniff_before_requests, sniff_on_start, sniff_on_node_failure)
    )
    has_callback = bool(sniff_callback)
    if wants_sniffing and not has_callback:
        raise ValueError("Enabling sniffing requires specifying a 'sniff_callback'")
    if has_callback and not wants_sniffing:
        raise ValueError(
            "Using 'sniff_callback' requires enabling sniffing via 'sniff_on_start', "
            "'sniff_before_requests' or 'sniff_on_node_failure'"
        )
    # Sniffed nodes inherit options from the existing configs, so warn
    # when multiple configs disagree on anything that would be copied.
    if wants_sniffing and len(node_configs) > 1:
        warn_if_varying_node_config_options(node_configs)
def warn_if_varying_node_config_options(node_configs: List[NodeConfig]) -> None:
"""Function which detects situations when sniffing may produce incorrect configs"""
exempt_attrs = {"host", "port", "connections_per_node", "_extras"}
match_attr_dict = None
for node_config in node_configs:
attr_dict = {
k: v
for k, v in dataclasses.asdict(node_config).items()
if k not in exempt_attrs
}
if match_attr_dict is None:
match_attr_dict = attr_dict
# Detected two nodes that have different config, warn the user.
elif match_attr_dict != attr_dict:
warnings.warn(
"Detected NodeConfig instances with different options. "
"It's recommended to keep all options except for "
"'host' and 'port' the same for sniffing to work reliably.",
category=TransportWarning,
stacklevel=warn_stacklevel(),
) | en | 0.850634 | # Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # Allows for using a node_class by name rather than import. # These are HTTP status errors that shouldn't be considered # 'errors' for marking a node as dead. These errors typically # mean everything is fine server-wise and instead the API call # in question responded successfully. Encapsulation of transport-related to logic. Handles instantiation of the individual nodes as well as creating a node pool to hold them. Main interface is the :meth:`elastic_transport.Transport.perform_request` method. :arg node_configs: List of 'NodeConfig' instances to create initial set of nodes. :arg node_class: subclass of :class:`~elastic_transport.BaseNode` to use or the name of the Connection (ie 'urllib3', 'requests') :arg node_pool_class: subclass of :class:`~elastic_transport.NodePool` to use :arg randomize_nodes_in_pool: Set to false to not randomize nodes within the pool. Defaults to true. :arg node_selector_class: Class to be used to select nodes within the :class:`~elastic_transport.NodePool`. :arg dead_node_backoff_factor: Exponential backoff factor to calculate the amount of time to timeout a node after an unsuccessful API call. 
:arg max_dead_node_backoff: Maximum amount of time to timeout a node after an unsuccessful API call. :arg serializers: optional dict of serializer instances that will be used for deserializing data coming from the server. (key is the mimetype) :arg max_retries: Maximum number of retries for an API call. Set to 0 to disable retries. Defaults to ``0``. :arg retry_on_status: set of HTTP status codes on which we should retry on a different node. defaults to ``(429, 502, 503, 504)`` :arg retry_on_timeout: should timeout trigger a retry on different node? (default ``False``) :arg sniff_on_start: If ``True`` will sniff for additional nodes as soon as possible, guaranteed before the first request. :arg sniff_on_node_failure: If ``True`` will sniff for additional nodees after a node is marked as dead in the pool. :arg sniff_before_requests: If ``True`` will occasionally sniff for additional nodes as requests are sent. :arg sniff_timeout: Timeout value in seconds to use for sniffing requests. Defaults to 1 second. :arg min_delay_between_sniffing: Number of seconds to wait between calls to :meth:`elastic_transport.Transport.sniff` to avoid sniffing too frequently. Defaults to 10 seconds. :arg sniff_callback: Function that is passed a :class:`elastic_transport.Transport` and :class:`elastic_transport.SniffOptions` and should do node discovery and return a list of :class:`elastic_transport.NodeConfig` instances. :arg meta_header: If set to False the ``X-Elastic-Client-Meta`` HTTP header won't be sent. Defaults to True. :arg client_meta_service: Key-value pair for the service field of the client metadata header. Defaults to the service key-value for Elastic Transport. # Create the default metadata for the x-elastic-client-meta # HTTP header. 
Only requires adding the (service, service_version) # tuple to the beginning of the client_meta # Grab the 'HTTP_CLIENT_META' property from the node class # serialization config # if custom serializers map has been supplied, override the defaults with it # Create our collection of serializers # Set of default request options # Build the NodePool from all the options # Used to track whether we're currently sniffing. # type: ignore[override,return] Perform the actual request. Retrieve a node from the node pool, pass all the information to it's perform_request method and return the data. If an exception was raised, mark the node as failed and retry (up to ``max_retries`` times). If the operation was successful and the node used was previously marked as dead, mark it as live, resetting it's failure count. :arg method: HTTP method to use :arg target: HTTP request target :arg body: body of the request, will be serialized using serializer and passed to the node :arg headers: Additional headers to send with the request. :arg max_retries: Maximum number of retries before giving up on a request. Set to ``0`` to disable retries. :arg retry_on_status: Collection of HTTP status codes to retry. :arg retry_on_timeout: Set to true to retry after timeout errors. :arg request_timeout: Amount of time to wait for a response to fail with a timeout error. :arg client_meta: Extra client metadata key-value pairs to send in the client meta header. :returns: Tuple of the :class:`elastic_transport.ApiResponseMeta` with the deserialized response. # Serialize the request body to bytes based on the given mimetype. # Errors are stored from (oldest->newest) # If we sniff before requests are made we want to do so before # 'node_pool.get()' is called so our sniffed nodes show up in the pool. # Keep track of the last response we see so we can return # it in case the retried request returns with a transport error. 
# If the error was determined to be a node failure # we mark it dead in the node pool to allow for # other nodes to be retried. # If sniffing on failure, it could fail too. Catch the # exception not to interrupt the retries. # Since we're exhausted but we have previously # received some sort of response from the API # we should forward that along instead of the # transport error. Likely to be more actionable. # If we got back a response we need to check if that status # is indicative of a healthy node even if it's a non-2XX status # If sniffing on failure, it could fail too. Catch the # exception not to interrupt the retries. # We either got a response we're happy with or # we've exhausted all of our retries so we return it. # If sniffing failed for any reason we # want to allow retrying immediately. # If we started a sniff we need to release the lock. Explicitly closes all nodes in the transport's pool Decide if we should sniff or not. If we return ``True`` from this method the caller has a responsibility to unlock the ``_sniffing_lock`` Validates the Transport configurations for sniffing # If we're sniffing we want to warn the user for non-homogenous NodeConfigs. Function which detects situations when sniffing may produce incorrect configs # Detected two nodes that have different config, warn the user. | 1.56416 | 2 |
deepfcn/data/extract_node_features.py | shobrook/DeepFCN | 9 | 6617024 | <reponame>shobrook/DeepFCN<filename>deepfcn/data/extract_node_features.py
# Standard Library
from statistics import mean, median, stdev, variance
# Third Party
import nolds
import numpy as np
import networkx as nx
from scipy.stats import kurtosis, skew, linregress
# Local
from data.extract_fcn import extract_fcn
######################
# TIME SERIES FEATURES
######################
TIME_SERIES_NODE_FEATURES = ["entropy", "fractal_dim", "lyap_r", "dfa", "mean",
"median", "range", "std", "auto_corr", "auto_cov"]
EXTRACT_TS_FEATURE = {
# "approx_entropy": nolds.sampen, # BUG: Produces inf values
# "fractal_dim": lambda node_ts: nolds.corr_dim(node_ts, emb_dim=10), # BUG: Always produces the same value
"lyap_r": nolds.lyap_r,
"hurst_rs": nolds.hurst_rs,
"dfa": nolds.dfa,
"mean": mean,
"median": median,
"range": lambda signal: max(signal) - min(signal),
"std": stdev,
"skew": skew,
"kurtosis": kurtosis
}
def _right_shift_time_series(time_series, shift_by):
rs_time_series = []
for i in range(len(time_series) - 1):
rs_time_series.append(time_series[i + 1])
return rs_time_series
def _calculate_auto_corr_cov(time_series):
rs_time_series = _right_shift_time_series(time_series, shift_by=1)
slope, intercept, auto_corr, p_val, std_err = linregress(
time_series[:-1],
rs_time_series
)
auto_cov = slope * variance(time_series)
return auto_corr, auto_cov
def _extract_time_series_features(signals, feature_names):
feature_names = [f for f in feature_names if f in TIME_SERIES_NODE_FEATURES]
num_rois, num_features = len(signals), len(feature_names)
feature_matrix = np.empty([num_rois, num_features])
if not feature_names:
return feature_matrix
for i in range(num_rois):
for j, feature_name in enumerate(feature_names):
if feature_name == "auto_corr":
feature_matrix[i][j], _ = _calculate_auto_corr_cov(signals[i])
elif feature_name == "auto_cov":
_, feature_matrix[i][j] = _calculate_auto_corr_cov(signals[i])
else:
extract_feature = EXTRACT_TS_FEATURE[feature_name]
feature_matrix[i][j] = extract_feature(signals[i])
return feature_matrix
##################
# NETWORK FEATURES
##################
NETWORK_NODE_FEATURES = ["weighted_degree", "clustering_coef",
"closeness_centrality", "betweenness_centrality"]
EXTRACT_NETWORK_FEATURE = {
"weighted_degree": lambda G, i: G.degree[i],
"clustering_coef": lambda G, i: nx.clustering(G, weight="weight")[i],
"degree_centrality": lambda G, i: nx.degree_centrality(G)[i],
"closeness_centrality": lambda H, i: nx.closeness_centrality(H, distance="weight")[i],
"betweenness_centrality": lambda H, i: nx.betweenness_centrality(H, distance="weight")[i]
}
def _create_networkx_graph(signals):
fc_matrix = np.squeeze(extract_fcn(signals))
return nx.from_numpy_matrix(np.matrix(fc_matrix))
def _create_reciprical_graph(G):
# NOTE: Dijkstra's algorithm is used for computing shortest path lengths for
# the closeness and betweenness centrality. So, weights have to be
# recalculated to represent "distances" instead of correlations.
H = G.copy()
for i, j, data in H.edges(data=True):
data["weight"] = 1 / abs(data["weight"])
return H
def _extract_network_features(signals, feature_names):
feature_names = [f for f in feature_names if f in NETWORK_NODE_FEATURES]
num_rois, num_features = len(signals), len(feature_names)
feature_matrix = np.empty([num_rois, num_features])
if not feature_names:
return feature_matrix
G = _create_networkx_graph(signals)
H = _create_reciprical_graph(G)
for i in range(num_rois):
for j, feature_name in enumerate(feature_names):
extract_feature = EXTRACT_NETWORK_FEATURE[feature_name]
if feature_name in ("closeness_centrality", "betweenness_centrality"):
feature_matrix[i][j] = extract_feature(H, i)
else:
feature_matrix[i][j] = extract_feature(G, i)
return feature_matrix
######
# MAIN
######
def extract_node_features(signals, feature_names=["mean"]):
"""
Extracts node (ROI) features from BOLD signals. Features can either be
calculated from the node's time series, or from the node's graph theoretic
properties in a FCN.
Parameters
----------
signals : numpy.ndarray
Array of BOLD signals; shape = [num_rois, time_series_len]
feature_names : list
Names of node features to extract
Returns
-------
numpy.ndarray
Array of ROI/node features; shape = [num_rois, num_features]
"""
ts_features = _extract_time_series_features(signals, feature_names)
network_features = _extract_network_features(signals, feature_names)
return np.concatenate((ts_features, network_features), axis=1)
| # Standard Library
from statistics import mean, median, stdev, variance
# Third Party
import nolds
import numpy as np
import networkx as nx
from scipy.stats import kurtosis, skew, linregress
# Local
from data.extract_fcn import extract_fcn
######################
# TIME SERIES FEATURES
######################
TIME_SERIES_NODE_FEATURES = ["entropy", "fractal_dim", "lyap_r", "dfa", "mean",
"median", "range", "std", "auto_corr", "auto_cov"]
EXTRACT_TS_FEATURE = {
# "approx_entropy": nolds.sampen, # BUG: Produces inf values
# "fractal_dim": lambda node_ts: nolds.corr_dim(node_ts, emb_dim=10), # BUG: Always produces the same value
"lyap_r": nolds.lyap_r,
"hurst_rs": nolds.hurst_rs,
"dfa": nolds.dfa,
"mean": mean,
"median": median,
"range": lambda signal: max(signal) - min(signal),
"std": stdev,
"skew": skew,
"kurtosis": kurtosis
}
def _right_shift_time_series(time_series, shift_by):
rs_time_series = []
for i in range(len(time_series) - 1):
rs_time_series.append(time_series[i + 1])
return rs_time_series
def _calculate_auto_corr_cov(time_series):
rs_time_series = _right_shift_time_series(time_series, shift_by=1)
slope, intercept, auto_corr, p_val, std_err = linregress(
time_series[:-1],
rs_time_series
)
auto_cov = slope * variance(time_series)
return auto_corr, auto_cov
def _extract_time_series_features(signals, feature_names):
feature_names = [f for f in feature_names if f in TIME_SERIES_NODE_FEATURES]
num_rois, num_features = len(signals), len(feature_names)
feature_matrix = np.empty([num_rois, num_features])
if not feature_names:
return feature_matrix
for i in range(num_rois):
for j, feature_name in enumerate(feature_names):
if feature_name == "auto_corr":
feature_matrix[i][j], _ = _calculate_auto_corr_cov(signals[i])
elif feature_name == "auto_cov":
_, feature_matrix[i][j] = _calculate_auto_corr_cov(signals[i])
else:
extract_feature = EXTRACT_TS_FEATURE[feature_name]
feature_matrix[i][j] = extract_feature(signals[i])
return feature_matrix
##################
# NETWORK FEATURES
##################
NETWORK_NODE_FEATURES = ["weighted_degree", "clustering_coef",
"closeness_centrality", "betweenness_centrality"]
EXTRACT_NETWORK_FEATURE = {
"weighted_degree": lambda G, i: G.degree[i],
"clustering_coef": lambda G, i: nx.clustering(G, weight="weight")[i],
"degree_centrality": lambda G, i: nx.degree_centrality(G)[i],
"closeness_centrality": lambda H, i: nx.closeness_centrality(H, distance="weight")[i],
"betweenness_centrality": lambda H, i: nx.betweenness_centrality(H, distance="weight")[i]
}
def _create_networkx_graph(signals):
fc_matrix = np.squeeze(extract_fcn(signals))
return nx.from_numpy_matrix(np.matrix(fc_matrix))
def _create_reciprical_graph(G):
# NOTE: Dijkstra's algorithm is used for computing shortest path lengths for
# the closeness and betweenness centrality. So, weights have to be
# recalculated to represent "distances" instead of correlations.
H = G.copy()
for i, j, data in H.edges(data=True):
data["weight"] = 1 / abs(data["weight"])
return H
def _extract_network_features(signals, feature_names):
feature_names = [f for f in feature_names if f in NETWORK_NODE_FEATURES]
num_rois, num_features = len(signals), len(feature_names)
feature_matrix = np.empty([num_rois, num_features])
if not feature_names:
return feature_matrix
G = _create_networkx_graph(signals)
H = _create_reciprical_graph(G)
for i in range(num_rois):
for j, feature_name in enumerate(feature_names):
extract_feature = EXTRACT_NETWORK_FEATURE[feature_name]
if feature_name in ("closeness_centrality", "betweenness_centrality"):
feature_matrix[i][j] = extract_feature(H, i)
else:
feature_matrix[i][j] = extract_feature(G, i)
return feature_matrix
######
# MAIN
######
def extract_node_features(signals, feature_names=["mean"]):
"""
Extracts node (ROI) features from BOLD signals. Features can either be
calculated from the node's time series, or from the node's graph theoretic
properties in a FCN.
Parameters
----------
signals : numpy.ndarray
Array of BOLD signals; shape = [num_rois, time_series_len]
feature_names : list
Names of node features to extract
Returns
-------
numpy.ndarray
Array of ROI/node features; shape = [num_rois, num_features]
"""
ts_features = _extract_time_series_features(signals, feature_names)
network_features = _extract_network_features(signals, feature_names)
return np.concatenate((ts_features, network_features), axis=1) | en | 0.721103 | # Standard Library # Third Party # Local ###################### # TIME SERIES FEATURES ###################### # "approx_entropy": nolds.sampen, # BUG: Produces inf values # "fractal_dim": lambda node_ts: nolds.corr_dim(node_ts, emb_dim=10), # BUG: Always produces the same value ################## # NETWORK FEATURES ################## # NOTE: Dijkstra's algorithm is used for computing shortest path lengths for # the closeness and betweenness centrality. So, weights have to be # recalculated to represent "distances" instead of correlations. ###### # MAIN ###### Extracts node (ROI) features from BOLD signals. Features can either be calculated from the node's time series, or from the node's graph theoretic properties in a FCN. Parameters ---------- signals : numpy.ndarray Array of BOLD signals; shape = [num_rois, time_series_len] feature_names : list Names of node features to extract Returns ------- numpy.ndarray Array of ROI/node features; shape = [num_rois, num_features] | 2.338071 | 2 |
plugins/help.py | hexoserver1/justice | 28 | 6617025 | from utils.safe import JusticePlugin
from disco.types.message import MessageEmbed
class HelpPlug(JusticePlugin):
"""Help | Display command details"""
__name__ = "HelpPlug"
@JusticePlugin.command("help", "[name:str]")
def show_help(self, event, name: str = None):
"""Explain commands or list them
The help commands provides an easy way for you to learn about a certain command, or list available ones.
If you want to access a specific command, type `]help <name>`, For example, `]help ban`.
If you want to display a list all command categories, simply type `]help` with nothing else.
If you want to list all commands in a category, simply type `]help <Name>`, For example, `]help Mod`
Tip: commands will always be all lower case, command categories are Titled.
"""
if not name:
embed = MessageEmbed()
embed.color = 0x00FFFF
embed.title = "List Command Categories"
embed.description = "If you want to see how to use the help command, type `]help help`, otherwise, " \
"below are the available command categories."
for plugin in self.bot.plugins.values():
name, desc = plugin.__doc__.split(' | ')
embed.add_field(name=name, value=desc, inline=False)
event.msg.reply(embed=embed)
elif name.title() == name:
for plugin in self.bot.plugins.values():
if name in plugin.__doc__:
break
else:
return event.msg.reply("Sorry, but I could not find the category '{0}'".format(name))
embed = MessageEmbed()
embed.color = 0x00FFFF
embed.title = plugin.__doc__
for func in plugin.meta_funcs:
if hasattr(func, 'docs'):
embed.add_field(name=func.docs[0], value=func.docs[1], inline=False)
event.msg.reply(embed=embed)
else:
for plugin in self.bot.plugins.values():
for func in plugin.meta_funcs:
if hasattr(func, 'docs') and func.docs[0] == name:
embed = MessageEmbed()
embed.title = func.docs[1]
embed.color = 0x00FFFF
embed.description = func.docs[2]
return event.msg.reply(embed=embed)
event.msg.reply("Sorry, but I could not find the command '{0}'".format(name))
del JusticePlugin # We don't want disco to load this plugin
| from utils.safe import JusticePlugin
from disco.types.message import MessageEmbed
class HelpPlug(JusticePlugin):
"""Help | Display command details"""
__name__ = "HelpPlug"
@JusticePlugin.command("help", "[name:str]")
def show_help(self, event, name: str = None):
"""Explain commands or list them
The help commands provides an easy way for you to learn about a certain command, or list available ones.
If you want to access a specific command, type `]help <name>`, For example, `]help ban`.
If you want to display a list all command categories, simply type `]help` with nothing else.
If you want to list all commands in a category, simply type `]help <Name>`, For example, `]help Mod`
Tip: commands will always be all lower case, command categories are Titled.
"""
if not name:
embed = MessageEmbed()
embed.color = 0x00FFFF
embed.title = "List Command Categories"
embed.description = "If you want to see how to use the help command, type `]help help`, otherwise, " \
"below are the available command categories."
for plugin in self.bot.plugins.values():
name, desc = plugin.__doc__.split(' | ')
embed.add_field(name=name, value=desc, inline=False)
event.msg.reply(embed=embed)
elif name.title() == name:
for plugin in self.bot.plugins.values():
if name in plugin.__doc__:
break
else:
return event.msg.reply("Sorry, but I could not find the category '{0}'".format(name))
embed = MessageEmbed()
embed.color = 0x00FFFF
embed.title = plugin.__doc__
for func in plugin.meta_funcs:
if hasattr(func, 'docs'):
embed.add_field(name=func.docs[0], value=func.docs[1], inline=False)
event.msg.reply(embed=embed)
else:
for plugin in self.bot.plugins.values():
for func in plugin.meta_funcs:
if hasattr(func, 'docs') and func.docs[0] == name:
embed = MessageEmbed()
embed.title = func.docs[1]
embed.color = 0x00FFFF
embed.description = func.docs[2]
return event.msg.reply(embed=embed)
event.msg.reply("Sorry, but I could not find the command '{0}'".format(name))
del JusticePlugin # We don't want disco to load this plugin
| en | 0.797441 | Help | Display command details Explain commands or list them The help commands provides an easy way for you to learn about a certain command, or list available ones. If you want to access a specific command, type `]help <name>`, For example, `]help ban`. If you want to display a list all command categories, simply type `]help` with nothing else. If you want to list all commands in a category, simply type `]help <Name>`, For example, `]help Mod` Tip: commands will always be all lower case, command categories are Titled. # We don't want disco to load this plugin | 2.880797 | 3 |
gimpbbio/gimpbbio/tests/test_gpio.py | SaintGimp/BeagleBoneHardware | 2 | 6617026 | from gimpbbio import gpio
from . import fake_filesystem
def test_pins_are_accessible_via_key():
assert gpio.pins["USR0"].name == "USR0"
def test_pins_are_accessible_via_attribute():
assert gpio.pins.usr0.name == "USR0"
def test_only_gpio_pins_are_accessible():
assert not hasattr(gpio.pins, "P9_1")
def test_can_open_for_output(monkeypatch):
fake_filesystem.hook(monkeypatch)
gpio.pins.p8_15.open_for_output()
export = fake_filesystem.get("/sys/class/gpio/export")
assert export.content == "47"
direction = fake_filesystem.get("/sys/class/gpio/gpio47/direction")
assert direction.content == "out"
def test_can_set_output_pin_value_high(monkeypatch):
fake_filesystem.hook(monkeypatch)
gpio.pins.p8_15.open_for_output()
gpio.pins.p8_15.set_high()
value_file = fake_filesystem.get("/sys/class/gpio/gpio47/value")
assert value_file.content == True
def test_can_set_output_pin_value_low(monkeypatch):
fake_filesystem.hook(monkeypatch)
gpio.pins.p8_15.open_for_output()
gpio.pins.p8_15.set_low()
value_file = fake_filesystem.get("/sys/class/gpio/gpio47/value")
assert value_file.content == False
def test_can_open_for_input(monkeypatch):
fake_filesystem.hook(monkeypatch)
gpio.pins.p8_15.open_for_input()
export = fake_filesystem.get("/sys/class/gpio/export")
assert export.content == "47"
direction = fake_filesystem.get("/sys/class/gpio/gpio47/direction")
assert direction.content == "in"
def test_can_tell_if_pin_is_high(monkeypatch):
fake_filesystem.hook(monkeypatch)
gpio.pins.p8_15.open_for_input()
value_file = fake_filesystem.get("/sys/class/gpio/gpio47/value")
value_file.content = True
assert gpio.pins.p8_15.is_high()
assert not gpio.pins.p8_15.is_low()
def test_can_tell_if_pin_is_low(monkeypatch):
fake_filesystem.hook(monkeypatch)
gpio.pins.p8_15.open_for_input()
value_file = fake_filesystem.get("/sys/class/gpio/gpio47/value")
value_file.content = False
assert gpio.pins.p8_15.is_low()
assert not gpio.pins.p8_15.is_high()
def test_can_close(monkeypatch):
fake_filesystem.hook(monkeypatch)
gpio.pins.p8_15.open_for_input()
gpio.pins.p8_15.close()
value_file = fake_filesystem.get("/sys/class/gpio/gpio47/value")
value_file.opened_mode = None
unexport_file = fake_filesystem.get("/sys/class/gpio/unexport")
assert unexport_file.content == "47"
def test_can_open_for_input_with_pullup(monkeypatch):
fake_filesystem.hook(monkeypatch)
monkeypatch.setattr("os.listdir", lambda directory: ["bone_capemgr.8"])
slots_file = fake_filesystem.get("/sys/devices/bone_capemgr.8/slots")
slots_file.content = "some other overlay"
def check_command(command): assert "gimpbbio_P8_15-00A0" in command
monkeypatch.setattr("os.system", check_command)
gpio.pins.p8_15.open_for_input(pull = gpio.PULL_UP)
value_file = fake_filesystem.get("/lib/firmware/gimpbbio_P8_15-00A0.dts")
assert "P8_15" in value_file.content
assert slots_file.content == "gimpbbio_P8_15"
def test_can_open_for_input_with_active_low(monkeypatch):
fake_filesystem.hook(monkeypatch)
gpio.pins.p8_15.open_for_input(active_state = gpio.ACTIVE_LOW)
direction = fake_filesystem.get("/sys/class/gpio/gpio47/active_low")
assert direction.content == "1"
| from gimpbbio import gpio
from . import fake_filesystem
def test_pins_are_accessible_via_key():
assert gpio.pins["USR0"].name == "USR0"
def test_pins_are_accessible_via_attribute():
assert gpio.pins.usr0.name == "USR0"
def test_only_gpio_pins_are_accessible():
assert not hasattr(gpio.pins, "P9_1")
def test_can_open_for_output(monkeypatch):
fake_filesystem.hook(monkeypatch)
gpio.pins.p8_15.open_for_output()
export = fake_filesystem.get("/sys/class/gpio/export")
assert export.content == "47"
direction = fake_filesystem.get("/sys/class/gpio/gpio47/direction")
assert direction.content == "out"
def test_can_set_output_pin_value_high(monkeypatch):
fake_filesystem.hook(monkeypatch)
gpio.pins.p8_15.open_for_output()
gpio.pins.p8_15.set_high()
value_file = fake_filesystem.get("/sys/class/gpio/gpio47/value")
assert value_file.content == True
def test_can_set_output_pin_value_low(monkeypatch):
fake_filesystem.hook(monkeypatch)
gpio.pins.p8_15.open_for_output()
gpio.pins.p8_15.set_low()
value_file = fake_filesystem.get("/sys/class/gpio/gpio47/value")
assert value_file.content == False
def test_can_open_for_input(monkeypatch):
fake_filesystem.hook(monkeypatch)
gpio.pins.p8_15.open_for_input()
export = fake_filesystem.get("/sys/class/gpio/export")
assert export.content == "47"
direction = fake_filesystem.get("/sys/class/gpio/gpio47/direction")
assert direction.content == "in"
def test_can_tell_if_pin_is_high(monkeypatch):
fake_filesystem.hook(monkeypatch)
gpio.pins.p8_15.open_for_input()
value_file = fake_filesystem.get("/sys/class/gpio/gpio47/value")
value_file.content = True
assert gpio.pins.p8_15.is_high()
assert not gpio.pins.p8_15.is_low()
def test_can_tell_if_pin_is_low(monkeypatch):
fake_filesystem.hook(monkeypatch)
gpio.pins.p8_15.open_for_input()
value_file = fake_filesystem.get("/sys/class/gpio/gpio47/value")
value_file.content = False
assert gpio.pins.p8_15.is_low()
assert not gpio.pins.p8_15.is_high()
def test_can_close(monkeypatch):
fake_filesystem.hook(monkeypatch)
gpio.pins.p8_15.open_for_input()
gpio.pins.p8_15.close()
value_file = fake_filesystem.get("/sys/class/gpio/gpio47/value")
value_file.opened_mode = None
unexport_file = fake_filesystem.get("/sys/class/gpio/unexport")
assert unexport_file.content == "47"
def test_can_open_for_input_with_pullup(monkeypatch):
fake_filesystem.hook(monkeypatch)
monkeypatch.setattr("os.listdir", lambda directory: ["bone_capemgr.8"])
slots_file = fake_filesystem.get("/sys/devices/bone_capemgr.8/slots")
slots_file.content = "some other overlay"
def check_command(command): assert "gimpbbio_P8_15-00A0" in command
monkeypatch.setattr("os.system", check_command)
gpio.pins.p8_15.open_for_input(pull = gpio.PULL_UP)
value_file = fake_filesystem.get("/lib/firmware/gimpbbio_P8_15-00A0.dts")
assert "P8_15" in value_file.content
assert slots_file.content == "gimpbbio_P8_15"
def test_can_open_for_input_with_active_low(monkeypatch):
fake_filesystem.hook(monkeypatch)
gpio.pins.p8_15.open_for_input(active_state = gpio.ACTIVE_LOW)
direction = fake_filesystem.get("/sys/class/gpio/gpio47/active_low")
assert direction.content == "1"
| none | 1 | 2.354685 | 2 | |
train_evolving.py | czc567/UniGNN | 22 | 6617027 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import optimizer
import os
import numpy as np
import time
import datetime
import path
import shutil
import config
# Parse command-line / config options for this experiment.
args = config.parse()
# gpu, seed
# Fix all RNG seeds (torch, numpy, Python hashing) for reproducibility, and
# pin the visible CUDA device via env vars before any CUDA context is created.
torch.manual_seed(args.seed)
np.random.seed(args.seed)
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
os.environ['PYTHONHASHSEED'] = str(args.seed)
# Human-readable tags derived from boolean flags.
# NOTE(review): these two tags are not referenced later in this script.
use_norm = 'use-norm' if args.use_norm else 'no-norm'
add_self_loop = 'add-self-loop' if args.add_self_loop else 'no-self-loop'
#### configure output directory
dataname = f'{args.data}_{args.dataset}'
model_name = args.model_name
nlayer = args.nlayer
# Timestamp made filesystem-safe (spaces and colons replaced).
# NOTE(review): `dirname` appears unused below; out_dir is keyed by seed only.
dirname = f'{datetime.datetime.now()}'.replace(' ', '_').replace(':', '.')
out_dir = path.Path( f'./{args.out_dir}/{model_name}_{nlayer}_{dataname}/seed_{args.seed}' )
# Start from a clean output directory for this (model, nlayer, dataset, seed) combo.
if out_dir.exists():
    shutil.rmtree(out_dir)
out_dir.makedirs_p()
### configure logger
# base logger: per-epoch progress; result logger: per-run and final summaries.
# Both also echo to stdout unless --nostdout is set.
from logger import get_logger
baselogger = get_logger('base logger', f'{out_dir}/logging.log', not args.nostdout)
resultlogger = get_logger('result logger', f'{out_dir}/result.log', not args.nostdout)
baselogger.info(args)
# load data
from prepare import *
# Cross-run accuracy accumulators: final-epoch and best accuracy on the
# "seen" and "unseen" halves of the test set, one entry per run.
test_seens, test_unseens = [], []
best_test_seens, best_test_unseens = [], []
resultlogger.info(args)
def get_split(Y, p=0.2):
    """Stratified train/test split with the test set halved into seen/unseen.

    Samples the same number of training nodes from every class (stratified),
    then splits the remaining nodes evenly into a "seen" half and an
    "unseen" half (used for the evolving/inductive evaluation).

    Args:
        Y: 1-D label tensor/array (anything exposing ``.tolist()``).
           Labels are assumed to be exactly ``0 .. nclass-1``.
        p: approximate fraction of nodes used for training (default 0.2).

    Returns:
        ``(train_idx, test_idx_seen, test_idx_unseen)``: three disjoint,
        shuffled lists of node indices that together cover ``range(len(Y))``.
    """
    from random import sample, shuffle
    labels = Y.tolist()
    N, nclass = len(labels), len(set(labels))
    # Bucket node indices by class label.
    buckets = [[] for _ in range(nclass)]
    for i, y in enumerate(labels):
        buckets[y].append(i)
    # Equal number of training nodes per class (stratified sampling).
    # No need for the original torch.LongTensor/torch.cat round-trip just
    # to concatenate Python lists of indices.
    k = int(N * p / nclass)
    train_idx = [i for idxs in buckets for i in sample(idxs, k)]
    test_idx = list(set(range(N)) - set(train_idx))
    shuffle(train_idx)
    shuffle(test_idx)
    # First half of the shuffled test nodes is "seen" during training
    # (features visible), the second half is "unseen" (features masked).
    seen_len = len(test_idx) // 2
    return train_idx, test_idx[:seen_len], test_idx[seen_len:]
# load data
X, Y, G = fetch_data(args)  # node features X, labels Y, (hyper)graph structure G
for run in range(1, args.n_runs+1):
    # Each run gets its own sub-directory and a fresh random split.
    run_dir = out_dir / f'{run}'
    run_dir.makedirs_p()
    train_idx, test_idx_seen, test_idx_unseen = get_split(Y, 0.2)
    from collections import Counter
    # Sanity check: print the class distribution of the training split.
    counter = Counter(Y[train_idx].tolist())
    print(counter)
    # "Evolving" setting: zero out the features of unseen test nodes so the
    # model never observes them during training.
    Xseen = X.clone()
    Xseen[test_idx_unseen] = 0
    # model
    model, optimizer = initialise(Xseen, Y, G, args, test_idx_unseen)
    baselogger.info(f'Run {run}/{args.n_runs}, Total Epochs: {args.epochs}')
    baselogger.info(model)
    baselogger.info( f'total_params:{sum(p.numel() for p in model.parameters() if p.requires_grad)}' )
    tic_run = time.time()
    # Per-run trackers: best/last accuracy on the seen and unseen test halves.
    best_test_seen, best_test_unseen, test_seen, test_unseen, Z = 0, 0, 0, 0, None
    for epoch in range(args.epochs):
        # train
        tic_epoch = time.time()
        model.train()
        optimizer.zero_grad()
        # Forward pass on the masked features; loss computed on training nodes only.
        Z = model(Xseen)
        loss = F.nll_loss(Z[train_idx], Y[train_idx])
        loss.backward()
        optimizer.step()
        train_time = time.time() - tic_epoch
        # eval
        model.eval()
        # Evaluation uses the full, unmasked features (unseen nodes included).
        # NOTE(review): this forward pass is not wrapped in torch.no_grad(),
        # so autograd state is tracked unnecessarily during evaluation.
        Z = model(X)
        train_acc= accuracy(Z[train_idx], Y[train_idx])
        test_seen = accuracy(Z[test_idx_seen], Y[test_idx_seen])
        test_unseen = accuracy(Z[test_idx_unseen], Y[test_idx_unseen])
        # Track the best accuracy seen so far across epochs.
        best_test_seen = max(best_test_seen, test_seen)
        best_test_unseen = max(best_test_unseen, test_unseen)
        baselogger.info(f'epoch:{epoch} | loss:{loss:.4f} | train acc:{train_acc:.2f} | best_seen: {best_test_seen:.2f} | seen:{test_seen:.2f} | best_unseen: {best_test_unseen:.2f} | unseen:{test_unseen:.2f} | time:{train_time*1000:.1f}ms')
    resultlogger.info(f"Run {run}/{args.n_runs}, best_seen: {best_test_seen:.2f}, seen(last): {test_seen:.2f}, best_unseen: {best_test_unseen:.2f} , unseen:{test_unseen:.2f}, total time: {time.time()-tic_run:.2f}s")
    # Accumulate this run's last-epoch and best accuracies for the final summary.
    test_seens.append(test_seen)
    test_unseens.append(test_unseen)
    best_test_seens.append(best_test_seen)
    best_test_unseens.append(best_test_unseen)
# Mean and standard deviation over all runs.
resultlogger.info(f"Average final seen: {np.mean(test_seens)} ± {np.std(test_seens)}")
resultlogger.info(f"Average best seen: {np.mean(best_test_seens)} ± {np.std(best_test_seens)}")
resultlogger.info(f"Average final unseen: {np.mean(test_unseens)} ± {np.std(test_unseens)}")
resultlogger.info(f"Average best unseen: {np.mean(best_test_unseens)} ± {np.std(best_test_unseens)}")
| import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import optimizer
import os
import numpy as np
import time
import datetime
import path
import shutil
import config
args = config.parse()  # experiment options namespace (see config module)
# gpu, seed: pin every RNG source so a given --seed reproduces the same run
torch.manual_seed(args.seed)
np.random.seed(args.seed)
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
os.environ['PYTHONHASHSEED'] = str(args.seed)
# NOTE(review): use_norm / add_self_loop are built but appear unused in this
# script — confirm whether downstream modules read them.
use_norm = 'use-norm' if args.use_norm else 'no-norm'
add_self_loop = 'add-self-loop' if args.add_self_loop else 'no-self-loop'
#### configure output directory
dataname = f'{args.data}_{args.dataset}'
model_name = args.model_name
nlayer = args.nlayer
# NOTE(review): dirname is computed but never used below — confirm intent.
dirname = f'{datetime.datetime.now()}'.replace(' ', '_').replace(':', '.')
out_dir = path.Path( f'./{args.out_dir}/{model_name}_{nlayer}_{dataname}/seed_{args.seed}' )
# Any previous results for this model/dataset/seed combination are wiped here.
if out_dir.exists():
    shutil.rmtree(out_dir)
out_dir.makedirs_p()
### configure logger
from logger import get_logger
baselogger = get_logger('base logger', f'{out_dir}/logging.log', not args.nostdout)
resultlogger = get_logger('result logger', f'{out_dir}/result.log', not args.nostdout)
baselogger.info(args)
# load data
from prepare import *
# Per-run accuracy accumulators, filled by the training loop below.
test_seens, test_unseens = [], []
best_test_seens, best_test_unseens = [], []
resultlogger.info(args)
def get_split(Y, p=0.2):
    """Randomly split node indices into train / seen-test / unseen-test lists.

    Draws an equal number of training indices from every class (a balanced
    train set of roughly p * len(Y) indices overall), then shuffles the
    remaining indices and splits them in half into "seen" and "unseen"
    test partitions.

    Args:
        Y: 1-D torch tensor of integer class labels (assumed 0..nclass-1).
        p: overall fraction of nodes to place in the training split.

    Returns:
        (train_idx, test_idx_seen, test_idx_unseen) as plain Python lists.
    """
    from random import sample, shuffle
    labels = Y.tolist()
    total = len(labels)
    num_classes = len(set(labels))
    # Bucket node indices by their class label.
    buckets = [[] for _ in range(num_classes)]
    for index, label in enumerate(labels):
        buckets[label].append(index)
    # Draw the same number of training examples from every class.
    per_class = int(total * p / num_classes)
    train_idx = torch.cat([torch.LongTensor(sample(bucket, per_class)) for bucket in buckets]).tolist()
    test_idx = list(set(range(total)) - set(train_idx))
    shuffle(train_idx)
    shuffle(test_idx)
    # First half of the shuffled remainder is "seen", the second half "unseen".
    midpoint = len(test_idx) // 2
    return train_idx, test_idx[:midpoint], test_idx[midpoint:]
# load data
X, Y, G = fetch_data(args)
for run in range(1, args.n_runs+1):
    # Each run gets its own artifact directory under the experiment's out_dir.
    run_dir = out_dir / f'{run}'
    run_dir.makedirs_p()
    # Fresh balanced train split plus seen/unseen test halves for every run.
    train_idx, test_idx_seen, test_idx_unseen = get_split(Y, 0.2)
    from collections import Counter
    counter = Counter(Y[train_idx].tolist())
    print(counter)  # show per-class counts of the training split
    # "Unseen" test nodes have their features zeroed out during training.
    Xseen = X.clone()
    Xseen[test_idx_unseen] = 0
    # model
    model, optimizer = initialise(Xseen, Y, G, args, test_idx_unseen)
    baselogger.info(f'Run {run}/{args.n_runs}, Total Epochs: {args.epochs}')
    baselogger.info(model)
    baselogger.info( f'total_params:{sum(p.numel() for p in model.parameters() if p.requires_grad)}' )
    tic_run = time.time()
    best_test_seen, best_test_unseen, test_seen, test_unseen, Z = 0, 0, 0, 0, None
    for epoch in range(args.epochs):
        # train
        tic_epoch = time.time()
        model.train()
        optimizer.zero_grad()
        Z = model(Xseen)
        loss = F.nll_loss(Z[train_idx], Y[train_idx])
        loss.backward()
        optimizer.step()
        train_time = time.time() - tic_epoch
        # eval
        # NOTE(review): evaluation runs without torch.no_grad(), so gradients are
        # tracked needlessly on the full-feature forward pass — confirm intent.
        model.eval()
        Z = model(X)
        train_acc= accuracy(Z[train_idx], Y[train_idx])
        test_seen = accuracy(Z[test_idx_seen], Y[test_idx_seen])
        test_unseen = accuracy(Z[test_idx_unseen], Y[test_idx_unseen])
        # Track the best accuracies observed over all epochs of this run.
        best_test_seen = max(best_test_seen, test_seen)
        best_test_unseen = max(best_test_unseen, test_unseen)
        baselogger.info(f'epoch:{epoch} | loss:{loss:.4f} | train acc:{train_acc:.2f} | best_seen: {best_test_seen:.2f} | seen:{test_seen:.2f} | best_unseen: {best_test_unseen:.2f} | unseen:{test_unseen:.2f} | time:{train_time*1000:.1f}ms')
    resultlogger.info(f"Run {run}/{args.n_runs}, best_seen: {best_test_seen:.2f}, seen(last): {test_seen:.2f}, best_unseen: {best_test_unseen:.2f} , unseen:{test_unseen:.2f}, total time: {time.time()-tic_run:.2f}s")
    # Record this run's final and best accuracies for cross-run aggregation.
    test_seens.append(test_seen)
    test_unseens.append(test_unseen)
    best_test_seens.append(best_test_seen)
    best_test_unseens.append(best_test_unseen)
# Mean and standard deviation across all runs.
resultlogger.info(f"Average final seen: {np.mean(test_seens)} ± {np.std(test_seens)}")
resultlogger.info(f"Average best seen: {np.mean(best_test_seens)} ± {np.std(best_test_seens)}")
resultlogger.info(f"Average final unseen: {np.mean(test_unseens)} ± {np.std(test_unseens)}")
resultlogger.info(f"Average best unseen: {np.mean(best_test_unseens)} ± {np.std(best_test_unseens)}")
| en | 0.311738 | # gpu, seed #### configure output directory ### configure logger # load data # load data # model # train # eval | 2.030807 | 2 |
wotv_bot.py | andrewhayden/ffbe_forever_guild_bot | 0 | 6617028 | <reponame>andrewhayden/ffbe_forever_guild_bot<filename>wotv_bot.py
"""The runtime heart of the WOTV Bot."""
from __future__ import annotations
from dataclasses import dataclass
import io
from re import Match
from typing import List
import discord
from admin_utils import AdminUtils
from data_files import DataFiles
from data_file_search_utils import DataFileSearchUtils, UnitSkillSearchResult, UnitJobSearchResult, UnitSearchResult
from data_file_core_classes import WotvUnit
from esper_resonance_manager import EsperResonanceManager
from predictions import Predictions
from reminders import Reminders
from rolling import DiceSpec, Rolling
from vision_card_ocr_utils import VisionCardOcrUtils
from vision_card_manager import VisionCardManager
from weekly_event_schedule import WeeklyEventSchedule
from wotv_bot_common import ExposableException
from wotv_bot_constants import WotvBotConstants
class DiscordSafeException(ExposableException):
    """An exception carrying an error message that is safe to show in Discord."""

    def __init__(self, message):
        """Record the Discord-safe message text and initialize the base exception.

        :param message: text that may be echoed verbatim into a Discord channel
        """
        super().__init__(message)
        self.message = message
@dataclass
class WotvBotConfig:
    """Configuration for a single instance of the bot. All fields are required to be set.

    access_control_spreadsheet_id: the ID of the spreadsheet where access controls are kept
    esper_resonance_spreadsheet_id: the ID of the spreadsheet where esper resonance is tracked
    sandbox_esper_resonance_spreadsheet_id: the ID of the sandbox alternative to the real esper_resonance_spreadsheet_id
    vision_card_spreadsheet_id: the ID of the spreadsheet where vision cards are tracked
    spreadsheet_app: the Google spreadsheets Resource obtained from calling the spreadsheets() method on a Service Resource.
    discord_client: the Discord client
    data_files: the WotV data dump.
    reminders: the reminders subsystem.
    """
    access_control_spreadsheet_id: str = None
    esper_resonance_spreadsheet_id: str = None
    sandbox_esper_resonance_spreadsheet_id: str = None
    vision_card_spreadsheet_id: str = None
    # NOTE(review): no type annotation here, so this is a plain class attribute
    # rather than a generated dataclass field (it will not appear in the
    # generated __init__ or repr). Confirm whether that is intentional.
    spreadsheet_app = None
    discord_client: discord.Client = None
    data_files: DataFiles = None
    reminders: Reminders = None
@dataclass
class CommandContextInfo:
    """Context information for the command that is being executed.

    The from_* fields and original_message are populated for every command;
    the manager and match fields are attached per-command via the with*
    builder methods, which mutate and return this same object.
    """
    from_name: str = None # Convenience
    from_id: str = None # Convenience
    from_discrim: str = None # Convenience
    original_message: discord.Message = None # For unusual use cases
    esper_resonance_manager: EsperResonanceManager = None
    vision_card_manager: VisionCardManager = None
    command_match: Match = None

    def shallowCopy(self) -> CommandContextInfo:
        """Make a shallow copy of this object, containing only the from_name, from_id, from_discrim and original_message fields"""
        result = CommandContextInfo()
        result.from_name = self.from_name
        result.from_id = self.from_id
        # BUG FIX: this previously re-assigned from_name a second time and never
        # copied from_discrim, so copies silently lost the discriminator.
        result.from_discrim = self.from_discrim
        result.original_message = self.original_message
        return result

    def withEsperResonanceManager(self, esper_resonance_manager: EsperResonanceManager) -> CommandContextInfo:
        """Assign the specified esper resonance manager and return a reference to this object."""
        self.esper_resonance_manager = esper_resonance_manager
        return self

    def withVisionCardManager(self, vision_card_manager: VisionCardManager) -> CommandContextInfo:
        """Assign the specified vision card manager and return a reference to this object."""
        self.vision_card_manager = vision_card_manager
        return self

    def withMatch(self, the_match: Match) -> CommandContextInfo:
        """Assign the specified match and return a reference to this object."""
        self.command_match = the_match
        return self
class WotvBot:
"""An instance of the bot, configured to manage specific spreadsheets and using Discord and Google credentials."""
# The static instance of the bot, not for general consumption.
__staticInstance: WotvBot = None
    def __init__(self, wotv_bot_config: WotvBotConfig):
        """Create a bot bound to the given configuration and register it as the static instance."""
        self.wotv_bot_config = wotv_bot_config
        # Set this to true in an integration test to allow a local filesystem path to be used in a Discord
        # message as the source of the image to be processed by OCR for Vision Card text extraction. For
        # obvious security reasons, this is false by default.
        self.INTEG_TEST_LOCAL_FILESYSTEM_READ_FOR_VISION_CARD = False
        # Set the static instance of the bot to this instance.
        WotvBot.__staticInstance = self
        # Delays (in milliseconds) used when scheduling whimsy-shop reminders.
        self.whimsy_shop_nrg_reminder_delay_ms: int = 30*60*1000 # 30 minutes
        self.whimsy_shop_spawn_reminder_delay_ms: int = 60*60*1000 # 60 minutes
        # Canned predictions, loaded eagerly from predictions.txt.
        self.predictions = Predictions('predictions.txt')
        self.predictions.refreshPredictions()
        self.last_status = None # Last status set
@staticmethod
def getStaticInstance():
"""Returns an unsafe static reference to the "current" bot, if there is one. In reality this is just the most recently-created bot.
Use with extreme caution. This is primarily intended for internal use cases where a static method is required, such as the callback
for a "apscheduler"-module task such as a reminder that is being invoked asynchronously and potentially across different instances of
the bot process where the specific instance of the bot is irrelevant.
"""
return WotvBot.__staticInstance
    async def handleMessage(self, message: discord.Message):
        """Process one incoming Discord message and produce a response.

        Returns a tuple (response_text, reaction) where either element may be
        None; the reaction, when present, is an emoji attached by the caller.
        """
        # Bail out early if anything looks insane.
        if message.author == self.wotv_bot_config.discord_client.user:
            return (None, None)
        if not message.content:
            return (None, None)
        if not message.content.startswith('!'):
            return (None, None)
        for ignore_pattern in WotvBotConstants.ALL_IGNORE_PATTERNS:
            if ignore_pattern.match(message.content):
                return (None, None)
        # Set up the context used in handling every possible command.
        # TODO: Clean up these fields that are not part of the CommandContextInfo object.
        from_name = message.author.display_name
        from_id = message.author.id
        from_discrim = message.author.discriminator
        context = CommandContextInfo()
        context.from_discrim = from_discrim
        context.from_id = from_id
        context.from_name = from_name
        context.original_message = message
        # TODO: Hold these references longer after cleaning up the rest of the code, in an application context.
        esper_resonance_manager = EsperResonanceManager(
            self.wotv_bot_config.esper_resonance_spreadsheet_id,
            self.wotv_bot_config.sandbox_esper_resonance_spreadsheet_id,
            self.wotv_bot_config.access_control_spreadsheet_id,
            self.wotv_bot_config.spreadsheet_app)
        vision_card_manager = VisionCardManager(
            self.wotv_bot_config.vision_card_spreadsheet_id,
            self.wotv_bot_config.access_control_spreadsheet_id,
            self.wotv_bot_config.spreadsheet_app)
        # To support multi-line commands, we only match the command itself against the first line.
        first_line_lower = message.content.splitlines()[0].lower()
        # Esper resonance commands.
        match = WotvBotConstants.RES_FETCH_SELF_PATTERN.match(first_line_lower)
        if match:
            return self.handleTargetedResonanceLookupForSelf(context.shallowCopy().withMatch(match).withEsperResonanceManager(esper_resonance_manager))
        match = WotvBotConstants.RES_LIST_SELF_PATTERN.match(first_line_lower)
        if match:
            return self.handleGeneralResonanceLookupForSelf(context.shallowCopy().withMatch(match).withEsperResonanceManager(esper_resonance_manager))
        match = WotvBotConstants.RES_FETCH_OTHER_PATTERN.match(first_line_lower)
        if match:
            return self.handleTargetedResonanceLookupForOtherUser(context.shallowCopy().withMatch(match).withEsperResonanceManager(esper_resonance_manager))
        match = WotvBotConstants.RES_SET_PATTERN.match(first_line_lower)
        if match:
            return self.handleResonanceSet(context.shallowCopy().withMatch(match).withEsperResonanceManager(esper_resonance_manager))
        # Vision card commands.
        if WotvBotConstants.VISION_CARD_SET_PATTERN.match(first_line_lower):
            return await self.handleVisionCardSet(context.shallowCopy().withVisionCardManager(vision_card_manager))
        match = WotvBotConstants.VISION_CARD_FETCH_BY_NAME_PATTERN.match(first_line_lower)
        if match:
            return await self.handleVisionCardFetchByName(context.shallowCopy().withMatch(match).withVisionCardManager(vision_card_manager))
        match = WotvBotConstants.VISION_CARD_ABILITY_SEARCH.match(first_line_lower)
        if match:
            return await self.handleVisionCardAbilitySearch(context.shallowCopy().withMatch(match).withVisionCardManager(vision_card_manager))
        if WotvBotConstants.VISION_CARD_DEBUG_PATTERN.match(first_line_lower):
            return await self.handleVisionCardDebug(context.shallowCopy().withVisionCardManager(vision_card_manager))
        # Unit / skill search commands.
        match = WotvBotConstants.FIND_SKILLS_BY_NAME_PATTERN.match(first_line_lower)
        if match:
            return await self.handleFindSkillsByName(context.shallowCopy().withMatch(match))
        match = WotvBotConstants.FIND_SKILLS_BY_DESCRIPTION_PATTERN.match(first_line_lower)
        if match:
            return await self.handleFindSkillsByDescription(context.shallowCopy().withMatch(match))
        match = WotvBotConstants.RICH_UNIT_SEARCH_PATTERN.match(first_line_lower)
        if match:
            return await self.handleRichUnitSearch(context.shallowCopy().withMatch(match))
        # Reminders, dice rolls and schedule commands.
        match = WotvBotConstants.WHIMSY_REMINDER_PATTERN.match(first_line_lower)
        if match:
            return await self.handleWhimsyReminder(context.shallowCopy().withMatch(match))
        match = WotvBotConstants.ROLLDICE_PATTERN.match(first_line_lower)
        if match:
            return await self.handleRoll(context.shallowCopy().withMatch(match))
        # Predictions
        match = WotvBotConstants.PREDICTION_PATTERN_1.match(first_line_lower)
        if match:
            return await self.handlePrediction(context.shallowCopy().withMatch(match))
        match = WotvBotConstants.PREDICTION_PATTERN_2.match(first_line_lower)
        if match:
            return await self.handlePrediction(context.shallowCopy().withMatch(match))
        match = WotvBotConstants.PREDICTION_PATTERN_3.match(first_line_lower)
        if match:
            return await self.handlePrediction(context.shallowCopy().withMatch(match))
        match = WotvBotConstants.PREDICTION_PATTERN_4.match(first_line_lower)
        if match:
            return await self.handlePrediction(context.shallowCopy().withMatch(match))
        match = WotvBotConstants.DOUBLE_DROP_RATES_SCHEDULE_PATTERN_1.match(first_line_lower)
        if match:
            return await self.handleSchedule(context.shallowCopy().withMatch(match))
        match = WotvBotConstants.DOUBLE_DROP_RATES_SCHEDULE_PATTERN_2.match(first_line_lower)
        if match:
            return await self.handleMats(context.shallowCopy().withMatch(match))
        match = WotvBotConstants.DAILY_REMINDERS.match(first_line_lower)
        if match:
            return await self.handleDailyReminders(context.shallowCopy().withMatch(match))
        # Hidden utility command to look up the snowflake ID of your own user. This isn't secret or insecure, but it's also not common, so it isn't listed.
        if first_line_lower.startswith('!whoami'):
            return self.handleWhoAmI(context)
        # Hidden utility command to look up the snowflake ID of a member. This isn't secret or insecure, but it's also not common, so it isn't listed.
        match = WotvBotConstants.WHOIS_PATTERN.match(first_line_lower)
        if match:
            return await self.handleWhoIs(context.shallowCopy().withMatch(match))
        # Admin commands. Note that the sandbox variants are matched against the
        # raw (original-case) message content rather than the lowered first line.
        if WotvBotConstants.ADMIN_ADD_ESPER_PATTERN.match(first_line_lower) or WotvBotConstants.SANDBOX_ADMIN_ADD_ESPER_PATTERN.match(message.content):
            return self.handleAdminAddEsper(context.shallowCopy().withEsperResonanceManager(esper_resonance_manager))
        if WotvBotConstants.ADMIN_ADD_UNIT_PATTERN.match(first_line_lower) or WotvBotConstants.SANDBOX_ADMIN_ADD_UNIT_PATTERN.match(message.content):
            return self.handleAdminAddUnit(context.shallowCopy().withEsperResonanceManager(esper_resonance_manager))
        if WotvBotConstants.ADMIN_ADD_VC_PATTERN.match(first_line_lower):
            return self.handleAdminAddVisionCard(context.shallowCopy().withVisionCardManager(vision_card_manager))
        if WotvBotConstants.ADMIN_ADD_USER_PATTERN.match(first_line_lower):
            return self.handleAdminAddUser(context.shallowCopy().withEsperResonanceManager(esper_resonance_manager).withVisionCardManager(vision_card_manager))
        # Fallbacks: malformed !resonance commands, help, then unknown commands.
        if first_line_lower.startswith('!resonance'):
            responseText = '<@{0}>: Invalid !resonance command. Use !help for more information.'.format(from_id)
            return (responseText, None)
        if first_line_lower.startswith('!help'):
            responseText = WotvBotConstants.HELP.format(self.wotv_bot_config.esper_resonance_spreadsheet_id, self.wotv_bot_config.vision_card_spreadsheet_id)
            return (responseText, None)
        return ('<@{0}>: Invalid or unknown command. Use !help to see all supported commands and !admin-help to see special admin commands. '\
            'Please do this via a direct message to the bot, to avoid spamming the channel.'.format(from_id), None)
def handleTargetedResonanceLookupForSelf(self, context: CommandContextInfo) -> (str, str):
"""Handle !res command for self-lookup of a specific (unit, esper) tuple."""
unit_name = context.command_match.group(1).strip()
esper_name = context.command_match.group(2).strip()
print('resonance fetch from user %s#%s, for user %s, for unit %s, for esper %s' % (
context.from_name, context.from_discrim, context.from_name, unit_name, esper_name))
resonance, pretty_unit_name, pretty_esper_name = context.esper_resonance_manager.readResonance(None, context.from_id, unit_name, esper_name)
responseText = '<@{0}>: {1}/{2} has resonance {3}'.format(context.from_id, pretty_unit_name, pretty_esper_name, resonance)
return (responseText, None)
def handleTargetedResonanceLookupForOtherUser(self, context: CommandContextInfo) -> (str, str):
"""Handle !res command for lookup of a specific (unit, esper) tuple for a different user."""
target_user_name = context.command_match.group(1).strip()
unit_name = context.command_match.group(2).strip()
esper_name = context.command_match.group(3).strip()
print('resonance fetch from user %s#%s, for user %s, for unit %s, for esper %s' % (
context.from_name, context.from_discrim, target_user_name, unit_name, esper_name))
resonance, pretty_unit_name, pretty_esper_name = context.esper_resonance_manager.readResonance(target_user_name, None, unit_name, esper_name)
responseText = '<@{0}>: for user {1}, {2}/{3} has resonance {4}'.format(
context.from_id, target_user_name, pretty_unit_name, pretty_esper_name, resonance)
return (responseText, None)
def handleGeneralResonanceLookupForSelf(self, context: CommandContextInfo) -> (str, str):
"""Handle !res command for self-lookup of all resonance for a given unit or esper."""
target_name = context.command_match.group('target_name').strip()
print('resonance list fetch from user %s#%s, for target %s' % (context.from_name, context.from_discrim, target_name))
pretty_name, resonance_listing = context.esper_resonance_manager.readResonanceList(None, context.from_id, target_name)
responseText = '<@{0}>: resonance listing for {1}:\n{2}'.format(context.from_id, pretty_name, resonance_listing)
return (responseText, None)
def handleResonanceSet(self, context: CommandContextInfo) -> (str, str):
"""Handle !res-set command to set resonance for a specific unit and esper tuple."""
unit_name = context.command_match.group('unit').strip()
esper_name = context.command_match.group('esper').strip()
resonance_numeric_string = context.command_match.group('resonance_level').strip()
priority = None
if context.command_match.group('priority'):
priority = context.command_match.group('priority').strip()
comment = None
if context.command_match.group('comment'):
comment = context.command_match.group('comment').strip()
print('resonance set from user %s#%s, for unit %s, for esper %s, to resonance %s, with priority %s, comment %s' % (
context.from_name, context.from_discrim, unit_name, esper_name, resonance_numeric_string, priority, comment))
old_resonance, new_resonance, pretty_unit_name, pretty_esper_name = context.esper_resonance_manager.setResonance(
context.from_id, unit_name, esper_name, resonance_numeric_string, priority, comment)
responseText = '<@{0}>: {1}/{2} resonance has been set to {3} (was: {4})'.format(
context.from_id, pretty_unit_name, pretty_esper_name, new_resonance, old_resonance)
if (resonance_numeric_string and int(resonance_numeric_string) == 10):
# reaction = '\U0001F4AA' # CLDR: flexed biceps
reaction = '\U0001F3C6' # CLDR: trophy
else:
reaction = '\U00002705' # CLDR: check mark button
return (responseText, reaction)
def handleWhoAmI(self, context: CommandContextInfo) -> (str, str):
"""Handle !whoami command to fetch your own snowflake ID."""
responseText = '<@{id}>: Your snowflake ID is {id}'.format(id=context.from_id)
return (responseText, None)
async def handleWhoIs(self, context: CommandContextInfo) -> (str, str):
"""Handle !whois command to fetch the snowflake ID for a given user."""
original_match = WotvBotConstants.WHOIS_PATTERN.match(context.original_message.content) # Fetch original-case name
target_member_name = original_match.group('server_handle').strip()
# As of December 2020, possibly earlier, the following line no longer works:
# members = context.original_message.guild.members
# Instead have to fetch the list from the server, and enable the "SERVER MEMBERS INTENT" permission in the bot admin page on Discord.
members = await context.original_message.guild.fetch_members(limit=1000).flatten()
for member in members:
if member.name == target_member_name:
responseText = '<@{0}>: the snowflake ID for {1} is {2}'.format(context.from_id, target_member_name, member.id)
return (responseText, None)
responseText = '<@{0}>: no such member {1}'.format(context.from_id, target_member_name)
return (responseText, None)
def handleAdminAddEsper(self, context: CommandContextInfo) -> (str, str):
"""Handle !admin-add-esper and !sandbox-admin-add-esper commands to add a new esper to the resonance tracker."""
sandbox = True
match = WotvBotConstants.ADMIN_ADD_ESPER_PATTERN.match(context.original_message.content)
if match:
sandbox = False
else:
match = WotvBotConstants.SANDBOX_ADMIN_ADD_ESPER_PATTERN.match(context.original_message.content)
esper_name = match.group('name').strip()
esper_url = match.group('url').strip()
left_or_right_of = match.group('left_or_right_of').strip()
column = match.group('column').strip()
print('esper add (sandbox mode={6}) from user {0}#{1}, for esper {2}, url {3}, position {4}, column {5}'.format(
context.from_name, context.from_discrim, esper_name, esper_url, left_or_right_of, column, sandbox))
context.esper_resonance_manager.addEsperColumn(context.from_id, esper_name, esper_url, left_or_right_of, column, sandbox)
responseText = '<@{0}>: Added esper {1}!'.format(context.from_id, esper_name)
return (responseText, None)
def handleAdminAddUnit(self, context: CommandContextInfo) -> (str, str):
"""Handle !admin-add-unit and !sandbox-admin-add-unit commands to add a new unit to the resonance tracker."""
sandbox = True
match = WotvBotConstants.ADMIN_ADD_UNIT_PATTERN.match(context.original_message.content)
if match:
sandbox = False
else:
match = WotvBotConstants.SANDBOX_ADMIN_ADD_UNIT_PATTERN.match(context.original_message.content)
unit_name = match.group('name').strip()
unit_url = match.group('url').strip()
above_or_below = match.group('above_or_below').strip()
row1Based = match.group('row1Based').strip()
print('unit add (sandbox mode={6}) from user {0}#{1}, for unit {2}, url {3}, position {4}, row {5}'.format(
context.from_name, context.from_discrim, unit_name, unit_url, above_or_below, row1Based, sandbox))
context.esper_resonance_manager.addUnitRow(context.from_id, unit_name, unit_url, above_or_below, row1Based, sandbox)
responseText = '<@{0}>: Added unit {1}!'.format(context.from_id, unit_name)
return (responseText, None)
def handleAdminAddVisionCard(self, context: CommandContextInfo) -> (str, str):
"""Handle !admin-add-vc command to add a new vision card."""
match = WotvBotConstants.ADMIN_ADD_VC_PATTERN.match(context.original_message.content)
card_name = match.group('name').strip()
card_url = match.group('url').strip()
above_or_below = match.group('above_or_below').strip()
row1Based = match.group('row1Based').strip()
print('vc add from user {0}#{1}, for card {2}, url {3}, position {4}, row {5}'.format(
context.from_name, context.from_discrim, card_name, card_url, above_or_below, row1Based))
context.vision_card_manager.addVisionCardRow(context.from_id, card_name, card_url, above_or_below, row1Based)
responseText = '<@{0}>: Added card {1}!'.format(context.from_id, card_name)
return (responseText, None)
def handleAdminAddUser(self, context: CommandContextInfo) -> (str, str):
"""Handle !admin-add-user command to add a new unit to the resonance tracker and the administrative spreadsheet."""
if not AdminUtils.isAdmin(self.wotv_bot_config.spreadsheet_app, self.wotv_bot_config.access_control_spreadsheet_id, context.from_id):
raise ExposableException('You do not have permission to add a user.')
match = WotvBotConstants.ADMIN_ADD_USER_PATTERN.match(context.original_message.content)
snowflake_id = match.group('snowflake_id').strip()
nickname = match.group('nickname').strip()
user_type = match.group('user_type').strip().lower()
is_admin = False
if user_type == 'admin':
is_admin = True
print('user add from user {0}#{1}, for snowflake_id {2}, nickname {3}, is_admin {4}'.format(
context.from_name, context.from_discrim, snowflake_id, nickname, is_admin))
AdminUtils.addUser(self.wotv_bot_config.spreadsheet_app, self.wotv_bot_config.access_control_spreadsheet_id, nickname, snowflake_id, is_admin)
context.esper_resonance_manager.addUser(nickname)
context.vision_card_manager.addUser(nickname)
responseText = '<@{0}>: Added user {1}!'.format(context.from_id, nickname)
return (responseText, None)
    async def handleVisionCardDebug(self, context: CommandContextInfo) -> (str, str):
        """Handle !xocr and !xocr-debug commands to perform OCR on a Vision Card.

        Delegates to handleVisionCardSet in debug mode, which echoes the intermediate
        OCR output to the channel instead of persisting the card.
        """
        return await self.handleVisionCardSet(context, is_debug=True)
    async def handleVisionCardSet(self, context: CommandContextInfo, is_debug: bool = False) -> (str, str):
        """Handle !vc-set: OCR the attached vision card screenshot and record it for the caller.

        When is_debug is True, the merged intermediate OCR images and raw extracted text
        are sent back to the channel, and nothing is written to the spreadsheet.
        """
        # Try to extract text from a vision card screenshot that is sent as an attachment to this message.
        url = context.original_message.attachments[0].url
        print('Vision Card OCR request from user %s#%s, for url %s' % (context.from_name, context.from_discrim, url))
        screenshot = None
        # Integration tests may read the "url" straight from the local filesystem (see __init__).
        if self.INTEG_TEST_LOCAL_FILESYSTEM_READ_FOR_VISION_CARD:
            screenshot = VisionCardOcrUtils.loadScreenshotFromFilesystem(url)
        else:
            screenshot = VisionCardOcrUtils.downloadScreenshotFromUrl(url)
        vision_card = VisionCardOcrUtils.extractVisionCardFromScreenshot(screenshot, is_debug)
        if is_debug:
            # Send the merged intermediate OCR images plus the raw text back to the channel.
            combined_image = VisionCardOcrUtils.mergeDebugImages(vision_card)
            buffer = io.BytesIO()
            combined_image.save(buffer, format='PNG')
            buffer.seek(0)
            temp_file = discord.File(buffer, filename='Intermediate OCR Debug.png')
            await context.original_message.channel.send('Intermediate OCR Debug. Raw info text:\n```{0}```\nRaw stats text: ```{1}```'.format(
                vision_card.info_debug_raw_text,
                vision_card.stats_debug_raw_text), file=temp_file)
        # Print errors to the console, but do not return them as we cannot guarantee that there is no sensitive
        # information in here, such as possible library exceptions, i/o exceptions, etceteras.
        if vision_card.error_messages is not None and len(vision_card.error_messages) > 0:
            print('errors found during vision card conversion: ' + str(vision_card.error_messages))
        reaction = None
        if vision_card.successfully_extracted is True:
            responseText = '<@{0}>: {1}'.format(context.from_id, vision_card.prettyPrint())
            # Debug runs are read-only: only persist the card on a normal !vc-set.
            if not is_debug:
                context.vision_card_manager.setVisionCard(context.from_id, vision_card)
            reaction = '\U00002705' # CLDR: check mark button
        else:
            responseText = '<@{0}>: Vision card extraction has failed. You may try again with !vc-debug for a clue about what has gone wrong'.format(
                context.from_id)
        return (responseText, reaction)
async def handleVisionCardFetchByName(self, context: CommandContextInfo) -> (str, str):
"""Handle !vc command for self-lookup of a given vision card by name"""
target_name = context.command_match.group('target_name').strip()
print('vision card fetch from user %s#%s, for target %s' % (context.from_name, context.from_discrim, target_name))
vision_card = context.vision_card_manager.readVisionCardByName(None, context.from_id, target_name)
responseText = '<@{0}>: Vision Card:\n{1}'.format(context.from_id, str(vision_card.prettyPrint()))
return (responseText, None)
async def handleVisionCardAbilitySearch(self, context: CommandContextInfo) -> (str, str):
"""Handle !vc-ability command for self-lookup of a given vision card by party/bestowed ability fuzzy-match"""
search_text = context.command_match.group('search_text').strip()
print('vision card ability search from user %s#%s, for text %s' % (context.from_name, context.from_discrim, search_text))
vision_cards = context.vision_card_manager.searchVisionCardsByAbility(None, context.from_id, search_text)
if len(vision_cards) == 0:
responseText = '<@{0}>: No vision cards matched the ability search.'.format(context.from_id)
return (responseText, None)
responseText = '<@{0}>: Matching Vision Cards:\n'.format(context.from_id)
for vision_card in vision_cards:
responseText += ' ' + vision_card.Name + '\n'
responseText += ' Party Ability: ' + vision_card.PartyAbility + '\n'
for bestowed_effect in vision_card.BestowedEffects:
responseText += ' Bestowed Effect: ' + bestowed_effect + '\n'
return (responseText, None)
@staticmethod
def rarityAndElementParenthetical(unit: WotvUnit) -> str:
"""Generate a parenthetical string with the unit's rarity and element(s)"""
text = '(' + str(unit.rarity) + ' rarity, '
if not unit.elements:
return text + 'no element)'
text += unit.elements[0]
if len(unit.elements) > 1:
for element in unit.elements[1:]:
text += '/' + str(element)
text += ' element'
if len(unit.elements) > 1:
text += 's'
return text + ')'
def prettyPrintUnitSkillSearchResult(self, result: UnitSkillSearchResult):
"""Print a useful, human-readable description of the skill match including the unit name, element, rarity, the skill name,
and how the skill is unlocked."""
if result.is_master_ability:
return 'Master ability for ' + result.unit.name + ' ' + WotvBot.rarityAndElementParenthetical(result.unit) + ': ' + result.skill.description
if result.is_limit_burst:
return 'Limit burst (' + result.skill.name + ') for ' + result.unit.name + ' ' + WotvBot.rarityAndElementParenthetical(result.unit) + ': ' + result.skill.description
text = 'Skill "' + result.skill.name + '" learned by ' + result.unit.name
text += ' ' + WotvBot.rarityAndElementParenthetical(result.unit)
text += ' with job ' + result.board_skill.unlocked_by_job.name + ' at job level ' + str(result.board_skill.unlocked_by_job_level)
text += ': ' + result.skill.description
return text
def prettyPrintUnitJobSearchResult(self, result: UnitJobSearchResult):
"""Print a useful, human-readable description of the job match including the unit name, element, rarity, and job name."""
text = 'Job "' + result.job.name + '" learned by ' + result.unit.name
text += ' ' + WotvBot.rarityAndElementParenthetical(result.unit)
return text
def prettyPrintUnitSearchResult(self, result: UnitSearchResult):
"""Print a useful, human-readable description of any search result, as appropriate to the type."""
if hasattr(result, 'is_master_ability'):
return self.prettyPrintUnitSkillSearchResult(result)
elif hasattr(result, 'job'):
return self.prettyPrintUnitJobSearchResult(result)
else:
return result.unit.name + ' ' + WotvBot.rarityAndElementParenthetical(result.unit)
@staticmethod
def getExtraCommandLines(context: CommandContextInfo):
"""Extract all extra non-empty lines from a command and return them as a list."""
lines = context.original_message.content.splitlines()
extra_lines = []
if len(lines) > 1:
for line in lines[1:]:
line = line.strip()
if line:
extra_lines.append(line)
return extra_lines
# Deprecated - Use rich unit search instead, e.g. "!unit-search skill-name <search_text>"
async def handleFindSkillsByName(self, context: CommandContextInfo) -> (str, str):
"""Handle !skills-by-name command"""
search_text = context.command_match.group('search_text').strip()
print('skills-by-name search from user %s#%s, for text %s' % (context.from_name, context.from_discrim, search_text))
refinements = WotvBot.getExtraCommandLines(context)
if len(refinements) > 0:
print(' refinements: ' + str(refinements))
results = DataFileSearchUtils.richUnitSearch(self.wotv_bot_config.data_files, 'skill-name', search_text, refinements)
if len(results) == 0:
responseText = '<@{0}>: No skills matched the search.'.format(context.from_id)
return (responseText, None)
responseText = '<@{0}>: Matching Skills:\n'.format(context.from_id)
results = sorted(results, key=lambda one_result : one_result.unit.name)
truncated = False
if len(results) > 25:
results = results[:25]
truncated = True
for result in results:
responseText += self.prettyPrintUnitSearchResult(result) + '\n'
if truncated:
responseText += 'Results truncated because there were too many.'
return (responseText.strip(), None)
# Deprecated - Use rich unit search instead, e.g. "!unit-search skill-desc <search_text>"
async def handleFindSkillsByDescription(self, context: CommandContextInfo) -> (str, str):
"""Handle !skills-by-desc command"""
search_text = context.command_match.group('search_text').strip()
print('skills-by-description search from user %s#%s, for text %s' % (context.from_name, context.from_discrim, search_text))
refinements = WotvBot.getExtraCommandLines(context)
if len(refinements) > 0:
print(' refinements: ' + str(refinements))
results = DataFileSearchUtils.richUnitSearch(self.wotv_bot_config.data_files, 'skill-desc', search_text, refinements)
if len(results) == 0:
responseText = '<@{0}>: No skills matched the search.'.format(context.from_id)
return (responseText, None)
responseText = '<@{0}>: Matching Skills:\n'.format(context.from_id)
results = sorted(results, key=lambda one_result : one_result.unit.name)
truncated = False
if len(results) > 25:
results = results[:25]
truncated = True
for result in results:
responseText += self.prettyPrintUnitSearchResult(result) + '\n'
if truncated:
responseText += 'Results truncated because there were too many.'
return (responseText.strip(), None)
async def handleRichUnitSearch(self, context: CommandContextInfo) -> (str, str):
"""Handle !unit-search command"""
search_type = context.command_match.group('search_type').strip()
search_text = None
if search_type != 'all':
search_text = context.command_match.group('search_text').strip()
print('unit search from user %s#%s, type %s, text %s' % (context.from_name, context.from_discrim, search_type, search_text))
refinements = WotvBot.getExtraCommandLines(context)
if len(refinements) > 0:
print(' refinements: ' + str(refinements))
results = DataFileSearchUtils.richUnitSearch(self.wotv_bot_config.data_files, search_type, search_text, refinements)
if len(results) == 0:
responseText = '<@{0}>: No units matched the search.'.format(context.from_id)
return (responseText, None)
responseText = '<@{0}>: Results:\n'.format(context.from_id)
results = sorted(results, key=lambda one_result : one_result.unit.name)
truncated = False
if len(results) > 25:
results = results[:25]
truncated = True
for result in results:
responseText += self.prettyPrintUnitSearchResult(result) + '\n'
if truncated:
responseText += 'Results truncated because there were too many.'
return (responseText.strip(), None)
@staticmethod
async def whimsyShopNrgReminderCallback(target_channel_id: str, from_id: str):
"""Handles a reminder callback for a whimsy shop nrg reminder."""
discord_client: discord.Client = WotvBot.getStaticInstance().wotv_bot_config.discord_client
text_channel: discord.TextChannel = discord_client.get_channel(target_channel_id)
#discord_client.loop.create_task(text_channel.send(content = '<@{0}>: This is your requested whimsy shop reminder: NRG spent will now start counting towards the next Whimsy Shop.'.format(from_id)))
await text_channel.send(content = '<@{0}>: This is your requested whimsy shop reminder: NRG spent will now start counting towards the next Whimsy Shop.'.format(from_id))
@staticmethod
async def whimsyShopSpawnReminderCallback(target_channel_id: str, from_id: str):
"""Handles a reminder callback for a whimsy shop spawn reminder."""
discord_client: discord.Client = WotvBot.getStaticInstance().wotv_bot_config.discord_client
text_channel: discord.TextChannel = discord_client.get_channel(target_channel_id)
#discord_client.loop.create_task(text_channel.send(content = '<@{0}>: This is your requested whimsy shop reminder: The Whimsy Shop is ready to spawn again.'.format(from_id)))
await text_channel.send(content = '<@{0}>: This is your requested whimsy shop reminder: The Whimsy Shop is ready to spawn again.'.format(from_id))
async def handleWhimsyReminder(self, context: CommandContextInfo) -> (str, str):
"""Handle !whimsy command for a whimsy reminder"""
reminders = self.wotv_bot_config.reminders # Shorthand
owner_id = str(context.from_id) # Shorthand
command = '<none>'
if context.command_match.group('command'):
command = context.command_match.group('command').strip()
print('Whimsy reminder request from user %s#%s, command %s' % (context.from_name, context.from_discrim, command))
responseText = '<@{0}>: Unknown/unsupported !whimsy command. Use !help for for more information.'.format(context.from_id)
# Default behavior - be smart. If the user has got a reminder set, don't overwrite it unless they pass set-reminder as the command.
# If they do not have a reminder set, go ahead and set it now.
append_overwrite_reminder_message = False # Whether or not to add some reminder text to the message
if command == '<none>':
# Check if an existing reminder is set. If so prompt to overwrite...
if reminders.hasPendingWhimsyNrgReminder(owner_id) or reminders.hasPendingWhimsySpawnReminder(owner_id):
command = 'when'
append_overwrite_reminder_message = True # Remind the user how to overwrite the current timer.
else:
command = 'set-reminder' # Assume the user wants to set a reminder.
if command == 'set-reminder':
append_existing_canceled_message = reminders.hasPendingWhimsyNrgReminder(owner_id) or reminders.hasPendingWhimsySpawnReminder(owner_id)
nrg_callback: callable = WotvBot.whimsyShopNrgReminderCallback
nrg_params = [context.original_message.channel.id, owner_id]
spawn_callback: callable = WotvBot.whimsyShopSpawnReminderCallback
spawn_params = nrg_params
reminders.addWhimsyReminder(context.from_name, owner_id, nrg_callback, nrg_params, spawn_callback, spawn_params,
self.whimsy_shop_nrg_reminder_delay_ms, self.whimsy_shop_spawn_reminder_delay_ms)
responseText = '<@{0}>: Your reminder has been set.'.format(context.from_id)
if append_existing_canceled_message:
responseText += ' Your previous outstanding reminder has been discarded.'
elif command == 'when':
if reminders.hasPendingWhimsyNrgReminder(owner_id):
time_left_minutes = int(reminders.timeTillWhimsyNrgReminder(owner_id) / 60)
responseText = '<@{0}>: NRG spent will start counting towards the next Whimsy Shop in about {1} minutes.'.format(owner_id, str(time_left_minutes))
if append_overwrite_reminder_message:
responseText += ' To force the timer to reset to 60 minutes *immediately*, use the command "!whimsy set-reminder".'
elif reminders.hasPendingWhimsySpawnReminder(owner_id):
time_left_minutes = int(reminders.timeTillWhimsySpawnReminder(owner_id) / 60)
responseText = '<@{0}>: The Whimsy Shop will be ready to spawn again in about {1} minutes.'.format(owner_id, str(time_left_minutes))
if append_overwrite_reminder_message:
responseText += ' To force the timer to reset to 60 minutes *immediately*, use the command "!whimsy set-reminder".'
else:
responseText = '<@{0}>: You do not currently have a whimsy reminder set.'.format(context.from_id)
elif command == 'cancel':
reminders.cancelWhimsyReminders(owner_id)
responseText = '<@{0}>: Any and all outstanding whimsy reminders have been canceled.'.format(context.from_id)
return (responseText, None)
async def handleRoll(self, context: CommandContextInfo) -> (str, str):
"""Handle !roll command to simulate a dice roll."""
spec: DiceSpec = DiceSpec.parse(context.command_match.group('dice_spec'))
print('Dice roll request from user %s#%s, spec %s' % (context.from_name, context.from_discrim, str(spec)))
if spec.num_dice > 50:
responseText = '<@{0}>: Too many dice in !roll command (max 50). Use !help for for more information.'.format(context.from_id)
else:
results: List[int] = Rolling.rollDice(spec)
total = 0
for one_roll in results:
total += one_roll
responseText = '<@{0}>: Rolled a total of {1}. Dice values were: {2}'.format(context.from_id, str(total), str(results))
return (responseText.strip(), None)
async def handlePrediction(self, context: CommandContextInfo) -> (str, str):
"""Handle !predict/astrologize/divine/foretell (etc) command to make a funny prediction."""
query = context.command_match.group('query')
print('Prediction request from user %s#%s, query %s' % (context.from_name, context.from_discrim, str(query)))
responseText = '<@{0}>: {1}'.format(context.from_id, self.predictions.predict(query))
return (responseText.strip(), None)
async def handleSchedule(self, context: CommandContextInfo) -> (str, str):
"""Handle a request for the weekly schedule."""
print('Schedule request from user %s#%s' % (context.from_name, context.from_discrim))
responseText = '<@{0}>:\n{1}'.format(context.from_id, WeeklyEventSchedule.getDoubleDropRateSchedule('** >> ', ' << **'))
return (responseText.strip(), None)
async def handleMats(self, context: CommandContextInfo) -> (str, str):
"""Handle a request for the current double-drop rate room."""
print('Mats request from user %s#%s' % (context.from_name, context.from_discrim))
responseText = '<@{0}>:\n'.format(context.from_id)
responseText += 'Today: ' + WeeklyEventSchedule.getTodaysDoubleDropRateEvents() + '\n'
responseText += 'Tomorrow: ' + WeeklyEventSchedule.getTomorrowsDoubleDropRateEvents() + '\n'
responseText += 'For the full schedule, use !schedule.'
return (responseText.strip(), None)
    async def createOrResetPeriodicStatusUpdateCallback(self):
        """Create or reset the status update callback for the entire bot."""
        # Delegates to the reminders subsystem, registering the static periodicStatusUpdateCallback as the recurring task.
        self.wotv_bot_config.reminders.createOrResetPeriodicStatusUpdateCallback(WotvBot.periodicStatusUpdateCallback)
@staticmethod
async def periodicStatusUpdateCallback():
"""Handles a callback for a periodic status update."""
bot: WotvBot = WotvBot.getStaticInstance()
discord_client: discord.Client = bot.wotv_bot_config.discord_client
new_status = WeeklyEventSchedule.getTodaysDoubleDropRateEvents()
if bot.last_status is None or bot.last_status != new_status:
print('Updating bot status to: ' + new_status)
# Apparently bots cannot use a custom status so gotta stick with a regular one like "Playing" (Game)
await discord_client.change_presence(activity=discord.Game(name=new_status))
bot.last_status = new_status
@staticmethod
async def dailyReminderCallback(target_channel_id: str, from_id: str, requested_reminders: List[str]):
"""Handles a reminder callback for daily reminders."""
discord_client: discord.Client = WotvBot.getStaticInstance().wotv_bot_config.discord_client
text_channel: discord.TextChannel = discord_client.get_channel(target_channel_id)
reminder_text = '<@{0}>: This is your requested daily reminder. Cancel daily reminders with "!daily-reminders none" or use "!help".'.format(from_id)
if 'mats' in requested_reminders:
reminder_text += '\n Today\'s daily double rate drops are: ' + WeeklyEventSchedule.getTodaysDoubleDropRateEvents()
await text_channel.send(content = reminder_text)
async def handleDailyReminders(self, context: CommandContextInfo) -> (str, str):
"""Handle !daily-reminders command for various daily reminders, such as double-drop-rates"""
reminders = self.wotv_bot_config.reminders # Shorthand
owner_id = str(context.from_id) # Shorthand
reminder_list_str = '<default>'
if context.command_match.group('reminder_list'):
reminder_list_str = context.command_match.group('reminder_list').strip()
print('Daily reminders request from user %s#%s, reminder list %s' % (context.from_name, context.from_discrim, reminder_list_str))
responseText = '<@{0}>: Unknown/unsupported !daily-reminders command. Use !help for for more information.'.format(context.from_id)
requested_reminders: List[str] = reminder_list_str.split(',')
configured_reminders_message = '<@{0}>: Your daily reminders have been configured:'.format(context.from_id)
# Default behavior - be smart. If the user has got a reminder set, don't overwrite it unless they pass "none" as the list.
if reminder_list_str == '<default>':
if reminders.hasDailyReminder(owner_id):
responseText = '<@{0}>: You have daily reminders configured. To clear them, use "!daily-reminders none".'.format(context.from_id)
else:
responseText = '<@{0}>: You do not currently have daily reminders configured. Use !help for more information.'.format(context.from_id)
elif reminder_list_str == 'none':
reminders.cancelDailyReminder(owner_id)
responseText = '<@{0}>: Your daily reminders have been canceled.'.format(context.from_id)
else:
added_reminders = []
if 'mats' in requested_reminders:
configured_reminders_message += '\n daily double-drop rate reminder ("mats")'
added_reminders.append('mats')
callback: callable = WotvBot.dailyReminderCallback
callback_params = [context.original_message.channel.id, owner_id, added_reminders]
reminders.addDailyReminder(context.from_name, owner_id, callback, callback_params)
responseText = configured_reminders_message
return (responseText, None)
| """The runtime heart of the WOTV Bot."""
from __future__ import annotations
from dataclasses import dataclass
import io
from re import Match
from typing import List
import discord
from admin_utils import AdminUtils
from data_files import DataFiles
from data_file_search_utils import DataFileSearchUtils, UnitSkillSearchResult, UnitJobSearchResult, UnitSearchResult
from data_file_core_classes import WotvUnit
from esper_resonance_manager import EsperResonanceManager
from predictions import Predictions
from reminders import Reminders
from rolling import DiceSpec, Rolling
from vision_card_ocr_utils import VisionCardOcrUtils
from vision_card_manager import VisionCardManager
from weekly_event_schedule import WeeklyEventSchedule
from wotv_bot_common import ExposableException
from wotv_bot_constants import WotvBotConstants
class DiscordSafeException(ExposableException):
    """An exception whose error text is safe to show in Discord."""

    def __init__(self, message):
        # Modern zero-argument super(); identical behavior to the explicit two-argument form in Python 3.
        super().__init__(message)
        # Kept on the instance so callers can read the displayable text directly.
        self.message = message
@dataclass
class WotvBotConfig:
    """Configuration for a single instance of the bot. All fields are required to be set.

    access_control_spreadsheet_id: the ID of the spreadsheet where access controls are kept
    esper_resonance_spreadsheet_id: the ID of the spreadsheet where esper resonance is tracked
    sandbox_esper_resonance_spreadsheet_id: the ID of the sandbox alternative to the real esper_resonance_spreadsheet_id
    vision_card_spreadsheet_id: the ID of the spreadsheet where vision cards are tracked
    spreadsheet_app: the Google spreadsheets Resource obtained from calling the spreadsheets() method on a Service Resource.
    discord_client: the Discord client
    data_files: the WotV data dump.
    reminders: the reminders subsystem.
    """
    # Google Sheets spreadsheet IDs (strings as they appear in the sheet URLs).
    access_control_spreadsheet_id: str = None
    esper_resonance_spreadsheet_id: str = None
    sandbox_esper_resonance_spreadsheet_id: str = None
    vision_card_spreadsheet_id: str = None
    # Left unannotated: the Google API client's Resource type is not imported in this module.
    spreadsheet_app = None
    discord_client: discord.Client = None
    data_files: DataFiles = None
    reminders: Reminders = None
@dataclass
class CommandContextInfo:
    """Context information for the command that is being executed.

    Carries the sender's identity, the original Discord message, the managers a handler may
    need, and the regex match for the command. The with* methods are fluent setters.
    """
    from_name: str = None # Convenience
    from_id: str = None # Convenience
    from_discrim: str = None # Convenience
    original_message: discord.Message = None # For unusual use cases
    esper_resonance_manager: EsperResonanceManager = None
    vision_card_manager: VisionCardManager = None
    command_match: Match = None
    def shallowCopy(self) -> CommandContextInfo:
        """Make a shallow copy of this object, containing only the from_name, from_id, from_discrim and original_message fields"""
        result = CommandContextInfo()
        result.from_name = self.from_name
        result.from_id = self.from_id
        # Bug fix: from_discrim was previously never copied (from_name was assigned twice instead),
        # contradicting this method's documented contract.
        result.from_discrim = self.from_discrim
        result.original_message = self.original_message
        return result
    def withEsperResonanceManager(self, esper_resonance_manager: EsperResonanceManager) -> CommandContextInfo:
        """Assign the specified esper resonance manager and return a reference to this object."""
        self.esper_resonance_manager = esper_resonance_manager
        return self
    def withVisionCardManager(self, vision_card_manager: VisionCardManager) -> CommandContextInfo:
        """Assign the specified vision card manager and return a reference to this object."""
        self.vision_card_manager = vision_card_manager
        return self
    def withMatch(self, the_match: Match) -> CommandContextInfo:
        """Assign the specified match and return a reference to this object."""
        self.command_match = the_match
        return self
class WotvBot:
"""An instance of the bot, configured to manage specific spreadsheets and using Discord and Google credentials."""
# The static instance of the bot, not for general consumption.
__staticInstance: WotvBot = None
    def __init__(self, wotv_bot_config: WotvBotConfig):
        """Initialize the bot with the given configuration and register it as the static instance."""
        self.wotv_bot_config = wotv_bot_config
        # Set this to true in an integration test to allow a local filesystem path to be used in a Discord
        # message as the source of the image to be processed by OCR for Vision Card text extraction. For
        # obvious security reasons, this is false by default.
        self.INTEG_TEST_LOCAL_FILESYSTEM_READ_FOR_VISION_CARD = False
        # Set the static instance of the bot to this instance.
        WotvBot.__staticInstance = self
        # Whimsy shop reminder delays: NRG counting resumes after 30 minutes, the shop can respawn after 60.
        self.whimsy_shop_nrg_reminder_delay_ms: int = 30*60*1000 # 30 minutes
        self.whimsy_shop_spawn_reminder_delay_ms: int = 60*60*1000 # 60 minutes
        # Prediction texts for the !predict family of commands, loaded from a local data file.
        self.predictions = Predictions('predictions.txt')
        self.predictions.refreshPredictions()
        self.last_status = None # Last status set
    @staticmethod
    def getStaticInstance() -> WotvBot:
        """Returns an unsafe static reference to the "current" bot, if there is one. In reality this is just the most recently-created bot.

        Use with extreme caution. This is primarily intended for internal use cases where a static method is required, such as the callback
        for a "apscheduler"-module task such as a reminder that is being invoked asynchronously and potentially across different instances of
        the bot process where the specific instance of the bot is irrelevant.
        """
        return WotvBot.__staticInstance
    async def handleMessage(self, message: discord.Message):
        """Process the request and produce a response.

        Matches the first line of the message against each known command pattern in order and
        delegates to the corresponding handler. Returns the handler's (response_text, reaction)
        tuple, or (None, None) when the message should be ignored.
        """
        # Bail out early if anything looks insane.
        if message.author == self.wotv_bot_config.discord_client.user:
            return (None, None)
        if not message.content:
            return (None, None)
        if not message.content.startswith('!'):
            return (None, None)
        for ignore_pattern in WotvBotConstants.ALL_IGNORE_PATTERNS:
            if ignore_pattern.match(message.content):
                return (None, None)
        # Set up the context used in handling every possible command.
        # TODO: Clean up these fields that are not part of the CommandContextInfo object.
        from_name = message.author.display_name
        from_id = message.author.id
        from_discrim = message.author.discriminator
        context = CommandContextInfo()
        context.from_discrim = from_discrim
        context.from_id = from_id
        context.from_name = from_name
        context.original_message = message
        # TODO: Hold these references longer after cleaning up the rest of the code, in an application context.
        esper_resonance_manager = EsperResonanceManager(
            self.wotv_bot_config.esper_resonance_spreadsheet_id,
            self.wotv_bot_config.sandbox_esper_resonance_spreadsheet_id,
            self.wotv_bot_config.access_control_spreadsheet_id,
            self.wotv_bot_config.spreadsheet_app)
        vision_card_manager = VisionCardManager(
            self.wotv_bot_config.vision_card_spreadsheet_id,
            self.wotv_bot_config.access_control_spreadsheet_id,
            self.wotv_bot_config.spreadsheet_app)
        # To support multi-line commands, we only match the command itself against the first line.
        first_line_lower = message.content.splitlines()[0].lower()
        # Esper resonance commands.
        match = WotvBotConstants.RES_FETCH_SELF_PATTERN.match(first_line_lower)
        if match:
            return self.handleTargetedResonanceLookupForSelf(context.shallowCopy().withMatch(match).withEsperResonanceManager(esper_resonance_manager))
        match = WotvBotConstants.RES_LIST_SELF_PATTERN.match(first_line_lower)
        if match:
            return self.handleGeneralResonanceLookupForSelf(context.shallowCopy().withMatch(match).withEsperResonanceManager(esper_resonance_manager))
        match = WotvBotConstants.RES_FETCH_OTHER_PATTERN.match(first_line_lower)
        if match:
            return self.handleTargetedResonanceLookupForOtherUser(context.shallowCopy().withMatch(match).withEsperResonanceManager(esper_resonance_manager))
        match = WotvBotConstants.RES_SET_PATTERN.match(first_line_lower)
        if match:
            return self.handleResonanceSet(context.shallowCopy().withMatch(match).withEsperResonanceManager(esper_resonance_manager))
        # Vision card commands.
        if WotvBotConstants.VISION_CARD_SET_PATTERN.match(first_line_lower):
            return await self.handleVisionCardSet(context.shallowCopy().withVisionCardManager(vision_card_manager))
        match = WotvBotConstants.VISION_CARD_FETCH_BY_NAME_PATTERN.match(first_line_lower)
        if match:
            return await self.handleVisionCardFetchByName(context.shallowCopy().withMatch(match).withVisionCardManager(vision_card_manager))
        match = WotvBotConstants.VISION_CARD_ABILITY_SEARCH.match(first_line_lower)
        if match:
            return await self.handleVisionCardAbilitySearch(context.shallowCopy().withMatch(match).withVisionCardManager(vision_card_manager))
        if WotvBotConstants.VISION_CARD_DEBUG_PATTERN.match(first_line_lower):
            return await self.handleVisionCardDebug(context.shallowCopy().withVisionCardManager(vision_card_manager))
        # Unit and skill search commands.
        match = WotvBotConstants.FIND_SKILLS_BY_NAME_PATTERN.match(first_line_lower)
        if match:
            return await self.handleFindSkillsByName(context.shallowCopy().withMatch(match))
        match = WotvBotConstants.FIND_SKILLS_BY_DESCRIPTION_PATTERN.match(first_line_lower)
        if match:
            return await self.handleFindSkillsByDescription(context.shallowCopy().withMatch(match))
        match = WotvBotConstants.RICH_UNIT_SEARCH_PATTERN.match(first_line_lower)
        if match:
            return await self.handleRichUnitSearch(context.shallowCopy().withMatch(match))
        # Reminders, dice, predictions and schedule commands.
        match = WotvBotConstants.WHIMSY_REMINDER_PATTERN.match(first_line_lower)
        if match:
            return await self.handleWhimsyReminder(context.shallowCopy().withMatch(match))
        match = WotvBotConstants.ROLLDICE_PATTERN.match(first_line_lower)
        if match:
            return await self.handleRoll(context.shallowCopy().withMatch(match))
        # Predictions
        match = WotvBotConstants.PREDICTION_PATTERN_1.match(first_line_lower)
        if match:
            return await self.handlePrediction(context.shallowCopy().withMatch(match))
        match = WotvBotConstants.PREDICTION_PATTERN_2.match(first_line_lower)
        if match:
            return await self.handlePrediction(context.shallowCopy().withMatch(match))
        match = WotvBotConstants.PREDICTION_PATTERN_3.match(first_line_lower)
        if match:
            return await self.handlePrediction(context.shallowCopy().withMatch(match))
        match = WotvBotConstants.PREDICTION_PATTERN_4.match(first_line_lower)
        if match:
            return await self.handlePrediction(context.shallowCopy().withMatch(match))
        match = WotvBotConstants.DOUBLE_DROP_RATES_SCHEDULE_PATTERN_1.match(first_line_lower)
        if match:
            return await self.handleSchedule(context.shallowCopy().withMatch(match))
        match = WotvBotConstants.DOUBLE_DROP_RATES_SCHEDULE_PATTERN_2.match(first_line_lower)
        if match:
            return await self.handleMats(context.shallowCopy().withMatch(match))
        match = WotvBotConstants.DAILY_REMINDERS.match(first_line_lower)
        if match:
            return await self.handleDailyReminders(context.shallowCopy().withMatch(match))
        # Hidden utility command to look up the snowflake ID of your own user. This isn't secret or insecure, but it's also not common, so it isn't listed.
        if first_line_lower.startswith('!whoami'):
            return self.handleWhoAmI(context)
        # Hidden utility command to look up the snowflake ID of a member. This isn't secret or insecure, but it's also not common, so it isn't listed.
        match = WotvBotConstants.WHOIS_PATTERN.match(first_line_lower)
        if match:
            return await self.handleWhoIs(context.shallowCopy().withMatch(match))
        # Admin commands; the sandbox variants match against the original (mixed-case) content.
        if WotvBotConstants.ADMIN_ADD_ESPER_PATTERN.match(first_line_lower) or WotvBotConstants.SANDBOX_ADMIN_ADD_ESPER_PATTERN.match(message.content):
            return self.handleAdminAddEsper(context.shallowCopy().withEsperResonanceManager(esper_resonance_manager))
        if WotvBotConstants.ADMIN_ADD_UNIT_PATTERN.match(first_line_lower) or WotvBotConstants.SANDBOX_ADMIN_ADD_UNIT_PATTERN.match(message.content):
            return self.handleAdminAddUnit(context.shallowCopy().withEsperResonanceManager(esper_resonance_manager))
        if WotvBotConstants.ADMIN_ADD_VC_PATTERN.match(first_line_lower):
            return self.handleAdminAddVisionCard(context.shallowCopy().withVisionCardManager(vision_card_manager))
        if WotvBotConstants.ADMIN_ADD_USER_PATTERN.match(first_line_lower):
            return self.handleAdminAddUser(context.shallowCopy().withEsperResonanceManager(esper_resonance_manager).withVisionCardManager(vision_card_manager))
        # Fallback responses for malformed or unknown commands.
        if first_line_lower.startswith('!resonance'):
            responseText = '<@{0}>: Invalid !resonance command. Use !help for more information.'.format(from_id)
            return (responseText, None)
        if first_line_lower.startswith('!help'):
            responseText = WotvBotConstants.HELP.format(self.wotv_bot_config.esper_resonance_spreadsheet_id, self.wotv_bot_config.vision_card_spreadsheet_id)
            return (responseText, None)
        return ('<@{0}>: Invalid or unknown command. Use !help to see all supported commands and !admin-help to see special admin commands. '\
            'Please do this via a direct message to the bot, to avoid spamming the channel.'.format(from_id), None)
def handleTargetedResonanceLookupForSelf(self, context: CommandContextInfo) -> (str, str):
"""Handle !res command for self-lookup of a specific (unit, esper) tuple."""
unit_name = context.command_match.group(1).strip()
esper_name = context.command_match.group(2).strip()
print('resonance fetch from user %s#%s, for user %s, for unit %s, for esper %s' % (
context.from_name, context.from_discrim, context.from_name, unit_name, esper_name))
resonance, pretty_unit_name, pretty_esper_name = context.esper_resonance_manager.readResonance(None, context.from_id, unit_name, esper_name)
responseText = '<@{0}>: {1}/{2} has resonance {3}'.format(context.from_id, pretty_unit_name, pretty_esper_name, resonance)
return (responseText, None)
def handleTargetedResonanceLookupForOtherUser(self, context: CommandContextInfo) -> (str, str):
"""Handle !res command for lookup of a specific (unit, esper) tuple for a different user."""
target_user_name = context.command_match.group(1).strip()
unit_name = context.command_match.group(2).strip()
esper_name = context.command_match.group(3).strip()
print('resonance fetch from user %s#%s, for user %s, for unit %s, for esper %s' % (
context.from_name, context.from_discrim, target_user_name, unit_name, esper_name))
resonance, pretty_unit_name, pretty_esper_name = context.esper_resonance_manager.readResonance(target_user_name, None, unit_name, esper_name)
responseText = '<@{0}>: for user {1}, {2}/{3} has resonance {4}'.format(
context.from_id, target_user_name, pretty_unit_name, pretty_esper_name, resonance)
return (responseText, None)
def handleGeneralResonanceLookupForSelf(self, context: CommandContextInfo) -> (str, str):
"""Handle !res command for self-lookup of all resonance for a given unit or esper."""
target_name = context.command_match.group('target_name').strip()
print('resonance list fetch from user %s#%s, for target %s' % (context.from_name, context.from_discrim, target_name))
pretty_name, resonance_listing = context.esper_resonance_manager.readResonanceList(None, context.from_id, target_name)
responseText = '<@{0}>: resonance listing for {1}:\n{2}'.format(context.from_id, pretty_name, resonance_listing)
return (responseText, None)
def handleResonanceSet(self, context: CommandContextInfo) -> (str, str):
"""Handle !res-set command to set resonance for a specific unit and esper tuple."""
unit_name = context.command_match.group('unit').strip()
esper_name = context.command_match.group('esper').strip()
resonance_numeric_string = context.command_match.group('resonance_level').strip()
priority = None
if context.command_match.group('priority'):
priority = context.command_match.group('priority').strip()
comment = None
if context.command_match.group('comment'):
comment = context.command_match.group('comment').strip()
print('resonance set from user %s#%s, for unit %s, for esper %s, to resonance %s, with priority %s, comment %s' % (
context.from_name, context.from_discrim, unit_name, esper_name, resonance_numeric_string, priority, comment))
old_resonance, new_resonance, pretty_unit_name, pretty_esper_name = context.esper_resonance_manager.setResonance(
context.from_id, unit_name, esper_name, resonance_numeric_string, priority, comment)
responseText = '<@{0}>: {1}/{2} resonance has been set to {3} (was: {4})'.format(
context.from_id, pretty_unit_name, pretty_esper_name, new_resonance, old_resonance)
if (resonance_numeric_string and int(resonance_numeric_string) == 10):
# reaction = '\U0001F4AA' # CLDR: flexed biceps
reaction = '\U0001F3C6' # CLDR: trophy
else:
reaction = '\U00002705' # CLDR: check mark button
return (responseText, reaction)
def handleWhoAmI(self, context: CommandContextInfo) -> (str, str):
"""Handle !whoami command to fetch your own snowflake ID."""
responseText = '<@{id}>: Your snowflake ID is {id}'.format(id=context.from_id)
return (responseText, None)
async def handleWhoIs(self, context: CommandContextInfo) -> (str, str):
"""Handle !whois command to fetch the snowflake ID for a given user."""
original_match = WotvBotConstants.WHOIS_PATTERN.match(context.original_message.content) # Fetch original-case name
target_member_name = original_match.group('server_handle').strip()
# As of December 2020, possibly earlier, the following line no longer works:
# members = context.original_message.guild.members
# Instead have to fetch the list from the server, and enable the "SERVER MEMBERS INTENT" permission in the bot admin page on Discord.
members = await context.original_message.guild.fetch_members(limit=1000).flatten()
for member in members:
if member.name == target_member_name:
responseText = '<@{0}>: the snowflake ID for {1} is {2}'.format(context.from_id, target_member_name, member.id)
return (responseText, None)
responseText = '<@{0}>: no such member {1}'.format(context.from_id, target_member_name)
return (responseText, None)
def handleAdminAddEsper(self, context: CommandContextInfo) -> (str, str):
"""Handle !admin-add-esper and !sandbox-admin-add-esper commands to add a new esper to the resonance tracker."""
sandbox = True
match = WotvBotConstants.ADMIN_ADD_ESPER_PATTERN.match(context.original_message.content)
if match:
sandbox = False
else:
match = WotvBotConstants.SANDBOX_ADMIN_ADD_ESPER_PATTERN.match(context.original_message.content)
esper_name = match.group('name').strip()
esper_url = match.group('url').strip()
left_or_right_of = match.group('left_or_right_of').strip()
column = match.group('column').strip()
print('esper add (sandbox mode={6}) from user {0}#{1}, for esper {2}, url {3}, position {4}, column {5}'.format(
context.from_name, context.from_discrim, esper_name, esper_url, left_or_right_of, column, sandbox))
context.esper_resonance_manager.addEsperColumn(context.from_id, esper_name, esper_url, left_or_right_of, column, sandbox)
responseText = '<@{0}>: Added esper {1}!'.format(context.from_id, esper_name)
return (responseText, None)
def handleAdminAddUnit(self, context: CommandContextInfo) -> (str, str):
"""Handle !admin-add-unit and !sandbox-admin-add-unit commands to add a new unit to the resonance tracker."""
sandbox = True
match = WotvBotConstants.ADMIN_ADD_UNIT_PATTERN.match(context.original_message.content)
if match:
sandbox = False
else:
match = WotvBotConstants.SANDBOX_ADMIN_ADD_UNIT_PATTERN.match(context.original_message.content)
unit_name = match.group('name').strip()
unit_url = match.group('url').strip()
above_or_below = match.group('above_or_below').strip()
row1Based = match.group('row1Based').strip()
print('unit add (sandbox mode={6}) from user {0}#{1}, for unit {2}, url {3}, position {4}, row {5}'.format(
context.from_name, context.from_discrim, unit_name, unit_url, above_or_below, row1Based, sandbox))
context.esper_resonance_manager.addUnitRow(context.from_id, unit_name, unit_url, above_or_below, row1Based, sandbox)
responseText = '<@{0}>: Added unit {1}!'.format(context.from_id, unit_name)
return (responseText, None)
def handleAdminAddVisionCard(self, context: CommandContextInfo) -> (str, str):
"""Handle !admin-add-vc command to add a new vision card."""
match = WotvBotConstants.ADMIN_ADD_VC_PATTERN.match(context.original_message.content)
card_name = match.group('name').strip()
card_url = match.group('url').strip()
above_or_below = match.group('above_or_below').strip()
row1Based = match.group('row1Based').strip()
print('vc add from user {0}#{1}, for card {2}, url {3}, position {4}, row {5}'.format(
context.from_name, context.from_discrim, card_name, card_url, above_or_below, row1Based))
context.vision_card_manager.addVisionCardRow(context.from_id, card_name, card_url, above_or_below, row1Based)
responseText = '<@{0}>: Added card {1}!'.format(context.from_id, card_name)
return (responseText, None)
def handleAdminAddUser(self, context: CommandContextInfo) -> (str, str):
"""Handle !admin-add-user command to add a new unit to the resonance tracker and the administrative spreadsheet."""
if not AdminUtils.isAdmin(self.wotv_bot_config.spreadsheet_app, self.wotv_bot_config.access_control_spreadsheet_id, context.from_id):
raise ExposableException('You do not have permission to add a user.')
match = WotvBotConstants.ADMIN_ADD_USER_PATTERN.match(context.original_message.content)
snowflake_id = match.group('snowflake_id').strip()
nickname = match.group('nickname').strip()
user_type = match.group('user_type').strip().lower()
is_admin = False
if user_type == 'admin':
is_admin = True
print('user add from user {0}#{1}, for snowflake_id {2}, nickname {3}, is_admin {4}'.format(
context.from_name, context.from_discrim, snowflake_id, nickname, is_admin))
AdminUtils.addUser(self.wotv_bot_config.spreadsheet_app, self.wotv_bot_config.access_control_spreadsheet_id, nickname, snowflake_id, is_admin)
context.esper_resonance_manager.addUser(nickname)
context.vision_card_manager.addUser(nickname)
responseText = '<@{0}>: Added user {1}!'.format(context.from_id, nickname)
return (responseText, None)
    async def handleVisionCardDebug(self, context: CommandContextInfo) -> (str, str):
        """Handle !xocr and !xocr-debug commands to perform OCR on a Vision Card.

        Thin wrapper: delegates to handleVisionCardSet with is_debug=True so the
        intermediate OCR image and raw extracted text are sent to the channel.
        """
        return await self.handleVisionCardSet(context, is_debug=True)
    async def handleVisionCardSet(self, context: CommandContextInfo, is_debug: bool = False) -> (str, str):
        """Handle !vc-set: OCR a vision card screenshot attachment and store the result.

        :param context: command context; the message must carry at least one image attachment
        :param is_debug: when True, also send intermediate OCR images/raw text to the
            channel and skip persisting the extracted card
        :return: (response text, reaction emoji or None)
        """
        # Try to extract text from a vision card screenshot that is sent as an attachment to this message.
        # Only the first attachment is considered.
        url = context.original_message.attachments[0].url
        print('Vision Card OCR request from user %s#%s, for url %s' % (context.from_name, context.from_discrim, url))
        screenshot = None
        # Integration-test hook: read the "url" as a local filesystem path instead of downloading.
        if self.INTEG_TEST_LOCAL_FILESYSTEM_READ_FOR_VISION_CARD:
            screenshot = VisionCardOcrUtils.loadScreenshotFromFilesystem(url)
        else:
            screenshot = VisionCardOcrUtils.downloadScreenshotFromUrl(url)
        vision_card = VisionCardOcrUtils.extractVisionCardFromScreenshot(screenshot, is_debug)
        if is_debug:
            # Send the merged intermediate OCR images plus the raw recognized text for inspection.
            combined_image = VisionCardOcrUtils.mergeDebugImages(vision_card)
            buffer = io.BytesIO()
            combined_image.save(buffer, format='PNG')
            buffer.seek(0)
            temp_file = discord.File(buffer, filename='Intermediate OCR Debug.png')
            await context.original_message.channel.send('Intermediate OCR Debug. Raw info text:\n```{0}```\nRaw stats text: ```{1}```'.format(
                vision_card.info_debug_raw_text,
                vision_card.stats_debug_raw_text), file=temp_file)
        # Print errors to the console, but do not return them as we cannot guarantee that there is no sensitive
        # information in here, such as possible library exceptions, i/o exceptions, etceteras.
        if vision_card.error_messages is not None and len(vision_card.error_messages) > 0:
            print('errors found during vision card conversion: ' + str(vision_card.error_messages))
        reaction = None
        if vision_card.successfully_extracted is True:
            responseText = '<@{0}>: {1}'.format(context.from_id, vision_card.prettyPrint())
            # Debug runs are read-only: do not persist the card to the spreadsheet.
            if not is_debug:
                context.vision_card_manager.setVisionCard(context.from_id, vision_card)
            reaction = '\U00002705' # CLDR: check mark button
        else:
            responseText = '<@{0}>: Vision card extraction has failed. You may try again with !vc-debug for a clue about what has gone wrong'.format(
                context.from_id)
        return (responseText, reaction)
async def handleVisionCardFetchByName(self, context: CommandContextInfo) -> (str, str):
"""Handle !vc command for self-lookup of a given vision card by name"""
target_name = context.command_match.group('target_name').strip()
print('vision card fetch from user %s#%s, for target %s' % (context.from_name, context.from_discrim, target_name))
vision_card = context.vision_card_manager.readVisionCardByName(None, context.from_id, target_name)
responseText = '<@{0}>: Vision Card:\n{1}'.format(context.from_id, str(vision_card.prettyPrint()))
return (responseText, None)
async def handleVisionCardAbilitySearch(self, context: CommandContextInfo) -> (str, str):
"""Handle !vc-ability command for self-lookup of a given vision card by party/bestowed ability fuzzy-match"""
search_text = context.command_match.group('search_text').strip()
print('vision card ability search from user %s#%s, for text %s' % (context.from_name, context.from_discrim, search_text))
vision_cards = context.vision_card_manager.searchVisionCardsByAbility(None, context.from_id, search_text)
if len(vision_cards) == 0:
responseText = '<@{0}>: No vision cards matched the ability search.'.format(context.from_id)
return (responseText, None)
responseText = '<@{0}>: Matching Vision Cards:\n'.format(context.from_id)
for vision_card in vision_cards:
responseText += ' ' + vision_card.Name + '\n'
responseText += ' Party Ability: ' + vision_card.PartyAbility + '\n'
for bestowed_effect in vision_card.BestowedEffects:
responseText += ' Bestowed Effect: ' + bestowed_effect + '\n'
return (responseText, None)
@staticmethod
def rarityAndElementParenthetical(unit: WotvUnit) -> str:
"""Generate a parenthetical string with the unit's rarity and element(s)"""
text = '(' + str(unit.rarity) + ' rarity, '
if not unit.elements:
return text + 'no element)'
text += unit.elements[0]
if len(unit.elements) > 1:
for element in unit.elements[1:]:
text += '/' + str(element)
text += ' element'
if len(unit.elements) > 1:
text += 's'
return text + ')'
def prettyPrintUnitSkillSearchResult(self, result: UnitSkillSearchResult):
"""Print a useful, human-readable description of the skill match including the unit name, element, rarity, the skill name,
and how the skill is unlocked."""
if result.is_master_ability:
return 'Master ability for ' + result.unit.name + ' ' + WotvBot.rarityAndElementParenthetical(result.unit) + ': ' + result.skill.description
if result.is_limit_burst:
return 'Limit burst (' + result.skill.name + ') for ' + result.unit.name + ' ' + WotvBot.rarityAndElementParenthetical(result.unit) + ': ' + result.skill.description
text = 'Skill "' + result.skill.name + '" learned by ' + result.unit.name
text += ' ' + WotvBot.rarityAndElementParenthetical(result.unit)
text += ' with job ' + result.board_skill.unlocked_by_job.name + ' at job level ' + str(result.board_skill.unlocked_by_job_level)
text += ': ' + result.skill.description
return text
def prettyPrintUnitJobSearchResult(self, result: UnitJobSearchResult):
"""Print a useful, human-readable description of the job match including the unit name, element, rarity, and job name."""
text = 'Job "' + result.job.name + '" learned by ' + result.unit.name
text += ' ' + WotvBot.rarityAndElementParenthetical(result.unit)
return text
def prettyPrintUnitSearchResult(self, result: UnitSearchResult):
"""Print a useful, human-readable description of any search result, as appropriate to the type."""
if hasattr(result, 'is_master_ability'):
return self.prettyPrintUnitSkillSearchResult(result)
elif hasattr(result, 'job'):
return self.prettyPrintUnitJobSearchResult(result)
else:
return result.unit.name + ' ' + WotvBot.rarityAndElementParenthetical(result.unit)
@staticmethod
def getExtraCommandLines(context: CommandContextInfo):
"""Extract all extra non-empty lines from a command and return them as a list."""
lines = context.original_message.content.splitlines()
extra_lines = []
if len(lines) > 1:
for line in lines[1:]:
line = line.strip()
if line:
extra_lines.append(line)
return extra_lines
# Deprecated - Use rich unit search instead, e.g. "!unit-search skill-name <search_text>"
async def handleFindSkillsByName(self, context: CommandContextInfo) -> (str, str):
"""Handle !skills-by-name command"""
search_text = context.command_match.group('search_text').strip()
print('skills-by-name search from user %s#%s, for text %s' % (context.from_name, context.from_discrim, search_text))
refinements = WotvBot.getExtraCommandLines(context)
if len(refinements) > 0:
print(' refinements: ' + str(refinements))
results = DataFileSearchUtils.richUnitSearch(self.wotv_bot_config.data_files, 'skill-name', search_text, refinements)
if len(results) == 0:
responseText = '<@{0}>: No skills matched the search.'.format(context.from_id)
return (responseText, None)
responseText = '<@{0}>: Matching Skills:\n'.format(context.from_id)
results = sorted(results, key=lambda one_result : one_result.unit.name)
truncated = False
if len(results) > 25:
results = results[:25]
truncated = True
for result in results:
responseText += self.prettyPrintUnitSearchResult(result) + '\n'
if truncated:
responseText += 'Results truncated because there were too many.'
return (responseText.strip(), None)
# Deprecated - Use rich unit search instead, e.g. "!unit-search skill-desc <search_text>"
async def handleFindSkillsByDescription(self, context: CommandContextInfo) -> (str, str):
"""Handle !skills-by-desc command"""
search_text = context.command_match.group('search_text').strip()
print('skills-by-description search from user %s#%s, for text %s' % (context.from_name, context.from_discrim, search_text))
refinements = WotvBot.getExtraCommandLines(context)
if len(refinements) > 0:
print(' refinements: ' + str(refinements))
results = DataFileSearchUtils.richUnitSearch(self.wotv_bot_config.data_files, 'skill-desc', search_text, refinements)
if len(results) == 0:
responseText = '<@{0}>: No skills matched the search.'.format(context.from_id)
return (responseText, None)
responseText = '<@{0}>: Matching Skills:\n'.format(context.from_id)
results = sorted(results, key=lambda one_result : one_result.unit.name)
truncated = False
if len(results) > 25:
results = results[:25]
truncated = True
for result in results:
responseText += self.prettyPrintUnitSearchResult(result) + '\n'
if truncated:
responseText += 'Results truncated because there were too many.'
return (responseText.strip(), None)
async def handleRichUnitSearch(self, context: CommandContextInfo) -> (str, str):
"""Handle !unit-search command"""
search_type = context.command_match.group('search_type').strip()
search_text = None
if search_type != 'all':
search_text = context.command_match.group('search_text').strip()
print('unit search from user %s#%s, type %s, text %s' % (context.from_name, context.from_discrim, search_type, search_text))
refinements = WotvBot.getExtraCommandLines(context)
if len(refinements) > 0:
print(' refinements: ' + str(refinements))
results = DataFileSearchUtils.richUnitSearch(self.wotv_bot_config.data_files, search_type, search_text, refinements)
if len(results) == 0:
responseText = '<@{0}>: No units matched the search.'.format(context.from_id)
return (responseText, None)
responseText = '<@{0}>: Results:\n'.format(context.from_id)
results = sorted(results, key=lambda one_result : one_result.unit.name)
truncated = False
if len(results) > 25:
results = results[:25]
truncated = True
for result in results:
responseText += self.prettyPrintUnitSearchResult(result) + '\n'
if truncated:
responseText += 'Results truncated because there were too many.'
return (responseText.strip(), None)
@staticmethod
async def whimsyShopNrgReminderCallback(target_channel_id: str, from_id: str):
"""Handles a reminder callback for a whimsy shop nrg reminder."""
discord_client: discord.Client = WotvBot.getStaticInstance().wotv_bot_config.discord_client
text_channel: discord.TextChannel = discord_client.get_channel(target_channel_id)
#discord_client.loop.create_task(text_channel.send(content = '<@{0}>: This is your requested whimsy shop reminder: NRG spent will now start counting towards the next Whimsy Shop.'.format(from_id)))
await text_channel.send(content = '<@{0}>: This is your requested whimsy shop reminder: NRG spent will now start counting towards the next Whimsy Shop.'.format(from_id))
@staticmethod
async def whimsyShopSpawnReminderCallback(target_channel_id: str, from_id: str):
"""Handles a reminder callback for a whimsy shop spawn reminder."""
discord_client: discord.Client = WotvBot.getStaticInstance().wotv_bot_config.discord_client
text_channel: discord.TextChannel = discord_client.get_channel(target_channel_id)
#discord_client.loop.create_task(text_channel.send(content = '<@{0}>: This is your requested whimsy shop reminder: The Whimsy Shop is ready to spawn again.'.format(from_id)))
await text_channel.send(content = '<@{0}>: This is your requested whimsy shop reminder: The Whimsy Shop is ready to spawn again.'.format(from_id))
    async def handleWhimsyReminder(self, context: CommandContextInfo) -> (str, str):
        """Handle !whimsy command for a whimsy reminder.

        Supported subcommands: set-reminder, when, cancel. With no subcommand,
        defaults to 'when' if a reminder is already pending, else 'set-reminder'.
        :return: (response text, None)
        """
        reminders = self.wotv_bot_config.reminders # Shorthand
        owner_id = str(context.from_id) # Shorthand
        command = '<none>'
        if context.command_match.group('command'):
            command = context.command_match.group('command').strip()
        print('Whimsy reminder request from user %s#%s, command %s' % (context.from_name, context.from_discrim, command))
        # Fallback response for unrecognized subcommands.
        # NOTE(review): "for for" typo in this user-facing string.
        responseText = '<@{0}>: Unknown/unsupported !whimsy command. Use !help for for more information.'.format(context.from_id)
        # Default behavior - be smart. If the user has got a reminder set, don't overwrite it unless they pass set-reminder as the command.
        # If they do not have a reminder set, go ahead and set it now.
        append_overwrite_reminder_message = False # Whether or not to add some reminder text to the message
        if command == '<none>':
            # Check if an existing reminder is set. If so prompt to overwrite...
            if reminders.hasPendingWhimsyNrgReminder(owner_id) or reminders.hasPendingWhimsySpawnReminder(owner_id):
                command = 'when'
                append_overwrite_reminder_message = True # Remind the user how to overwrite the current timer.
            else:
                command = 'set-reminder' # Assume the user wants to set a reminder.
        if command == 'set-reminder':
            # Setting a new reminder implicitly replaces any outstanding one.
            append_existing_canceled_message = reminders.hasPendingWhimsyNrgReminder(owner_id) or reminders.hasPendingWhimsySpawnReminder(owner_id)
            nrg_callback: callable = WotvBot.whimsyShopNrgReminderCallback
            nrg_params = [context.original_message.channel.id, owner_id]
            spawn_callback: callable = WotvBot.whimsyShopSpawnReminderCallback
            spawn_params = nrg_params
            reminders.addWhimsyReminder(context.from_name, owner_id, nrg_callback, nrg_params, spawn_callback, spawn_params,
                self.whimsy_shop_nrg_reminder_delay_ms, self.whimsy_shop_spawn_reminder_delay_ms)
            responseText = '<@{0}>: Your reminder has been set.'.format(context.from_id)
            if append_existing_canceled_message:
                responseText += ' Your previous outstanding reminder has been discarded.'
        elif command == 'when':
            # Report the time remaining on whichever reminder phase is pending (NRG first, then spawn).
            if reminders.hasPendingWhimsyNrgReminder(owner_id):
                time_left_minutes = int(reminders.timeTillWhimsyNrgReminder(owner_id) / 60)
                responseText = '<@{0}>: NRG spent will start counting towards the next Whimsy Shop in about {1} minutes.'.format(owner_id, str(time_left_minutes))
                if append_overwrite_reminder_message:
                    responseText += ' To force the timer to reset to 60 minutes *immediately*, use the command "!whimsy set-reminder".'
            elif reminders.hasPendingWhimsySpawnReminder(owner_id):
                time_left_minutes = int(reminders.timeTillWhimsySpawnReminder(owner_id) / 60)
                responseText = '<@{0}>: The Whimsy Shop will be ready to spawn again in about {1} minutes.'.format(owner_id, str(time_left_minutes))
                if append_overwrite_reminder_message:
                    responseText += ' To force the timer to reset to 60 minutes *immediately*, use the command "!whimsy set-reminder".'
            else:
                responseText = '<@{0}>: You do not currently have a whimsy reminder set.'.format(context.from_id)
        elif command == 'cancel':
            reminders.cancelWhimsyReminders(owner_id)
            responseText = '<@{0}>: Any and all outstanding whimsy reminders have been canceled.'.format(context.from_id)
        return (responseText, None)
async def handleRoll(self, context: CommandContextInfo) -> (str, str):
"""Handle !roll command to simulate a dice roll."""
spec: DiceSpec = DiceSpec.parse(context.command_match.group('dice_spec'))
print('Dice roll request from user %s#%s, spec %s' % (context.from_name, context.from_discrim, str(spec)))
if spec.num_dice > 50:
responseText = '<@{0}>: Too many dice in !roll command (max 50). Use !help for for more information.'.format(context.from_id)
else:
results: List[int] = Rolling.rollDice(spec)
total = 0
for one_roll in results:
total += one_roll
responseText = '<@{0}>: Rolled a total of {1}. Dice values were: {2}'.format(context.from_id, str(total), str(results))
return (responseText.strip(), None)
async def handlePrediction(self, context: CommandContextInfo) -> (str, str):
"""Handle !predict/astrologize/divine/foretell (etc) command to make a funny prediction."""
query = context.command_match.group('query')
print('Prediction request from user %s#%s, query %s' % (context.from_name, context.from_discrim, str(query)))
responseText = '<@{0}>: {1}'.format(context.from_id, self.predictions.predict(query))
return (responseText.strip(), None)
async def handleSchedule(self, context: CommandContextInfo) -> (str, str):
"""Handle a request for the weekly schedule."""
print('Schedule request from user %s#%s' % (context.from_name, context.from_discrim))
responseText = '<@{0}>:\n{1}'.format(context.from_id, WeeklyEventSchedule.getDoubleDropRateSchedule('** >> ', ' << **'))
return (responseText.strip(), None)
async def handleMats(self, context: CommandContextInfo) -> (str, str):
"""Handle a request for the current double-drop rate room."""
print('Mats request from user %s#%s' % (context.from_name, context.from_discrim))
responseText = '<@{0}>:\n'.format(context.from_id)
responseText += 'Today: ' + WeeklyEventSchedule.getTodaysDoubleDropRateEvents() + '\n'
responseText += 'Tomorrow: ' + WeeklyEventSchedule.getTomorrowsDoubleDropRateEvents() + '\n'
responseText += 'For the full schedule, use !schedule.'
return (responseText.strip(), None)
    async def createOrResetPeriodicStatusUpdateCallback(self):
        """Create or reset the status update callback for the entire bot.

        Registers WotvBot.periodicStatusUpdateCallback with the reminders
        subsystem, replacing any previously-registered periodic callback.
        """
        self.wotv_bot_config.reminders.createOrResetPeriodicStatusUpdateCallback(WotvBot.periodicStatusUpdateCallback)
@staticmethod
async def periodicStatusUpdateCallback():
"""Handles a callback for a periodic status update."""
bot: WotvBot = WotvBot.getStaticInstance()
discord_client: discord.Client = bot.wotv_bot_config.discord_client
new_status = WeeklyEventSchedule.getTodaysDoubleDropRateEvents()
if bot.last_status is None or bot.last_status != new_status:
print('Updating bot status to: ' + new_status)
# Apparently bots cannot use a custom status so gotta stick with a regular one like "Playing" (Game)
await discord_client.change_presence(activity=discord.Game(name=new_status))
bot.last_status = new_status
@staticmethod
async def dailyReminderCallback(target_channel_id: str, from_id: str, requested_reminders: List[str]):
"""Handles a reminder callback for daily reminders."""
discord_client: discord.Client = WotvBot.getStaticInstance().wotv_bot_config.discord_client
text_channel: discord.TextChannel = discord_client.get_channel(target_channel_id)
reminder_text = '<@{0}>: This is your requested daily reminder. Cancel daily reminders with "!daily-reminders none" or use "!help".'.format(from_id)
if 'mats' in requested_reminders:
reminder_text += '\n Today\'s daily double rate drops are: ' + WeeklyEventSchedule.getTodaysDoubleDropRateEvents()
await text_channel.send(content = reminder_text)
    async def handleDailyReminders(self, context: CommandContextInfo) -> (str, str):
        """Handle !daily-reminders command for various daily reminders, such as double-drop-rates.

        Accepts a comma-separated list of reminder names ('mats'), the literal
        'none' to cancel, or no argument to report the current configuration.
        :return: (response text, None)
        """
        reminders = self.wotv_bot_config.reminders # Shorthand
        owner_id = str(context.from_id) # Shorthand
        reminder_list_str = '<default>'
        if context.command_match.group('reminder_list'):
            reminder_list_str = context.command_match.group('reminder_list').strip()
        print('Daily reminders request from user %s#%s, reminder list %s' % (context.from_name, context.from_discrim, reminder_list_str))
        # Fallback response for unrecognized input.
        # NOTE(review): "for for" typo in this user-facing string.
        responseText = '<@{0}>: Unknown/unsupported !daily-reminders command. Use !help for for more information.'.format(context.from_id)
        requested_reminders: List[str] = reminder_list_str.split(',')
        configured_reminders_message = '<@{0}>: Your daily reminders have been configured:'.format(context.from_id)
        # Default behavior - be smart. If the user has got a reminder set, don't overwrite it unless they pass "none" as the list.
        if reminder_list_str == '<default>':
            if reminders.hasDailyReminder(owner_id):
                responseText = '<@{0}>: You have daily reminders configured. To clear them, use "!daily-reminders none".'.format(context.from_id)
            else:
                responseText = '<@{0}>: You do not currently have daily reminders configured. Use !help for more information.'.format(context.from_id)
        elif reminder_list_str == 'none':
            reminders.cancelDailyReminder(owner_id)
            responseText = '<@{0}>: Your daily reminders have been canceled.'.format(context.from_id)
        else:
            # Collect the recognized reminder names; unrecognized names are silently ignored.
            added_reminders = []
            if 'mats' in requested_reminders:
                configured_reminders_message += '\n  daily double-drop rate reminder ("mats")'
                added_reminders.append('mats')
            callback: callable = WotvBot.dailyReminderCallback
            callback_params = [context.original_message.channel.id, owner_id, added_reminders]
            reminders.addDailyReminder(context.from_name, owner_id, callback, callback_params)
            responseText = configured_reminders_message
        return (responseText, None)
This is primarily intended for internal use cases where a static method is required, such as the callback for a "apscheduler"-module task such as a reminder that is being invoked asynchronously and potentially across different instances of the bot process where the specific instance of the bot is irrelevant. Process the request and produce a response. # Bail out early if anything looks insane. # Set up the context used in handling every possible command. # TODO: Clean up these fields that are not part of the CommandContextInfo object. # TODO: Hold these references longer after cleaning up the rest of the code, in an application context. # To support multi-line commands, we only match the command itself against the first line. # Predictions # Hidden utility command to look up the snowflake ID of your own user. This isn't secret or insecure, but it's also not common, so it isn't listed. # Hidden utility command to look up the snowflake ID of a member. This isn't secret or insecure, but it's also not common, so it isn't listed. Handle !res command for self-lookup of a specific (unit, esper) tuple. #%s, for user %s, for unit %s, for esper %s' % ( Handle !res command for lookup of a specific (unit, esper) tuple for a different user. #%s, for user %s, for unit %s, for esper %s' % ( Handle !res command for self-lookup of all resonance for a given unit or esper. #%s, for target %s' % (context.from_name, context.from_discrim, target_name)) Handle !res-set command to set resonance for a specific unit and esper tuple. #%s, for unit %s, for esper %s, to resonance %s, with priority %s, comment %s' % ( # reaction = '\U0001F4AA' # CLDR: flexed biceps # CLDR: trophy # CLDR: check mark button Handle !whoami command to fetch your own snowflake ID. Handle !whois command to fetch the snowflake ID for a given user. 
# Fetch original-case name # As of December 2020, possibly earlier, the following line no longer works: # members = context.original_message.guild.members # Instead have to fetch the list from the server, and enable the "SERVER MEMBERS INTENT" permission in the bot admin page on Discord. Handle !admin-add-esper and !sandbox-admin-add-esper commands to add a new esper to the resonance tracker. #{1}, for esper {2}, url {3}, position {4}, column {5}'.format( Handle !admin-add-unit and !sandbox-admin-add-unit commands to add a new unit to the resonance tracker. #{1}, for unit {2}, url {3}, position {4}, row {5}'.format( Handle !admin-add-vc command to add a new vision card. #{1}, for card {2}, url {3}, position {4}, row {5}'.format( Handle !admin-add-user command to add a new unit to the resonance tracker and the administrative spreadsheet. #{1}, for snowflake_id {2}, nickname {3}, is_admin {4}'.format( Handle !xocr and !xocr-debug commands to perform OCR on a Vision Card. Handle !vc-set # Try to extract text from a vision card screenshot that is sent as an attachment to this message. #%s, for url %s' % (context.from_name, context.from_discrim, url)) # Print errors to the console, but do not return them as we cannot guarantee that there is no sensitive # information in here, such as possible library exceptions, i/o exceptions, etceteras. # CLDR: check mark button Handle !vc command for self-lookup of a given vision card by name #%s, for target %s' % (context.from_name, context.from_discrim, target_name)) Handle !vc-ability command for self-lookup of a given vision card by party/bestowed ability fuzzy-match #%s, for text %s' % (context.from_name, context.from_discrim, search_text)) Generate a parenthetical string with the unit's rarity and element(s) Print a useful, human-readable description of the skill match including the unit name, element, rarity, the skill name, and how the skill is unlocked. 
Print a useful, human-readable description of the job match including the unit name, element, rarity, and job name. Print a useful, human-readable description of any search result, as appropriate to the type. Extract all extra non-empty lines from a command and return them as a list. # Deprecated - Use rich unit search instead, e.g. "!unit-search skill-name <search_text>" Handle !skills-by-name command #%s, for text %s' % (context.from_name, context.from_discrim, search_text)) # Deprecated - Use rich unit search instead, e.g. "!unit-search skill-desc <search_text>" Handle !skills-by-desc command #%s, for text %s' % (context.from_name, context.from_discrim, search_text)) Handle !unit-search command #%s, type %s, text %s' % (context.from_name, context.from_discrim, search_type, search_text)) Handles a reminder callback for a whimsy shop nrg reminder. #discord_client.loop.create_task(text_channel.send(content = '<@{0}>: This is your requested whimsy shop reminder: NRG spent will now start counting towards the next Whimsy Shop.'.format(from_id))) Handles a reminder callback for a whimsy shop spawn reminder. #discord_client.loop.create_task(text_channel.send(content = '<@{0}>: This is your requested whimsy shop reminder: The Whimsy Shop is ready to spawn again.'.format(from_id))) Handle !whimsy command for a whimsy reminder # Shorthand # Shorthand #%s, command %s' % (context.from_name, context.from_discrim, command)) # Default behavior - be smart. If the user has got a reminder set, don't overwrite it unless they pass set-reminder as the command. # If they do not have a reminder set, go ahead and set it now. # Whether or not to add some reminder text to the message # Check if an existing reminder is set. If so prompt to overwrite... # Remind the user how to overwrite the current timer. # Assume the user wants to set a reminder. Handle !roll command to simulate a dice roll. 
#%s, spec %s' % (context.from_name, context.from_discrim, str(spec))) Handle !predict/astrologize/divine/foretell (etc) command to make a funny prediction. #%s, query %s' % (context.from_name, context.from_discrim, str(query))) Handle a request for the weekly schedule. #%s' % (context.from_name, context.from_discrim)) Handle a request for the current double-drop rate room. #%s' % (context.from_name, context.from_discrim)) Create or reset the status update callback for the entire bot. Handles a callback for a periodic status update. # Apparently bots cannot use a custom status so gotta stick with a regular one like "Playing" (Game) Handles a reminder callback for daily reminders. Handle !daily-reminders command for various daily reminders, such as double-drop-rates # Shorthand # Shorthand #%s, reminder list %s' % (context.from_name, context.from_discrim, reminder_list_str)) # Default behavior - be smart. If the user has got a reminder set, don't overwrite it unless they pass "none" as the list. | 2.155269 | 2 |
nits.py | Farhad-Mrkm/NCE_ICC-2022 | 1 | 6617029 | <filename>nits.py<gh_stars>1-10
#Author__Farhad_Mirkarimi -*- coding: utf-8 -*-
import os
import h5py
import glob, os
import numpy as np
import matplotlib.pyplot as plt
from numpy import mean
from numpy import std
import torch
import torch.nn as nn
from tqdm.auto import tqdm, trange
from numpy.random import default_rng
import torch.nn.functional as F
def mlp(dim, hidden_dim, output_dim, layers, activation):
"""Create a mlp from the configurations."""
activation = {
'relu': nn.ReLU,
'lrelu': nn.LeakyReLU
}[activation]
seq = [nn.Linear(dim, hidden_dim), activation()]
for _ in range(layers):
seq += [nn.Linear(hidden_dim, hidden_dim), activation()]
seq += [nn.Linear(hidden_dim, output_dim)]
return nn.Sequential(*seq)
class PeakConstraint(nn.Module):
"""Implements an activation for peak constraint """
def __init__(self, peak, **extra_kwargs):
super(PeakConstraint, self).__init__()
self.peak_activation = nn.Threshold(-peak, -peak)
def forward(self, x):
x = self.peak_activation(x)
neg1 = torch.tensor(-1.0)
x = neg1 * x
x = self.peak_activation(x)
x = neg1 * x
return x
class NIT(nn.Module):
"""NIT """
def __init__(self, dim, hidden_dim, layers, activation, avg_P,chan_type, peak=None,positive=None, **extra_kwargs):
super(NIT, self).__init__()
self._f = mlp(dim, hidden_dim, dim, layers, activation)
self.avg_P = torch.tensor(avg_P) # average power constraint
self.peak = peak # peak constraint
self.positive=positive
self.chan_type=chan_type
if self.peak is not None:
self.peak_activation = PeakConstraint(peak)
def forward(self, x):
if self.chan_type=='conts_awgn':
unnorm_tx = self._f(x)
norm_tx = unnorm_tx/torch.sqrt(torch.mean(torch.pow(unnorm_tx,2.0)))*torch.sqrt(self.avg_P)
if self.peak is not None:
norm_tx = self.peak_activation(norm_tx)
if self.positive is not None:
norm_tx=F.softplus(norm_tx)
return norm_tx
if self.positive is not None:
norm_tx=(torch.cosh(norm_tx))-1.0
#norm_tx=self.ps(norm_tx)
if self.peak is not None:
norm_tx=self.peak_activation(norm_tx)
return norm_tx
class _Channel(nn.Module):
"""AWGN Channel """
def __init__(self,type1):
super(_Channel, self).__init__()
self.stdev = torch.tensor(1.0,dtype=torch.float)
self.type1=type1
##
def forward(self, x):
if self.type1=='conts_awgn':
noise = torch.randn_like(x) * self.stdev
return x + noise
| <filename>nits.py<gh_stars>1-10
#Author__Farhad_Mirkarimi -*- coding: utf-8 -*-
import os
import h5py
import glob, os
import numpy as np
import matplotlib.pyplot as plt
from numpy import mean
from numpy import std
import torch
import torch.nn as nn
from tqdm.auto import tqdm, trange
from numpy.random import default_rng
import torch.nn.functional as F
def mlp(dim, hidden_dim, output_dim, layers, activation):
"""Create a mlp from the configurations."""
activation = {
'relu': nn.ReLU,
'lrelu': nn.LeakyReLU
}[activation]
seq = [nn.Linear(dim, hidden_dim), activation()]
for _ in range(layers):
seq += [nn.Linear(hidden_dim, hidden_dim), activation()]
seq += [nn.Linear(hidden_dim, output_dim)]
return nn.Sequential(*seq)
class PeakConstraint(nn.Module):
"""Implements an activation for peak constraint """
def __init__(self, peak, **extra_kwargs):
super(PeakConstraint, self).__init__()
self.peak_activation = nn.Threshold(-peak, -peak)
def forward(self, x):
x = self.peak_activation(x)
neg1 = torch.tensor(-1.0)
x = neg1 * x
x = self.peak_activation(x)
x = neg1 * x
return x
class NIT(nn.Module):
"""NIT """
def __init__(self, dim, hidden_dim, layers, activation, avg_P,chan_type, peak=None,positive=None, **extra_kwargs):
super(NIT, self).__init__()
self._f = mlp(dim, hidden_dim, dim, layers, activation)
self.avg_P = torch.tensor(avg_P) # average power constraint
self.peak = peak # peak constraint
self.positive=positive
self.chan_type=chan_type
if self.peak is not None:
self.peak_activation = PeakConstraint(peak)
def forward(self, x):
if self.chan_type=='conts_awgn':
unnorm_tx = self._f(x)
norm_tx = unnorm_tx/torch.sqrt(torch.mean(torch.pow(unnorm_tx,2.0)))*torch.sqrt(self.avg_P)
if self.peak is not None:
norm_tx = self.peak_activation(norm_tx)
if self.positive is not None:
norm_tx=F.softplus(norm_tx)
return norm_tx
if self.positive is not None:
norm_tx=(torch.cosh(norm_tx))-1.0
#norm_tx=self.ps(norm_tx)
if self.peak is not None:
norm_tx=self.peak_activation(norm_tx)
return norm_tx
class _Channel(nn.Module):
"""AWGN Channel """
def __init__(self,type1):
super(_Channel, self).__init__()
self.stdev = torch.tensor(1.0,dtype=torch.float)
self.type1=type1
##
def forward(self, x):
if self.type1=='conts_awgn':
noise = torch.randn_like(x) * self.stdev
return x + noise
| en | 0.654057 | #Author__Farhad_Mirkarimi -*- coding: utf-8 -*- Create a mlp from the configurations. Implements an activation for peak constraint NIT # average power constraint # peak constraint #norm_tx=self.ps(norm_tx) AWGN Channel ## | 2.101996 | 2 |
main.py | Tominous/Among-Us-Manager | 1 | 6617030 | import asyncio
import psycopg2
import discord
import random
import os
from objects import *
from discord.ext import commands
KEY = os.environ.get('KEY')
DATABASE_URL = os.environ.get('DATABASE_URL')
#INTENTS
intents = discord.Intents.default()
#intents.members = True
intents.typing = False
#LOAD DATABASE
try:
conn = psycopg2.connect(DATABASE_URL, sslmode='require')
c = conn.cursor()
except:
print("Failed to connect to database")
def get_prefix(client, message):
try:
guildID = message.guild.id
sql_query = '''SELECT * FROM prefixes WHERE id = (%s)'''
c.execute(sql_query, (guildID,))
prefix = c.fetchone()
if prefix is None:
return 'am.'
else:
return prefix[1]
except:
return 'am.'
client = commands.AutoShardedBot(command_prefix = get_prefix, intents=intents, chunk_guilds_at_startup=False)
client.remove_command('help')
#LOAD COGS
for filename in os.listdir('./cogs'):
if filename.endswith('.py'):
client.load_extension(f'cogs.{filename[:-3]}')
#PREFIX COMMANDS
@client.command()
async def prefix(ctx, prefix):
userPrefix = str(prefix)
try:
guildID = ctx.message.guild.id
except AttributeError:
await ctx.send("Changing prefix is only possible in servers!")
return
sql_query = '''SELECT FROM prefixes WHERE id = (%s)'''
c.execute(sql_query, (guildID,))
prefix = c.fetchone()
#Check if prefix exists
if prefix is not None:
#Delete prior prefix
sql_execute = '''DELETE FROM prefixes WHERE id = (%s)'''
c.execute(sql_execute, (guildID,))
#Add prefix to list
sql_execute = '''INSERT INTO prefixes (id, prefix) VALUES (%s, %s)'''
c.execute(sql_execute, (guildID, userPrefix))
await ctx.send("Successfully changed prefix to " + userPrefix)
conn.commit()
@client.event
async def on_guild_remove(guild):
guildID = guild.id
sql_execute = '''DELETE FROM prefixes WHERE id = (%s)'''
c.execute(sql_execute, (guildID,))
conn.commit()
client.run(KEY)
| import asyncio
import psycopg2
import discord
import random
import os
from objects import *
from discord.ext import commands
KEY = os.environ.get('KEY')
DATABASE_URL = os.environ.get('DATABASE_URL')
#INTENTS
intents = discord.Intents.default()
#intents.members = True
intents.typing = False
#LOAD DATABASE
try:
conn = psycopg2.connect(DATABASE_URL, sslmode='require')
c = conn.cursor()
except:
print("Failed to connect to database")
def get_prefix(client, message):
try:
guildID = message.guild.id
sql_query = '''SELECT * FROM prefixes WHERE id = (%s)'''
c.execute(sql_query, (guildID,))
prefix = c.fetchone()
if prefix is None:
return 'am.'
else:
return prefix[1]
except:
return 'am.'
client = commands.AutoShardedBot(command_prefix = get_prefix, intents=intents, chunk_guilds_at_startup=False)
client.remove_command('help')
#LOAD COGS
for filename in os.listdir('./cogs'):
if filename.endswith('.py'):
client.load_extension(f'cogs.{filename[:-3]}')
#PREFIX COMMANDS
@client.command()
async def prefix(ctx, prefix):
userPrefix = str(prefix)
try:
guildID = ctx.message.guild.id
except AttributeError:
await ctx.send("Changing prefix is only possible in servers!")
return
sql_query = '''SELECT FROM prefixes WHERE id = (%s)'''
c.execute(sql_query, (guildID,))
prefix = c.fetchone()
#Check if prefix exists
if prefix is not None:
#Delete prior prefix
sql_execute = '''DELETE FROM prefixes WHERE id = (%s)'''
c.execute(sql_execute, (guildID,))
#Add prefix to list
sql_execute = '''INSERT INTO prefixes (id, prefix) VALUES (%s, %s)'''
c.execute(sql_execute, (guildID, userPrefix))
await ctx.send("Successfully changed prefix to " + userPrefix)
conn.commit()
@client.event
async def on_guild_remove(guild):
guildID = guild.id
sql_execute = '''DELETE FROM prefixes WHERE id = (%s)'''
c.execute(sql_execute, (guildID,))
conn.commit()
client.run(KEY)
| en | 0.24242 | #INTENTS #intents.members = True #LOAD DATABASE SELECT * FROM prefixes WHERE id = (%s) #LOAD COGS #PREFIX COMMANDS SELECT FROM prefixes WHERE id = (%s) #Check if prefix exists #Delete prior prefix DELETE FROM prefixes WHERE id = (%s) #Add prefix to list INSERT INTO prefixes (id, prefix) VALUES (%s, %s) DELETE FROM prefixes WHERE id = (%s) | 2.4115 | 2 |
paramz/core/parameter_core.py | mzwiessele/mzparam | 49 | 6617031 | <reponame>mzwiessele/mzparam<filename>paramz/core/parameter_core.py
"""
Core module for parameterization.
This module implements all parameterization techniques, split up in modular bits.
Observable:
Observable Pattern for patameterization
"""
#===============================================================================
# Copyright (c) 2015, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of paramax nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import numpy as np
import re
import logging
from ..transformations import __fixed__, FIXED
from .constrainable import Constrainable
from .nameable import adjust_name_for_printing
from ..caching import FunctionCache
try:
from builtins import RecursionError as RE
except:
RE = RuntimeError
pass
class OptimizationHandlable(Constrainable):
"""
This enables optimization handles on an Object as done in GPy 0.4.
`..._optimizer_copy_transformed`: make sure the transformations and constraints etc are handled
"""
def __init__(self, name, default_constraint=None, *a, **kw):
super(OptimizationHandlable, self).__init__(name, default_constraint=default_constraint, *a, **kw)
self._optimizer_copy_ = None
self._optimizer_copy_transformed = False
#===========================================================================
# Optimizer copy
#===========================================================================
@property
def optimizer_array(self):
"""
Array for the optimizer to work on.
This array always lives in the space for the optimizer.
Thus, it is untransformed, going from Transformations.
Setting this array, will make sure the transformed parameters for this model
will be set accordingly. It has to be set with an array, retrieved from
this method, as e.g. fixing will resize the array.
The optimizer should only interfere with this array, such that transformations
are secured.
"""
if self.__dict__.get('_optimizer_copy_', None) is None or self.size != self._optimizer_copy_.size:
self._optimizer_copy_ = np.empty(self.size)
if not self._optimizer_copy_transformed:
self._optimizer_copy_.flat = self.param_array.flat
#py3 fix
#[np.put(self._optimizer_copy_, ind, c.finv(self.param_array[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__]
[np.put(self._optimizer_copy_, ind, c.finv(self.param_array[ind])) for c, ind in self.constraints.items() if c != __fixed__]
self._optimizer_copy_transformed = True
if self._has_fixes():# or self._has_ties()):
self._ensure_fixes()
return self._optimizer_copy_[self._fixes_]
return self._optimizer_copy_
@optimizer_array.setter
def optimizer_array(self, p):
"""
Make sure the optimizer copy does not get touched, thus, we only want to
set the values *inside* not the array itself.
Also we want to update param_array in here.
"""
f = None
if self.has_parent() and self.constraints[__fixed__].size != 0:
f = np.ones(self.size).astype(bool)
f[self.constraints[__fixed__]] = FIXED
elif self._has_fixes():
f = self._fixes_
if f is None:
self.param_array.flat = p
[np.put(self.param_array, ind, c.f(self.param_array.flat[ind]))
#py3 fix
#for c, ind in self.constraints.iteritems() if c != __fixed__]
for c, ind in self.constraints.items() if c != __fixed__]
else:
self.param_array.flat[f] = p
[np.put(self.param_array, ind[f[ind]], c.f(self.param_array.flat[ind[f[ind]]]))
#py3 fix
#for c, ind in self.constraints.iteritems() if c != __fixed__]
for c, ind in self.constraints.items() if c != __fixed__]
#self._highest_parent_.tie.propagate_val()
self._optimizer_copy_transformed = False
self.trigger_update()
def _trigger_params_changed(self, trigger_parent=True):
"""
First tell all children to update,
then update yourself.
If trigger_parent is True, we will tell the parent, otherwise not.
"""
[p._trigger_params_changed(trigger_parent=False) for p in self.parameters if not p.is_fixed]
self.notify_observers(None, None if trigger_parent else -np.inf)
def _size_transformed(self):
"""
As fixes are not passed to the optimiser, the size of the model for the optimiser
is the size of all parameters minus the size of the fixes.
"""
return self.size - self.constraints[__fixed__].size
def _transform_gradients(self, g):
"""
Transform the gradients by multiplying the gradient factor for each
constraint to it.
"""
#py3 fix
#[np.put(g, i, c.gradfactor(self.param_array[i], g[i])) for c, i in self.constraints.iteritems() if c != __fixed__]
[np.put(g, i, c.gradfactor(self.param_array[i], g[i])) for c, i in self.constraints.items() if c != __fixed__]
if self._has_fixes(): return g[self._fixes_]
return g
#def _transform_gradients_non_natural(self, g):
# """
# Transform the gradients by multiplying the gradient factor for each
# constraint to it, using the theta transformed natural gradient.
# """
# #py3 fix
# #[np.put(g, i, c.gradfactor_non_natural(self.param_array[i], g[i])) for c, i in self.constraints.iteritems() if c != __fixed__]
# [np.put(g, i, c.gradfactor_non_natural(self.param_array[i], g[i])) for c, i in self.constraints.items() if c != __fixed__]
# if self._has_fixes(): return g[self._fixes_]
# return g
@property
def num_params(self):
"""
Return the number of parameters of this parameter_handle.
Param objects will always return 0.
"""
raise NotImplemented("Abstract, please implement in respective classes")
def parameter_names(self, add_self=False, adjust_for_printing=False, recursive=True, intermediate=False):
"""
Get the names of all parameters of this model or parameter. It starts
from the parameterized object you are calling this method on.
Note: This does not unravel multidimensional parameters,
use parameter_names_flat to unravel parameters!
:param bool add_self: whether to add the own name in front of names
:param bool adjust_for_printing: whether to call `adjust_name_for_printing` on names
:param bool recursive: whether to traverse through hierarchy and append leaf node names
:param bool intermediate: whether to add intermediate names, that is parameterized objects
"""
if adjust_for_printing: adjust = adjust_name_for_printing
else: adjust = lambda x: x
names = []
if intermediate or (not recursive):
names.extend([adjust(x.name) for x in self.parameters])
if intermediate or recursive: names.extend([
xi for x in self.parameters for xi in
x.parameter_names(add_self=True,
adjust_for_printing=adjust_for_printing,
recursive=True,
intermediate=False)])
if add_self: names = map(lambda x: adjust(self.name) + "." + x, names)
return names
def parameter_names_flat(self, include_fixed=False):
"""
Return the flattened parameter names for all subsequent parameters
of this parameter. We do not include the name for self here!
If you want the names for fixed parameters as well in this list,
set include_fixed to True.
if not hasattr(obj, 'cache'):
obj.cache = FunctionCacher()
:param bool include_fixed: whether to include fixed names here.
"""
name_list = []
for p in self.flattened_parameters:
name = p.hierarchy_name()
if p.size > 1:
name_list.extend(["{}[{!s}]".format(name, i) for i in p._indices()])
else:
name_list.append(name)
name_list = np.array(name_list)
if not include_fixed and self._has_fixes():
return name_list[self._fixes_]
return name_list
#===========================================================================
# Randomizeable
#===========================================================================
def randomize(self, rand_gen=None, *args, **kwargs):
"""
Randomize the model.
Make this draw from the rand_gen if one exists, else draw random normal(0,1)
:param rand_gen: np random number generator which takes args and kwargs
:param flaot loc: loc parameter for random number generator
:param float scale: scale parameter for random number generator
:param args, kwargs: will be passed through to random number generator
"""
if rand_gen is None:
rand_gen = np.random.normal
# first take care of all parameters (from N(0,1))
x = rand_gen(size=self._size_transformed(), *args, **kwargs)
updates = self.update_model()
self.update_model(False) # Switch off the updates
self.optimizer_array = x # makes sure all of the tied parameters get the same init (since there's only one prior object...)
# now draw from prior where possible
x = self.param_array.copy()
unfixlist = np.ones((self.size,),dtype=np.bool)
unfixlist[self.constraints[__fixed__]] = False
self.param_array.flat[unfixlist] = x.view(np.ndarray).ravel()[unfixlist]
self.update_model(updates)
#===========================================================================
# For shared memory arrays. This does nothing in Param, but sets the memory
# for all parameterized objects
#===========================================================================
@property
def gradient_full(self):
"""
Note to users:
This does not return the gradient in the right shape! Use self.gradient
for the right gradient array.
To work on the gradient array, use this as the gradient handle.
This method exists for in memory use of parameters.
When trying to access the true gradient array, use this.
"""
self.gradient # <<< ensure _gradient_array_
return self._gradient_array_
def _propagate_param_grad(self, parray, garray):
"""
For propagating the param_array and gradient_array.
This ensures the in memory view of each subsequent array.
1.) connect param_array of children to self.param_array
2.) tell all children to propagate further
"""
#if self.param_array.size != self.size:
# self._param_array_ = np.empty(self.size, dtype=np.float64)
#if self.gradient.size != self.size:
# self._gradient_array_ = np.empty(self.size, dtype=np.float64)
pi_old_size = 0
for pi in self.parameters:
pislice = slice(pi_old_size, pi_old_size + pi.size)
self.param_array[pislice] = pi.param_array.flat # , requirements=['C', 'W']).flat
self.gradient_full[pislice] = pi.gradient_full.flat # , requirements=['C', 'W']).flat
pi.param_array.data = parray[pislice].data
pi.gradient_full.data = garray[pislice].data
pi._propagate_param_grad(parray[pislice], garray[pislice])
pi_old_size += pi.size
self._model_initialized_ = True
def _connect_parameters(self):
pass
_name_digit = re.compile("(?P<name>.*)_(?P<digit>\d+)$")
class Parameterizable(OptimizationHandlable):
"""
A parameterisable class.
This class provides the parameters list (ArrayList) and standard parameter handling,
such as {link|unlink}_parameter(), traverse hierarchy and param_array, gradient_array
and the empty parameters_changed().
This class is abstract and should not be instantiated.
Use paramz.Parameterized() as node (or leaf) in the parameterized hierarchy.
Use paramz.Param() for a leaf in the parameterized hierarchy.
"""
def __init__(self, *args, **kwargs):
super(Parameterizable, self).__init__(*args, **kwargs)
from .lists_and_dicts import ArrayList
self.parameters = ArrayList()
self._param_array_ = None
self._added_names_ = set()
self.logger = logging.getLogger(self.__class__.__name__)
self.__visited = False # for traversing in reverse order we need to know if we were here already
self.cache = FunctionCache()
def initialize_parameter(self):
"""
Call this function to initialize the model, if you built it without initialization.
This HAS to be called manually before optmizing or it will be causing
unexpected behaviour, if not errors!
"""
#logger.debug("connecting parameters")
self._highest_parent_._notify_parent_change()
self._highest_parent_._connect_parameters() #logger.debug("calling parameters changed")
self._highest_parent_._connect_fixes()
self.trigger_update()
@property
def param_array(self):
"""
Array representing the parameters of this class.
There is only one copy of all parameters in memory, two during optimization.
!WARNING!: setting the parameter array MUST always be done in memory:
m.param_array[:] = m_copy.param_array
"""
if (self.__dict__.get('_param_array_', None) is None) or (self._param_array_.size != self.size):
self._param_array_ = np.empty(self.size, dtype=np.float64)
return self._param_array_
@property
def unfixed_param_array(self):
"""
Array representing the parameters of this class.
There is only one copy of all parameters in memory, two during optimization.
!WARNING!: setting the parameter array MUST always be done in memory:
m.param_array[:] = m_copy.param_array
"""
if self.constraints[__fixed__].size !=0:
fixes = np.ones(self.size).astype(bool)
fixes[self.constraints[__fixed__]] = FIXED
return self._param_array_[fixes]
else:
return self._param_array_
def traverse(self, visit, *args, **kwargs):
"""
Traverse the hierarchy performing `visit(self, *args, **kwargs)`
at every node passed by downwards. This function includes self!
See *visitor pattern* in literature. This is implemented in pre-order fashion.
Example::
#Collect all children:
children = []
self.traverse(children.append)
print children
"""
if not self.__visited:
visit(self, *args, **kwargs)
self.__visited = True
self._traverse(visit, *args, **kwargs)
self.__visited = False
def _traverse(self, visit, *args, **kwargs):
for c in self.parameters:
c.traverse(visit, *args, **kwargs)
def traverse_parents(self, visit, *args, **kwargs):
"""
Traverse the hierarchy upwards, visiting all parents and their children except self.
See "visitor pattern" in literature. This is implemented in pre-order fashion.
Example:
parents = []
self.traverse_parents(parents.append)
print parents
"""
if self.has_parent():
self.__visited = True
self._parent_.traverse_parents(visit, *args, **kwargs)
self._parent_.traverse(visit, *args, **kwargs)
self.__visited = False
#===========================================================================
# Caching
#===========================================================================
def enable_caching(self):
def visit(self):
self.cache.enable_caching()
self.traverse(visit)
def disable_caching(self):
def visit(self):
self.cache.disable_caching()
self.traverse(visit)
#=========================================================================
# Gradient handling
#=========================================================================
@property
def gradient(self):
if (self.__dict__.get('_gradient_array_', None) is None) or self._gradient_array_.size != self.size:
self._gradient_array_ = np.empty(self.size, dtype=np.float64)
return self._gradient_array_
@gradient.setter
def gradient(self, val):
self._gradient_array_[:] = val
@property
def num_params(self):
return len(self.parameters)
def _add_parameter_name(self, param):
try:
pname = adjust_name_for_printing(param.name)
def warn_and_retry(param, match=None):
#===================================================================
# print """
# WARNING: added a parameter with formatted name {},
# which is already assigned to {}.
# Trying to change the parameter name to
#
# {}.{}
# """.format(pname, self.hierarchy_name(), self.hierarchy_name(), param.name + "_")
#===================================================================
if match is None:
param.name = param.name+"_1"
else:
param.name = match.group('name') + "_" + str(int(match.group('digit'))+1)
self._add_parameter_name(param)
# and makes sure to not delete programmatically added parameters
for other in self.parameters:
if (not (other is param)) and (other.name == param.name):
return warn_and_retry(other, _name_digit.match(other.name))
if pname not in dir(self):
self.__dict__[pname] = param
self._added_names_.add(pname)
else: # pname in self.__dict__
if pname in self._added_names_:
other = self.__dict__[pname]
#if not (param is other):
# del self.__dict__[pname]
# self._added_names_.remove(pname)
# warn_and_retry(other)
# warn_and_retry(param, _name_digit.match(other.name))
except RE:
raise RE("Maximum recursion depth reached, try naming the parts of your kernel uniquely to avoid naming conflicts.")
def _remove_parameter_name(self, param=None, pname=None):
assert param is None or pname is None, "can only delete either param by name, or the name of a param"
pname = adjust_name_for_printing(pname) or adjust_name_for_printing(param.name)
if pname in self._added_names_:
del self.__dict__[pname]
self._added_names_.remove(pname)
self._connect_parameters()
def _name_changed(self, param, old_name):
self._remove_parameter_name(None, old_name)
self._add_parameter_name(param)
def __setstate__(self, state):
super(Parameterizable, self).__setstate__(state)
self.logger = logging.getLogger(self.__class__.__name__)
return self
#===========================================================================
# notification system
#===========================================================================
def _parameters_changed_notification(self, me, which=None):
"""
In parameterizable we just need to make sure, that the next call to optimizer_array
will update the optimizer_array to the latest parameters
"""
self._optimizer_copy_transformed = False # tells the optimizer array to update on next request
self.parameters_changed()
def _pass_through_notify_observers(self, me, which=None):
self.notify_observers(which=which)
def _setup_observers(self):
"""
Setup the default observers
1: parameters_changed_notify
2: pass through to parent, if present
"""
self.add_observer(self, self._parameters_changed_notification, -100)
if self.has_parent():
self.add_observer(self._parent_, self._parent_._pass_through_notify_observers, -np.inf)
#===========================================================================
# From being parentable, we have to define the parent_change notification
#===========================================================================
def _notify_parent_change(self):
"""
Notify all parameters that the parent has changed
"""
for p in self.parameters:
p._parent_changed(self)
def parameters_changed(self):
"""
This method gets called when parameters have changed.
Another way of listening to param changes is to
add self as a listener to the param, such that
updates get passed through. See :py:function:``paramz.param.Observable.add_observer``
"""
pass
def save(self, filename, ftype='HDF5'): # pragma: no coverage
"""
Save all the model parameters into a file (HDF5 by default).
This is not supported yet. We are working on having a consistent,
human readable way of saving and loading GPy models. This only
saves the parameter array to a hdf5 file. In order
to load the model again, use the same script for building the model
you used to build this model. Then load the param array from this hdf5
file and set the parameters of the created model:
>>> m[:] = h5_file['param_array']
This is less then optimal, we are working on a better solution to that.
"""
from ..param import Param
def gather_params(self, plist):
if isinstance(self,Param):
plist.append(self)
plist = []
self.traverse(gather_params, plist)
names = self.parameter_names(adjust_for_printing=True)
if ftype=='HDF5':
try:
import h5py
f = h5py.File(filename,'w')
for p,n in zip(plist,names):
n = n.replace('.','_')
p = p.values
d = f.create_dataset(n,p.shape,dtype=p.dtype)
d[:] = p
if hasattr(self, 'param_array'):
d = f.create_dataset('param_array',self.param_array.shape, dtype=self.param_array.dtype)
d[:] = self.param_array
f.close()
except:
raise 'Fails to write the parameters into a HDF5 file!'
| """
Core module for parameterization.
This module implements all parameterization techniques, split up in modular bits.
Observable:
Observable Pattern for patameterization
"""
#===============================================================================
# Copyright (c) 2015, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of paramax nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import numpy as np
import re
import logging
from ..transformations import __fixed__, FIXED
from .constrainable import Constrainable
from .nameable import adjust_name_for_printing
from ..caching import FunctionCache
try:
from builtins import RecursionError as RE
except:
RE = RuntimeError
pass
class OptimizationHandlable(Constrainable):
    """
    This enables optimization handles on an Object as done in GPy 0.4.

    It maintains the flat, optimiser-space view of the parameters
    (:attr:`optimizer_array`) and keeps it in sync with the model-space
    values in ``param_array``.  ``_optimizer_copy_transformed`` makes sure
    the transformations and constraints etc. are handled.
    """

    def __init__(self, name, default_constraint=None, *a, **kw):
        super(OptimizationHandlable, self).__init__(name, default_constraint=default_constraint, *a, **kw)
        # Lazily allocated flat buffer holding the optimiser-space values.
        self._optimizer_copy_ = None
        # True while _optimizer_copy_ reflects the current param_array.
        self._optimizer_copy_transformed = False

    #===========================================================================
    # Optimizer copy
    #===========================================================================
    @property
    def optimizer_array(self):
        """
        Array for the optimizer to work on.

        This array always lives in the space for the optimizer.
        Thus, it is untransformed, going from Transformations.

        Setting this array will make sure the transformed parameters for this
        model are set accordingly. It has to be set with an array retrieved
        from this method, as e.g. fixing will resize the array.

        The optimizer should only interfere with this array, such that
        transformations are secured.
        """
        # (Re)allocate the buffer when it does not exist yet or the number of
        # parameters changed (e.g. after linking/unlinking parameters).
        if self.__dict__.get('_optimizer_copy_', None) is None or self.size != self._optimizer_copy_.size:
            self._optimizer_copy_ = np.empty(self.size)

        if not self._optimizer_copy_transformed:
            self._optimizer_copy_.flat = self.param_array.flat
            # Map every constrained (non-fixed) parameter into optimiser
            # space via the inverse transformation of its constraint.
            [np.put(self._optimizer_copy_, ind, c.finv(self.param_array[ind])) for c, ind in self.constraints.items() if c != __fixed__]
            self._optimizer_copy_transformed = True

        if self._has_fixes():
            # Fixed parameters are hidden from the optimizer.
            self._ensure_fixes()
            return self._optimizer_copy_[self._fixes_]
        return self._optimizer_copy_

    @optimizer_array.setter
    def optimizer_array(self, p):
        """
        Make sure the optimizer copy does not get touched, thus, we only want
        to set the values *inside*, not the array itself.

        Also we want to update param_array in here.
        """
        # ``f`` is a boolean mask of the entries the optimizer may write to;
        # None when nothing is fixed.
        f = None
        if self.has_parent() and self.constraints[__fixed__].size != 0:
            f = np.ones(self.size).astype(bool)
            f[self.constraints[__fixed__]] = FIXED
        elif self._has_fixes():
            f = self._fixes_
        if f is None:
            self.param_array.flat = p
            # Push the optimiser-space values through each constraint's
            # forward transformation into model space.
            [np.put(self.param_array, ind, c.f(self.param_array.flat[ind]))
             for c, ind in self.constraints.items() if c != __fixed__]
        else:
            self.param_array.flat[f] = p
            [np.put(self.param_array, ind[f[ind]], c.f(self.param_array.flat[ind[f[ind]]]))
             for c, ind in self.constraints.items() if c != __fixed__]
        self._optimizer_copy_transformed = False
        self.trigger_update()

    def _trigger_params_changed(self, trigger_parent=True):
        """
        First tell all children to update, then update yourself.

        If trigger_parent is True, we will tell the parent, otherwise not.
        """
        [p._trigger_params_changed(trigger_parent=False) for p in self.parameters if not p.is_fixed]
        # -inf as ``which`` stops the notification from bubbling upwards.
        self.notify_observers(None, None if trigger_parent else -np.inf)

    def _size_transformed(self):
        """
        As fixes are not passed to the optimiser, the size of the model for
        the optimiser is the size of all parameters minus the size of the
        fixes.
        """
        return self.size - self.constraints[__fixed__].size

    def _transform_gradients(self, g):
        """
        Transform the gradients by multiplying the gradient factor for each
        constraint to it.
        """
        [np.put(g, i, c.gradfactor(self.param_array[i], g[i])) for c, i in self.constraints.items() if c != __fixed__]
        if self._has_fixes(): return g[self._fixes_]
        return g

    @property
    def num_params(self):
        """
        Return the number of parameters of this parameter_handle.
        Param objects will always return 0.
        """
        # Fixed: the original ``raise NotImplemented(...)`` raised a
        # TypeError, because NotImplemented is a sentinel value, not an
        # exception class.
        raise NotImplementedError("Abstract, please implement in respective classes")

    def parameter_names(self, add_self=False, adjust_for_printing=False, recursive=True, intermediate=False):
        """
        Get the names of all parameters of this model or parameter. It starts
        from the parameterized object you are calling this method on.

        Note: This does not unravel multidimensional parameters,
              use parameter_names_flat to unravel parameters!

        :param bool add_self: whether to add the own name in front of names
        :param bool adjust_for_printing: whether to call `adjust_name_for_printing` on names
        :param bool recursive: whether to traverse through hierarchy and append leaf node names
        :param bool intermediate: whether to add intermediate names, that is parameterized objects
        """
        if adjust_for_printing: adjust = adjust_name_for_printing
        else: adjust = lambda x: x
        names = []
        if intermediate or (not recursive):
            names.extend([adjust(x.name) for x in self.parameters])
        if intermediate or recursive: names.extend([
            xi for x in self.parameters for xi in
            x.parameter_names(add_self=True,
                              adjust_for_printing=adjust_for_printing,
                              recursive=True,
                              intermediate=False)])
        # Fixed: materialize the map so callers always receive a list (under
        # Python 3 the bare map object could only be iterated once).
        if add_self: names = list(map(lambda x: adjust(self.name) + "." + x, names))
        return names

    def parameter_names_flat(self, include_fixed=False):
        """
        Return the flattened parameter names for all subsequent parameters
        of this parameter. We do not include the name for self here!

        If you want the names for fixed parameters as well in this list,
        set include_fixed to True.

        :param bool include_fixed: whether to include fixed names here.
        """
        name_list = []
        for p in self.flattened_parameters:
            name = p.hierarchy_name()
            if p.size > 1:
                # One entry per element of a multi-dimensional parameter,
                # e.g. "kern.lengthscale[[0]]".
                name_list.extend(["{}[{!s}]".format(name, i) for i in p._indices()])
            else:
                name_list.append(name)
        name_list = np.array(name_list)

        if not include_fixed and self._has_fixes():
            return name_list[self._fixes_]
        return name_list

    #===========================================================================
    # Randomizeable
    #===========================================================================
    def randomize(self, rand_gen=None, *args, **kwargs):
        """
        Randomize the model.
        Make this draw from rand_gen if one exists, else draw random normal(0,1)

        :param rand_gen: np random number generator which takes args and kwargs
        :param float loc: loc parameter for random number generator
        :param float scale: scale parameter for random number generator
        :param args, kwargs: will be passed through to random number generator
        """
        if rand_gen is None:
            rand_gen = np.random.normal
        # First take care of all parameters (from N(0,1)).
        x = rand_gen(size=self._size_transformed(), *args, **kwargs)
        updates = self.update_model()
        self.update_model(False)  # switch off the updates while we write
        self.optimizer_array = x  # makes sure all of the tied parameters get the same init
        x = self.param_array.copy()
        # Fixed: ``np.bool`` was removed in NumPy 1.24; the builtin ``bool``
        # is the documented replacement.
        unfixlist = np.ones((self.size,), dtype=bool)
        unfixlist[self.constraints[__fixed__]] = False
        # Only overwrite the unfixed entries; fixed parameters keep their value.
        self.param_array.flat[unfixlist] = x.view(np.ndarray).ravel()[unfixlist]
        self.update_model(updates)

    #===========================================================================
    # For shared memory arrays. This does nothing in Param, but sets the memory
    # for all parameterized objects
    #===========================================================================
    @property
    def gradient_full(self):
        """
        Note to users:
        This does not return the gradient in the right shape! Use self.gradient
        for the right gradient array.

        To work on the gradient array, use this as the gradient handle.
        This method exists for in memory use of parameters.
        When trying to access the true gradient array, use this.
        """
        self.gradient  # <<< ensure _gradient_array_ is allocated
        return self._gradient_array_

    def _propagate_param_grad(self, parray, garray):
        """
        For propagating the param_array and gradient_array.

        This ensures the in memory view of each subsequent array:

        1.) connect param_array of children to self.param_array
        2.) tell all children to propagate further
        """
        pi_old_size = 0
        for pi in self.parameters:
            pislice = slice(pi_old_size, pi_old_size + pi.size)

            # Copy the child's current values into the shared buffers ...
            self.param_array[pislice] = pi.param_array.flat
            self.gradient_full[pislice] = pi.gradient_full.flat

            # ... then re-point the child's arrays at the shared memory.
            pi.param_array.data = parray[pislice].data
            pi.gradient_full.data = garray[pislice].data

            pi._propagate_param_grad(parray[pislice], garray[pislice])
            pi_old_size += pi.size

        self._model_initialized_ = True

    def _connect_parameters(self):
        # Overridden in subclasses; nothing to connect at this level.
        pass
_name_digit = re.compile("(?P<name>.*)_(?P<digit>\d+)$")
class Parameterizable(OptimizationHandlable):
    """
    A parameterisable class.

    This class provides the parameters list (ArrayList) and standard parameter
    handling, such as {link|unlink}_parameter(), traverse hierarchy and
    param_array, gradient_array and the empty parameters_changed().

    This class is abstract and should not be instantiated.
    Use paramz.Parameterized() as node (or leaf) in the parameterized hierarchy.
    Use paramz.Param() for a leaf in the parameterized hierarchy.
    """
    def __init__(self, *args, **kwargs):
        super(Parameterizable, self).__init__(*args, **kwargs)
        # Imported here (not at module level) -- presumably to avoid a
        # circular import at load time; TODO confirm.
        from .lists_and_dicts import ArrayList
        self.parameters = ArrayList()
        self._param_array_ = None
        # Names of attributes that _add_parameter_name installed on self.
        self._added_names_ = set()
        self.logger = logging.getLogger(self.__class__.__name__)
        # For traversing in reverse order we need to know if we were here already.
        self.__visited = False
        self.cache = FunctionCache()

    def initialize_parameter(self):
        """
        Call this function to initialize the model, if you built it without
        initialization.

        This HAS to be called manually before optimizing or it will be
        causing unexpected behaviour, if not errors!
        """
        # Wire up the whole hierarchy from the topmost parent downwards.
        self._highest_parent_._notify_parent_change()
        self._highest_parent_._connect_parameters()
        self._highest_parent_._connect_fixes()
        self.trigger_update()

    @property
    def param_array(self):
        """
        Array representing the parameters of this class.
        There is only one copy of all parameters in memory, two during
        optimization.

        !WARNING!: setting the parameter array MUST always be done in memory:

            m.param_array[:] = m_copy.param_array
        """
        # Allocate lazily; re-allocate whenever the parameter count changed.
        if (self.__dict__.get('_param_array_', None) is None) or (self._param_array_.size != self.size):
            self._param_array_ = np.empty(self.size, dtype=np.float64)
        return self._param_array_

    @property
    def unfixed_param_array(self):
        """
        Array representing the non-fixed parameters of this class.
        There is only one copy of all parameters in memory, two during
        optimization.

        !WARNING!: setting the parameter array MUST always be done in memory:

            m.param_array[:] = m_copy.param_array
        """
        if self.constraints[__fixed__].size !=0:
            # Mask out the fixed entries before returning the view.
            fixes = np.ones(self.size).astype(bool)
            fixes[self.constraints[__fixed__]] = FIXED
            return self._param_array_[fixes]
        else:
            return self._param_array_

    def traverse(self, visit, *args, **kwargs):
        """
        Traverse the hierarchy performing `visit(self, *args, **kwargs)`
        at every node passed by downwards. This function includes self!

        See *visitor pattern* in literature. This is implemented in pre-order
        fashion.

        Example::

            # Collect all children:
            children = []
            self.traverse(children.append)
            print children
        """
        if not self.__visited:
            visit(self, *args, **kwargs)
            # Guard against revisiting this node while the traversal is in
            # flight (e.g. when called from traverse_parents).
            self.__visited = True
            self._traverse(visit, *args, **kwargs)
            self.__visited = False

    def _traverse(self, visit, *args, **kwargs):
        # Recurse into every child parameter (pre-order continuation).
        for c in self.parameters:
            c.traverse(visit, *args, **kwargs)

    def traverse_parents(self, visit, *args, **kwargs):
        """
        Traverse the hierarchy upwards, visiting all parents and their
        children except self.

        See "visitor pattern" in literature. This is implemented in pre-order
        fashion.

        Example:

            parents = []
            self.traverse_parents(parents.append)
            print parents
        """
        if self.has_parent():
            # Mark self as visited so the downward traversal started from the
            # parent skips this subtree.
            self.__visited = True
            self._parent_.traverse_parents(visit, *args, **kwargs)
            self._parent_.traverse(visit, *args, **kwargs)
            self.__visited = False

    #===========================================================================
    # Caching
    #===========================================================================
    def enable_caching(self):
        # Turn caching on for this node and every descendant.
        def visit(self):
            self.cache.enable_caching()
        self.traverse(visit)

    def disable_caching(self):
        # Turn caching off for this node and every descendant.
        def visit(self):
            self.cache.disable_caching()
        self.traverse(visit)

    #=========================================================================
    # Gradient handling
    #=========================================================================
    @property
    def gradient(self):
        # Gradient buffer; allocated lazily and re-allocated when the
        # parameter count changes (mirrors param_array above).
        if (self.__dict__.get('_gradient_array_', None) is None) or self._gradient_array_.size != self.size:
            self._gradient_array_ = np.empty(self.size, dtype=np.float64)
        return self._gradient_array_

    @gradient.setter
    def gradient(self, val):
        # Write in place so shared-memory views of children stay connected.
        self._gradient_array_[:] = val

    @property
    def num_params(self):
        # Number of direct child parameters (not the flattened size).
        return len(self.parameters)

    def _add_parameter_name(self, param):
        # Expose ``param`` as an attribute of self under its printing-adjusted
        # name, renaming the clashing parameter with a numeric suffix when a
        # sibling already uses that name.
        try:
            pname = adjust_name_for_printing(param.name)
            def warn_and_retry(param, match=None):
                # Rename to "<name>_<n+1>" (or "<name>_1" when there is no
                # numeric suffix yet) and retry the registration.
                if match is None:
                    param.name = param.name+"_1"
                else:
                    param.name = match.group('name') + "_" + str(int(match.group('digit'))+1)
                self._add_parameter_name(param)
            # and makes sure to not delete programmatically added parameters
            for other in self.parameters:
                if (not (other is param)) and (other.name == param.name):
                    return warn_and_retry(other, _name_digit.match(other.name))
            if pname not in dir(self):
                self.__dict__[pname] = param
                self._added_names_.add(pname)
            else: # pname in self.__dict__
                if pname in self._added_names_:
                    # NOTE(review): the collision handling for this branch was
                    # commented out upstream -- a parameter shadowing an
                    # already-added name is currently left untouched.
                    other = self.__dict__[pname]
        except RE:
            raise RE("Maximum recursion depth reached, try naming the parts of your kernel uniquely to avoid naming conflicts.")

    def _remove_parameter_name(self, param=None, pname=None):
        # Remove the attribute that _add_parameter_name installed, given
        # either the parameter itself or its (unadjusted) name.
        assert param is None or pname is None, "can only delete either param by name, or the name of a param"
        pname = adjust_name_for_printing(pname) or adjust_name_for_printing(param.name)
        if pname in self._added_names_:
            del self.__dict__[pname]
            self._added_names_.remove(pname)
        self._connect_parameters()

    def _name_changed(self, param, old_name):
        # Keep the attribute mapping in sync when a child gets renamed.
        self._remove_parameter_name(None, old_name)
        self._add_parameter_name(param)

    def __setstate__(self, state):
        # Loggers are not picklable, so recreate one after unpickling.
        super(Parameterizable, self).__setstate__(state)
        self.logger = logging.getLogger(self.__class__.__name__)
        return self

    #===========================================================================
    # notification system
    #===========================================================================
    def _parameters_changed_notification(self, me, which=None):
        """
        In parameterizable we just need to make sure, that the next call to
        optimizer_array will update the optimizer_array to the latest
        parameters.
        """
        # Tells the optimizer array to update on next request.
        self._optimizer_copy_transformed = False
        self.parameters_changed()

    def _pass_through_notify_observers(self, me, which=None):
        # Forward a child's notification to this node's own observers.
        self.notify_observers(which=which)

    def _setup_observers(self):
        """
        Setup the default observers

        1: parameters_changed_notify
        2: pass through to parent, if present
        """
        self.add_observer(self, self._parameters_changed_notification, -100)
        if self.has_parent():
            self.add_observer(self._parent_, self._parent_._pass_through_notify_observers, -np.inf)

    #===========================================================================
    # From being parentable, we have to define the parent_change notification
    #===========================================================================
    def _notify_parent_change(self):
        """
        Notify all parameters that the parent has changed
        """
        for p in self.parameters:
            p._parent_changed(self)

    def parameters_changed(self):
        """
        This method gets called when parameters have changed.

        Another way of listening to param changes is to
        add self as a listener to the param, such that
        updates get passed through. See :py:function:``paramz.param.Observable.add_observer``
        """
        pass
def save(self, filename, ftype='HDF5'): # pragma: no coverage
"""
Save all the model parameters into a file (HDF5 by default).
This is not supported yet. We are working on having a consistent,
human readable way of saving and loading GPy models. This only
saves the parameter array to a hdf5 file. In order
to load the model again, use the same script for building the model
you used to build this model. Then load the param array from this hdf5
file and set the parameters of the created model:
>>> m[:] = h5_file['param_array']
This is less then optimal, we are working on a better solution to that.
"""
from ..param import Param
def gather_params(self, plist):
if isinstance(self,Param):
plist.append(self)
plist = []
self.traverse(gather_params, plist)
names = self.parameter_names(adjust_for_printing=True)
if ftype=='HDF5':
try:
import h5py
f = h5py.File(filename,'w')
for p,n in zip(plist,names):
n = n.replace('.','_')
p = p.values
d = f.create_dataset(n,p.shape,dtype=p.dtype)
d[:] = p
if hasattr(self, 'param_array'):
d = f.create_dataset('param_array',self.param_array.shape, dtype=self.param_array.dtype)
d[:] = self.param_array
f.close()
except:
raise 'Fails to write the parameters into a HDF5 file!' | en | 0.632622 | Core module for parameterization. This module implements all parameterization techniques, split up in modular bits. Observable: Observable Pattern for patameterization #=============================================================================== # Copyright (c) 2015, <NAME> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of paramax nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #=============================================================================== This enables optimization handles on an Object as done in GPy 0.4. 
`..._optimizer_copy_transformed`: make sure the transformations and constraints etc are handled #=========================================================================== # Optimizer copy #=========================================================================== Array for the optimizer to work on. This array always lives in the space for the optimizer. Thus, it is untransformed, going from Transformations. Setting this array, will make sure the transformed parameters for this model will be set accordingly. It has to be set with an array, retrieved from this method, as e.g. fixing will resize the array. The optimizer should only interfere with this array, such that transformations are secured. #py3 fix #[np.put(self._optimizer_copy_, ind, c.finv(self.param_array[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__] # or self._has_ties()): Make sure the optimizer copy does not get touched, thus, we only want to set the values *inside* not the array itself. Also we want to update param_array in here. #py3 fix #for c, ind in self.constraints.iteritems() if c != __fixed__] #py3 fix #for c, ind in self.constraints.iteritems() if c != __fixed__] #self._highest_parent_.tie.propagate_val() First tell all children to update, then update yourself. If trigger_parent is True, we will tell the parent, otherwise not. As fixes are not passed to the optimiser, the size of the model for the optimiser is the size of all parameters minus the size of the fixes. Transform the gradients by multiplying the gradient factor for each constraint to it. #py3 fix #[np.put(g, i, c.gradfactor(self.param_array[i], g[i])) for c, i in self.constraints.iteritems() if c != __fixed__] #def _transform_gradients_non_natural(self, g): # """ # Transform the gradients by multiplying the gradient factor for each # constraint to it, using the theta transformed natural gradient. 
# """ # #py3 fix # #[np.put(g, i, c.gradfactor_non_natural(self.param_array[i], g[i])) for c, i in self.constraints.iteritems() if c != __fixed__] # [np.put(g, i, c.gradfactor_non_natural(self.param_array[i], g[i])) for c, i in self.constraints.items() if c != __fixed__] # if self._has_fixes(): return g[self._fixes_] # return g Return the number of parameters of this parameter_handle. Param objects will always return 0. Get the names of all parameters of this model or parameter. It starts from the parameterized object you are calling this method on. Note: This does not unravel multidimensional parameters, use parameter_names_flat to unravel parameters! :param bool add_self: whether to add the own name in front of names :param bool adjust_for_printing: whether to call `adjust_name_for_printing` on names :param bool recursive: whether to traverse through hierarchy and append leaf node names :param bool intermediate: whether to add intermediate names, that is parameterized objects Return the flattened parameter names for all subsequent parameters of this parameter. We do not include the name for self here! If you want the names for fixed parameters as well in this list, set include_fixed to True. if not hasattr(obj, 'cache'): obj.cache = FunctionCacher() :param bool include_fixed: whether to include fixed names here. #=========================================================================== # Randomizeable #=========================================================================== Randomize the model. 
Make this draw from the rand_gen if one exists, else draw random normal(0,1) :param rand_gen: np random number generator which takes args and kwargs :param flaot loc: loc parameter for random number generator :param float scale: scale parameter for random number generator :param args, kwargs: will be passed through to random number generator # first take care of all parameters (from N(0,1)) # Switch off the updates # makes sure all of the tied parameters get the same init (since there's only one prior object...) # now draw from prior where possible #=========================================================================== # For shared memory arrays. This does nothing in Param, but sets the memory # for all parameterized objects #=========================================================================== Note to users: This does not return the gradient in the right shape! Use self.gradient for the right gradient array. To work on the gradient array, use this as the gradient handle. This method exists for in memory use of parameters. When trying to access the true gradient array, use this. # <<< ensure _gradient_array_ For propagating the param_array and gradient_array. This ensures the in memory view of each subsequent array. 1.) connect param_array of children to self.param_array 2.) tell all children to propagate further #if self.param_array.size != self.size: # self._param_array_ = np.empty(self.size, dtype=np.float64) #if self.gradient.size != self.size: # self._gradient_array_ = np.empty(self.size, dtype=np.float64) # , requirements=['C', 'W']).flat # , requirements=['C', 'W']).flat A parameterisable class. This class provides the parameters list (ArrayList) and standard parameter handling, such as {link|unlink}_parameter(), traverse hierarchy and param_array, gradient_array and the empty parameters_changed(). This class is abstract and should not be instantiated. Use paramz.Parameterized() as node (or leaf) in the parameterized hierarchy. 
Use paramz.Param() for a leaf in the parameterized hierarchy. # for traversing in reverse order we need to know if we were here already Call this function to initialize the model, if you built it without initialization. This HAS to be called manually before optmizing or it will be causing unexpected behaviour, if not errors! #logger.debug("connecting parameters") #logger.debug("calling parameters changed") Array representing the parameters of this class. There is only one copy of all parameters in memory, two during optimization. !WARNING!: setting the parameter array MUST always be done in memory: m.param_array[:] = m_copy.param_array Array representing the parameters of this class. There is only one copy of all parameters in memory, two during optimization. !WARNING!: setting the parameter array MUST always be done in memory: m.param_array[:] = m_copy.param_array Traverse the hierarchy performing `visit(self, *args, **kwargs)` at every node passed by downwards. This function includes self! See *visitor pattern* in literature. This is implemented in pre-order fashion. Example:: #Collect all children: children = [] self.traverse(children.append) print children Traverse the hierarchy upwards, visiting all parents and their children except self. See "visitor pattern" in literature. This is implemented in pre-order fashion. Example: parents = [] self.traverse_parents(parents.append) print parents #=========================================================================== # Caching #=========================================================================== #========================================================================= # Gradient handling #========================================================================= #=================================================================== # print """ # WARNING: added a parameter with formatted name {}, # which is already assigned to {}. 
# Trying to change the parameter name to # # {}.{} # """.format(pname, self.hierarchy_name(), self.hierarchy_name(), param.name + "_") #=================================================================== # and makes sure to not delete programmatically added parameters # pname in self.__dict__ #if not (param is other): # del self.__dict__[pname] # self._added_names_.remove(pname) # warn_and_retry(other) # warn_and_retry(param, _name_digit.match(other.name)) #=========================================================================== # notification system #=========================================================================== In parameterizable we just need to make sure, that the next call to optimizer_array will update the optimizer_array to the latest parameters # tells the optimizer array to update on next request Setup the default observers 1: parameters_changed_notify 2: pass through to parent, if present #=========================================================================== # From being parentable, we have to define the parent_change notification #=========================================================================== Notify all parameters that the parent has changed This method gets called when parameters have changed. Another way of listening to param changes is to add self as a listener to the param, such that updates get passed through. See :py:function:``paramz.param.Observable.add_observer`` # pragma: no coverage Save all the model parameters into a file (HDF5 by default). This is not supported yet. We are working on having a consistent, human readable way of saving and loading GPy models. This only saves the parameter array to a hdf5 file. In order to load the model again, use the same script for building the model you used to build this model. Then load the param array from this hdf5 file and set the parameters of the created model: >>> m[:] = h5_file['param_array'] This is less then optimal, we are working on a better solution to that. 
| 1.924753 | 2 |
pythonExamples/FirstPythonLect/pythonLect3.py | davidruffner/computation-physics-nyu-2009 | 0 | 6617032 | <filename>pythonExamples/FirstPythonLect/pythonLect3.py
# Demonstrates Python truthiness rules for strings.
a = ' false'
# A non-empty string is truthy -- even the text ' false' -- so this prints True.
# Fixed: use the print() function form, which runs identically on Python 2
# and Python 3 for a single argument (the bare statement is a py3 SyntaxError).
print(bool(a))

MyString = ''
# An empty string is falsy, so the else branch runs.
if MyString:
    print('true')
else:
    print('false')
| <filename>pythonExamples/FirstPythonLect/pythonLect3.py
# Demonstrates Python truthiness rules for strings (Python 2 print syntax).
a =' false'
# A non-empty string is truthy -- even the text ' false' -- so this prints True.
print bool(a)
MyString = ''
# An empty string is falsy, so the else branch runs and prints 'false'.
if MyString:
    print 'true'
else:
    print 'false'
| none | 1 | 3.026273 | 3 | |
examples/redis_cache.py | marching-cube/python-mysql-replication | 1,880 | 6617033 | <gh_stars>1000+
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Update a redis server cache when an evenement is trigger
# in MySQL replication log
#
import redis
from pymysqlreplication import BinLogStreamReader
from pymysqlreplication.row_event import (
DeleteRowsEvent,
UpdateRowsEvent,
WriteRowsEvent,
)
# Connection settings for the MySQL server whose binlog we replicate from.
# NOTE(review): credentials are development placeholders (root / empty
# password) -- replace before using against a real server.
MYSQL_SETTINGS = {
    "host": "127.0.0.1",
    "port": 3306,
    "user": "root",
    "passwd": ""
}
def main():
    """
    Tail the MySQL binlog and mirror every row change into Redis.

    Each affected row is stored as a Redis hash under the key
    ``"<schema>:<table>:<id>"``: inserts and updates write the new row
    image, deletes remove the key.  Assumes every replicated table has an
    ``id`` column -- TODO confirm for your schema.
    """
    r = redis.Redis()
    stream = BinLogStreamReader(
        connection_settings=MYSQL_SETTINGS,
        only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent])
    try:
        for binlogevent in stream:
            prefix = "%s:%s:" % (binlogevent.schema, binlogevent.table)

            for row in binlogevent.rows:
                if isinstance(binlogevent, DeleteRowsEvent):
                    r.delete(prefix + str(row["values"]["id"]))
                else:
                    # Updates carry the new row image in "after_values",
                    # inserts in "values".
                    vals = row["after_values"] if isinstance(binlogevent, UpdateRowsEvent) else row["values"]
                    # Fixed: Redis.hmset() was deprecated in redis-py 3.x and
                    # removed in 4.x; hset() with mapping= is the supported
                    # equivalent.
                    r.hset(prefix + str(vals["id"]), mapping=vals)
    finally:
        # Always release the replication connection, even when the loop
        # raises (the original leaked the stream on error).
        stream.close()


if __name__ == "__main__":
    main()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Update a redis server cache when an evenement is trigger
# in MySQL replication log
#
import redis
from pymysqlreplication import BinLogStreamReader
from pymysqlreplication.row_event import (
DeleteRowsEvent,
UpdateRowsEvent,
WriteRowsEvent,
)
# Connection settings for the MySQL server whose binlog we replicate from.
# NOTE(review): credentials are development placeholders -- replace before
# using against a real server.
MYSQL_SETTINGS = {
    "host": "127.0.0.1",
    "port": 3306,
    "user": "root",
    "passwd": ""
}

def main():
    # Mirror MySQL row changes into Redis hashes keyed "<schema>:<table>:<id>".
    # Assumes every replicated table has an "id" column -- TODO confirm.
    r = redis.Redis()
    stream = BinLogStreamReader(
        connection_settings=MYSQL_SETTINGS,
        only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent])

    for binlogevent in stream:
        prefix = "%s:%s:" % (binlogevent.schema, binlogevent.table)

        for row in binlogevent.rows:
            if isinstance(binlogevent, DeleteRowsEvent):
                vals = row["values"]
                r.delete(prefix + str(vals["id"]))
            elif isinstance(binlogevent, UpdateRowsEvent):
                # Updates carry the new row image in "after_values".
                vals = row["after_values"]
                # NOTE(review): Redis.hmset was deprecated in redis-py 3.x and
                # removed in 4.x -- prefer r.hset(key, mapping=vals).
                r.hmset(prefix + str(vals["id"]), vals)
            elif isinstance(binlogevent, WriteRowsEvent):
                vals = row["values"]
                r.hmset(prefix + str(vals["id"]), vals)

    stream.close()

if __name__ == "__main__":
    main()
api/roadnet_tf_2/__init__.py | hieubkvn123/RoadNetDemo | 0 | 6617034 | <reponame>hieubkvn123/RoadNetDemo<filename>api/roadnet_tf_2/__init__.py
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
from argparse import ArgumentParser
from roadnet_tf_2.roadnet import RoadNet
from tensorflow.keras.models import Model
# Location of the pre-trained RoadNet weights and a sample input image.
MODEL_CHECKPOINT = 'roadnet_tf_2/models/model_4.weights.hdf5'
TEST_IMG = '../data/1/Ottawa-1.tif'
# Loaded at import time.
# NOTE(review): `original` is not used anywhere in this module's visible
# code — possibly leftover from debugging.
original = cv2.imread(TEST_IMG)
#gt_segment = cv2.imread(TEST_LBL)
# Build RoadNet, restore the checkpoint, then re-wrap the network so that
# predict() returns the three heads: road surface, centerline and edge maps.
model = RoadNet().get_model()
model.load_weights(MODEL_CHECKPOINT)
model = Model(model.inputs, [
    model.get_layer('surface_final_output').output,
    model.get_layer('line_final_output').output,
    model.get_layer('edge_final_output').output
])
def crop_image(img, crop_size=(128,128)):
    """Tile an image into fixed-size patches.

    The image is first resized down to the nearest whole multiple of the
    patch size in each dimension, then cut into non-overlapping patches
    in row-major order.

    Returns a tuple (patches, rows, cols) where `patches` is the list of
    crops and rows/cols are the grid dimensions.
    """
    patch_h, patch_w = crop_size
    rows = int(img.shape[0] / patch_h)
    cols = int(img.shape[1] / patch_w)
    # cv2.resize expects (width, height).
    resized = cv2.resize(img, (patch_w * cols, patch_h * rows))
    patches = [
        resized[r * patch_h:(r + 1) * patch_h, c * patch_w:(c + 1) * patch_w]
        for r in range(rows)
        for c in range(cols)
    ]
    return patches, rows, cols
# img = cv2.imread(TEST_IMG)
def make_prediction(img):
    """Run RoadNet over a full image and return the stitched road-surface map.

    The image is tiled by crop_image, each tile is run through the
    module-level `model` (surface / centerline / edge heads) and the tile
    outputs are stitched back together with hconcat/vconcat in row-major
    order. Returns the surface map as uint8 (thresholded values scaled to
    0/255). The centerline and edge visualisations are computed but are
    currently neither saved nor returned.
    """
    crops, ratio_h, ratio_w = crop_image(img)
    crops = np.array(crops)
    print('[INFO] Running full prediction on segmentation')
    full_image = None
    full_image_line = None
    full_image_edge = None
    def parse_to_binary_map(map_):
        ### Make foreground black and background white ###
        ''' Easier to parse to geojson later on'''
        # In-place threshold at 0.5; values exactly equal to 0.5 are left
        # unchanged.
        map_[map_ > 0.5] = 1
        map_[map_ < 0.5] = 0
        return map_
    for i in range(ratio_h):
        horizontal_image = None
        horizontal_image_line = None
        horizontal_image_edge = None
        for j in range(ratio_w):
            # Tiles were produced in row-major order by crop_image.
            index = ratio_w * i + j
            map_, line, edge = model.predict(np.array([crops[index]]))
            map_ = parse_to_binary_map(map_[0])
            line = parse_to_binary_map(line[0])
            edge = parse_to_binary_map(edge[0])
            if(j == 0):
                horizontal_image = map_
                horizontal_image_line = line
                horizontal_image_edge = edge
            else:
                # Grow the current row of tiles to the right.
                horizontal_image = cv2.hconcat([horizontal_image, map_])
                horizontal_image_line = cv2.hconcat([horizontal_image_line, line])
                horizontal_image_edge = cv2.hconcat([horizontal_image_edge, edge])
        if(i == 0):
            full_image = horizontal_image
            full_image_line = horizontal_image_line
            full_image_edge = horizontal_image_edge
        else:
            # Stack the completed row underneath the image built so far.
            full_image = cv2.vconcat([full_image, horizontal_image])
            full_image_line = cv2.vconcat([full_image_line, horizontal_image_line])
            full_image_edge = cv2.vconcat([full_image_edge, horizontal_image_edge])
    # NOTE(review): file names are derived from the module-level TEST_IMG,
    # not from the `img` argument — confirm this is intentional.
    basename = os.path.basename(TEST_IMG)
    filename = basename.split('.')[0]
    save_file_name = "sample_predictions/full_prediction_" + filename + ".jpg"
    line_file_name = "sample_predictions/centerline_" + filename + ".jpg"
    # Scale the thresholded map to the 0-255 range for image output.
    full_image *= 255
    full_image = full_image.astype(np.uint8)
    full_image_line_3d = np.zeros((full_image.shape[0], full_image.shape[1], 3))
    full_image_line_3d[full_image_line == 0] = [0, 0, 0]
    full_image_line_3d[full_image_line == 1] = [255,255,255]
    ### dilate image abit for readability ###
    #kernel = np.ones((5,5), np.uint8)
    #full_image_line_3d = cv2.dilate(full_image_line_3d, kernel, iterations=1)
    full_image_line_3d = 255 - full_image_line_3d
    full_image_edge_3d = np.zeros((full_image.shape[0], full_image.shape[1], 3))
    full_image_edge_3d[full_image_edge == 0] = [0, 0, 0]
    full_image_edge_3d[full_image_edge == 1] = [255,255,255]
    ### dilate image abit for readability ###
    kernel = np.ones((5,5), np.uint8)
    full_image_edge_3d = cv2.dilate(full_image_edge_3d, kernel, iterations=2)
    full_image_edge_3d = cv2.erode(full_image_edge_3d, kernel, iterations=1)
    full_image_edge_3d = 255 - full_image_edge_3d ### Invert the image ###
    print('[INFO] Saving prediction result in %s' % save_file_name)
    # NOTE(review): despite the log line above, nothing is written to disk —
    # the imwrite call below is commented out.
    # cv2.imwrite(line_file_name, full_image_line_3d)
    return full_image
| import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
from argparse import ArgumentParser
from roadnet_tf_2.roadnet import RoadNet
from tensorflow.keras.models import Model
MODEL_CHECKPOINT = 'roadnet_tf_2/models/model_4.weights.hdf5'
TEST_IMG = '../data/1/Ottawa-1.tif'
original = cv2.imread(TEST_IMG)
#gt_segment = cv2.imread(TEST_LBL)
model = RoadNet().get_model()
model.load_weights(MODEL_CHECKPOINT)
model = Model(model.inputs, [
model.get_layer('surface_final_output').output,
model.get_layer('line_final_output').output,
model.get_layer('edge_final_output').output
])
def crop_image(img, crop_size=(128,128)):
crops = []
H, W = img.shape[0], img.shape[1]
### Get the ratio to resize ###
ratio_h = int(H/crop_size[0])
ratio_w = int(W/crop_size[1])
### get the refined resize dimensions ###
resized_dimensions = (crop_size[1] * ratio_w , crop_size[0] * ratio_h)
### resize the image ###
img_resize = cv2.resize(img, resized_dimensions)
### Divide the images into chunks of 128 x 128 squares ###
for i in range(ratio_h):
for j in range(ratio_w):
crop = img_resize[i*crop_size[0]: (i+1)*crop_size[0], j*crop_size[1]:(j+1)*crop_size[1]]
crops.append(crop)
return crops, ratio_h, ratio_w
# img = cv2.imread(TEST_IMG)
def make_prediction(img):
crops, ratio_h, ratio_w = crop_image(img)
crops = np.array(crops)
print('[INFO] Running full prediction on segmentation')
full_image = None
full_image_line = None
full_image_edge = None
def parse_to_binary_map(map_):
### Make foreground black and background white ###
''' Easier to parse to geojson later on'''
map_[map_ > 0.5] = 1
map_[map_ < 0.5] = 0
return map_
for i in range(ratio_h):
horizontal_image = None
horizontal_image_line = None
horizontal_image_edge = None
for j in range(ratio_w):
index = ratio_w * i + j
map_, line, edge = model.predict(np.array([crops[index]]))
map_ = parse_to_binary_map(map_[0])
line = parse_to_binary_map(line[0])
edge = parse_to_binary_map(edge[0])
if(j == 0):
horizontal_image = map_
horizontal_image_line = line
horizontal_image_edge = edge
else:
horizontal_image = cv2.hconcat([horizontal_image, map_])
horizontal_image_line = cv2.hconcat([horizontal_image_line, line])
horizontal_image_edge = cv2.hconcat([horizontal_image_edge, edge])
if(i == 0):
full_image = horizontal_image
full_image_line = horizontal_image_line
full_image_edge = horizontal_image_edge
else:
full_image = cv2.vconcat([full_image, horizontal_image])
full_image_line = cv2.vconcat([full_image_line, horizontal_image_line])
full_image_edge = cv2.vconcat([full_image_edge, horizontal_image_edge])
basename = os.path.basename(TEST_IMG)
filename = basename.split('.')[0]
save_file_name = "sample_predictions/full_prediction_" + filename + ".jpg"
line_file_name = "sample_predictions/centerline_" + filename + ".jpg"
full_image *= 255
full_image = full_image.astype(np.uint8)
full_image_line_3d = np.zeros((full_image.shape[0], full_image.shape[1], 3))
full_image_line_3d[full_image_line == 0] = [0, 0, 0]
full_image_line_3d[full_image_line == 1] = [255,255,255]
### dilate image abit for readability ###
#kernel = np.ones((5,5), np.uint8)
#full_image_line_3d = cv2.dilate(full_image_line_3d, kernel, iterations=1)
full_image_line_3d = 255 - full_image_line_3d
full_image_edge_3d = np.zeros((full_image.shape[0], full_image.shape[1], 3))
full_image_edge_3d[full_image_edge == 0] = [0, 0, 0]
full_image_edge_3d[full_image_edge == 1] = [255,255,255]
### dilate image abit for readability ###
kernel = np.ones((5,5), np.uint8)
full_image_edge_3d = cv2.dilate(full_image_edge_3d, kernel, iterations=2)
full_image_edge_3d = cv2.erode(full_image_edge_3d, kernel, iterations=1)
full_image_edge_3d = 255 - full_image_edge_3d ### Invert the image ###
print('[INFO] Saving prediction result in %s' % save_file_name)
# cv2.imwrite(line_file_name, full_image_line_3d)
return full_image | en | 0.5372 | #gt_segment = cv2.imread(TEST_LBL) ### Get the ratio to resize ### ### get the refined resize dimensions ### ### resize the image ### ### Divide the images into chunks of 128 x 128 squares ### # img = cv2.imread(TEST_IMG) ### Make foreground black and background white ### Easier to parse to geojson later on ### dilate image abit for readability ### #kernel = np.ones((5,5), np.uint8) #full_image_line_3d = cv2.dilate(full_image_line_3d, kernel, iterations=1) ### dilate image abit for readability ### ### Invert the image ### # cv2.imwrite(line_file_name, full_image_line_3d) | 2.424907 | 2 |
src/Utility/Utils.py | alexanu/TradingBot | 0 | 6617035 | import logging
import json
from enum import Enum
class TradeDirection(Enum):
    """
    Enumeration that represents the trade direction in the market: NONE means
    no action to take.
    """
    NONE = "NONE"  # take no action
    BUY = "BUY"  # place a buy order
    SELL = "SELL"  # place a sell order
class MarketClosedException(Exception):
    """Error to notify that the market is currently closed"""
    pass  # marker exception: the type alone conveys the condition
class NotSafeToTradeException(Exception):
    """Error to notify that it is not safe to trade"""
    pass  # marker exception: the type alone conveys the condition
class Utils:
    """
    Collection of small, stateless helpers used across the bot.

    Every member is a static method; instantiating the class is possible
    but unnecessary.
    """

    def __init__(self):
        pass

    @staticmethod
    def midpoint(p1, p2):
        """Return the value halfway between p1 and p2"""
        total = p1 + p2
        return total / 2

    @staticmethod
    def percentage_of(percent, whole):
        """Return the given percentage of the whole"""
        return (percent * whole) / 100.0

    @staticmethod
    def percentage(part, whole):
        """Return which percentage of the whole the part represents"""
        return 100 * float(part) / float(whole)

    @staticmethod
    def is_between(time, time_range):
        """Return True if time falls inside time_range.

        `time` is an 'HH:MM' string and `time_range` a (start, end) tuple
        of 'HH:MM' strings; ranges wrapping past midnight (end < start)
        are supported.
        """
        start, end = time_range
        if end < start:
            # Range wraps around midnight.
            return time >= start or time <= end
        return start <= time <= end

    @staticmethod
    def humanize_time(secs):
        """Convert a duration in seconds into an hh:mm:ss string"""
        hours, remainder = divmod(secs, 3600)
        mins, seconds = divmod(remainder, 60)
        return "%02d:%02d:%02d" % (hours, mins, seconds)
| import logging
import json
from enum import Enum
class TradeDirection(Enum):
"""
Enumeration that represents the trade direction in the market: NONE means
no action to take.
"""
NONE = "NONE"
BUY = "BUY"
SELL = "SELL"
class MarketClosedException(Exception):
"""Error to notify that the market is currently closed"""
pass
class NotSafeToTradeException(Exception):
"""Error to notify that it is not safe to trade"""
pass
class Utils:
"""
Utility class containing static methods to perform simple general actions
"""
def __init__(self):
pass
@staticmethod
def midpoint(p1, p2):
"""Return the midpoint"""
return (p1 + p2) / 2
@staticmethod
def percentage_of(percent, whole):
"""Return the value of the percentage on the whole"""
return (percent * whole) / 100.0
@staticmethod
def percentage(part, whole):
"""Return the percentage value of the part on the whole"""
return 100 * float(part) / float(whole)
@staticmethod
def is_between(time, time_range):
"""Return True if time is between the time_range. time must be a string.
time_range must be a tuple (a,b) where a and b are strings in format 'HH:MM'"""
if time_range[1] < time_range[0]:
return time >= time_range[0] or time <= time_range[1]
return time_range[0] <= time <= time_range[1]
@staticmethod
def humanize_time(secs):
"""Convert the given time (in seconds) into a readable format hh:mm:ss"""
mins, secs = divmod(secs, 60)
hours, mins = divmod(mins, 60)
return "%02d:%02d:%02d" % (hours, mins, secs)
| en | 0.89477 | Enumeration that represents the trade direction in the market: NONE means no action to take. Error to notify that the market is currently closed Error to notify that it is not safe to trade Utility class containing static methods to perform simple general actions Return the midpoint Return the value of the percentage on the whole Return the percentage value of the part on the whole Return True if time is between the time_range. time must be a string. time_range must be a tuple (a,b) where a and b are strings in format 'HH:MM' Convert the given time (in seconds) into a readable format hh:mm:ss | 3.687263 | 4 |
2019/07/01-EASY-Maximum-value/Solutions/jonathan-vidal/solution.py | jonathan-vidal/daily-questions | 0 | 6617036 | def my_max(num_list):
curr_max = float('-inf')
for num in num_list:
if num > curr_max:
curr_max = num
return curr_max | def my_max(num_list):
curr_max = float('-inf')
for num in num_list:
if num > curr_max:
curr_max = num
return curr_max | none | 1 | 3.608859 | 4 | |
easy_scrape/body_data.py | sarthaknegi/easy_scrape | 3 | 6617037 | <gh_stars>1-10
import pandas as pd
def tbody_data(pretty_table, main_dataframe, flag, choice):
    '''
    Extract the rows of an HTML <tbody> into a pandas DataFrame.

    :param pretty_table: list of parsed <table> elements (BeautifulSoup tags)
    :param main_dataframe: DataFrame the rows are written into; replaced
        with a fresh frame when headers are found (flag == 1) or when a row
        does not fit the current columns
    :param flag: 1 means use the table's <th> cells as column headers,
        anything else keeps main_dataframe's existing columns
    :param choice: index of the table to scrape when multiple tables share
        the same table name
    :return: DataFrame containing the table data
    '''
    tbody = pretty_table[choice].find_all('tbody')
    thead = tbody[0].find_all('th')
    if thead and flag == 1:
        # Use the header cells as column names.
        main_dataframe = pd.DataFrame(columns=[th.text for th in thead])
    pos = 0  # next row index to fill
    for tr in tbody[0].find_all('tr'):
        data_list = [data.text for data in tr.find_all('td')]
        try:
            main_dataframe.loc[pos] = data_list
        except Exception:  # was a bare except; narrowed so Ctrl-C etc. propagate
            print(len(main_dataframe))  # NOTE(review): debug output left in place
            if len(main_dataframe) >= 1:
                # Row does not fit the current columns: retry after dropping
                # the first column (commonly a row-header column).
                thead_list = list(main_dataframe.columns)
                main_dataframe = pd.DataFrame(columns=thead_list[1:])
                try:
                    main_dataframe.loc[pos] = data_list
                except Exception:
                    # Still mismatched: fall back to an empty frame with the
                    # original columns. Rows collected so far are discarded —
                    # this mirrors the original behaviour.
                    main_dataframe = pd.DataFrame(columns=thead_list)
            else:
                # No usable columns yet: number them 0..n-1 from this row.
                main_dataframe = pd.DataFrame(columns=list(range(len(data_list))))
                main_dataframe.loc[pos] = data_list
        pos += 1
    return main_dataframe
| import pandas as pd
def tbody_data(pretty_table, main_dataframe, flag, choice):
'''
:param pretty_table: this is table data
:param main_dataframe: the dataframe wherr the table data will be stored
:param flag: this will tell if the dataframe have headers or not. o says no and 1 says yes.
:param choice: this variable tells the which table to scrape if there are multiple tables with the same table name.
:return: dataframe containing the table data
'''
tbody = pretty_table[choice].find_all('tbody')
thead = tbody[0].find_all('th')
if thead and flag == 1:
thead_list = []
for th in thead:
thead_list.append(th.text)
main_dataframe = pd.DataFrame(columns=thead_list)
pos = 0 #to update the rows
all_trs = tbody[0].find_all('tr')
for tr in all_trs:
data_list = []
for data in tr.find_all('td'):
data_list.append(data.text)
try:
main_dataframe.loc[pos] = data_list
except:
print(len(main_dataframe))
if len(main_dataframe) >= 1:
thead_list = list(main_dataframe.columns)
new_thead_list = thead_list[1:]
main_dataframe = pd.DataFrame(columns=new_thead_list)
try:
main_dataframe.loc[pos] = data_list
except:
main_dataframe = pd.DataFrame(columns=thead_list)
else:
main_dataframe = pd.DataFrame(columns=list(range(len(data_list))))
main_dataframe.loc[pos] = data_list
pos+=1
return main_dataframe | en | 0.59821 | :param pretty_table: this is table data :param main_dataframe: the dataframe wherr the table data will be stored :param flag: this will tell if the dataframe have headers or not. o says no and 1 says yes. :param choice: this variable tells the which table to scrape if there are multiple tables with the same table name. :return: dataframe containing the table data #to update the rows | 3.278107 | 3 |
scripts/npc/drang_room1.py | pantskun/swordiemen | 0 | 6617038 | # Parwen (2111006) | Authorized Personnel Only (261020401)
parwenKnows = 3320
deLangPotion = 3354
dranLab = 926120200
if sm.hasQuest(parwenKnows) or sm.hasQuestCompleted(parwenKnows) and not sm.hasQuest(deLangPotion) and not sm.hasQuestCompleted(deLangPotion):
response = sm.sendAskYesNo("Are you ready to visit #m" + repr(dranLab) + "#?")
if response:
sm.warpInstanceIn(dranLab, 0)
else:
if sm.hasQuest(deLangPotion) or sm.hasQuestCompleted(deLangPotion):
sm.sendSayOkay("You really don't need to see that alchemist again, do you?")
else:
sm.sendSayOkay("You're not ready for this yet.") | # Parwen (2111006) | Authorized Personnel Only (261020401)
parwenKnows = 3320
deLangPotion = 3354
dranLab = 926120200
if sm.hasQuest(parwenKnows) or sm.hasQuestCompleted(parwenKnows) and not sm.hasQuest(deLangPotion) and not sm.hasQuestCompleted(deLangPotion):
response = sm.sendAskYesNo("Are you ready to visit #m" + repr(dranLab) + "#?")
if response:
sm.warpInstanceIn(dranLab, 0)
else:
if sm.hasQuest(deLangPotion) or sm.hasQuestCompleted(deLangPotion):
sm.sendSayOkay("You really don't need to see that alchemist again, do you?")
else:
sm.sendSayOkay("You're not ready for this yet.") | en | 0.379749 | # Parwen (2111006) | Authorized Personnel Only (261020401) #m" + repr(dranLab) + "#?") | 2.203742 | 2 |
examples/run_trainer.py | ProgrammerNeoo/ColossalAI | 1 | 6617039 | <gh_stars>1-10
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import colossalai
from colossalai.core import global_context as gpc
from colossalai.engine import Engine
from colossalai.logging import get_global_dist_logger
from colossalai.trainer import Trainer
def run_trainer():
    """Initialise ColossalAI, assemble an Engine and Trainer, and train.

    All components (model, dataloaders, criterion, optimizer, schedule and
    LR scheduler) come from colossalai.initialize(); the hook configuration
    and epoch count are read from the global context config. Progress is
    logged on rank 0 only.
    """
    (net, train_loader, test_loader, loss_fn,
     optim, sched, lr_sched) = colossalai.initialize()
    log = get_global_dist_logger()

    # Disable data synchronisation on the schedule, as in the original setup.
    sched.data_sync = False

    eng = Engine(model=net, criterion=loss_fn, optimizer=optim,
                 lr_scheduler=lr_sched, schedule=sched)
    log.info("engine is built", ranks=[0])

    trainer = Trainer(engine=eng, hooks_cfg=gpc.config.hooks, verbose=True)
    log.info("trainer is built", ranks=[0])

    log.info("start training", ranks=[0])
    trainer.fit(train_dataloader=train_loader, test_dataloader=test_loader,
                max_epochs=gpc.config.num_epochs, display_progress=True,
                test_interval=2)
if __name__ == '__main__':
    # Script entry point: only run training when executed directly.
    run_trainer()
| #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import colossalai
from colossalai.core import global_context as gpc
from colossalai.engine import Engine
from colossalai.logging import get_global_dist_logger
from colossalai.trainer import Trainer
def run_trainer():
model, train_dataloader, test_dataloader, criterion, optimizer, schedule, lr_scheduler = colossalai.initialize()
logger = get_global_dist_logger()
schedule.data_sync = False
engine = Engine(
model=model,
criterion=criterion,
optimizer=optimizer,
lr_scheduler=lr_scheduler,
schedule=schedule
)
logger.info("engine is built", ranks=[0])
trainer = Trainer(engine=engine,
hooks_cfg=gpc.config.hooks,
verbose=True)
logger.info("trainer is built", ranks=[0])
logger.info("start training", ranks=[0])
trainer.fit(
train_dataloader=train_dataloader,
test_dataloader=test_dataloader,
max_epochs=gpc.config.num_epochs,
display_progress=True,
test_interval=2
)
if __name__ == '__main__':
run_trainer() | en | 0.367975 | #!/usr/bin/env python # -*- encoding: utf-8 -*- | 2.160286 | 2 |
project/createNC.py | wesleybowman/karsten | 1 | 6617040 | import netCDF4 as nc
def createNC(data, filename='test.nc'):
    """Write the model fields in *data* to a NetCDF4 file.

    :param data: mapping holding every field listed in the variable spec
        below under the same key
    :param filename: output path; defaults to 'test.nc' to preserve the
        original behaviour

    Every variable is stored as 64-bit floats ('f8') on a single unlimited
    dimension named 'dim' for 1-D fields, or on ('dim', 'dim') for 2-D
    fields.
    """
    # (name, rank) in the exact order the original wrote them, so files
    # produced before and after this change list variables identically.
    var_specs = (
        ('time', 1), ('x', 1), ('y', 1), ('xc', 1), ('yc', 1), ('h', 1),
        ('lon', 1), ('lat', 1), ('lonc', 1), ('latc', 1),
        ('elev', 2), ('ua', 2), ('va', 2),
        ('node_index', 1), ('element_index', 1),
        ('nbe', 2), ('nv', 2), ('a1u', 2), ('a2u', 2),
        ('aw0', 2), ('awx', 2), ('awy', 2), ('siglay', 2), ('siglev', 2),
    )
    ncFile = nc.Dataset(filename, 'w', format='NETCDF4')
    try:
        ncFile.createDimension('dim', None)
        for name, rank in var_specs:
            dims = ('dim',) if rank == 1 else ('dim', 'dim')
            var = ncFile.createVariable(name, 'f8', dims)
            var[:] = data[name]
    finally:
        # Close the file even if a key is missing or a write fails
        # (the original leaked the handle on error).
        ncFile.close()
| import netCDF4 as nc
def createNC(data):
ncFile = nc.Dataset('test.nc', 'w', format='NETCDF4')
#ncgrp = ncFile.createGroup('regioned')
#ncgrp.createDimension('dim', None)
#ncgrp = ncFile.createGroup('regioned')
#ncFile.createDimension('dimTest', None)
ncFile.createDimension('dim', None)
time = ncFile.createVariable('time', 'f8', ('dim',))
time[:] = data['time']
x = ncFile.createVariable('x', 'f8', ('dim',))
x[:] = data['x']
y = ncFile.createVariable('y', 'f8', ('dim',))
y[:] = data['y']
xc = ncFile.createVariable('xc', 'f8', ('dim',))
xc[:] = data['xc']
yc = ncFile.createVariable('yc', 'f8', ('dim',))
yc[:] = data['yc']
h = ncFile.createVariable('h', 'f8', ('dim',))
h[:] = data['h']
lon = ncFile.createVariable('lon', 'f8', ('dim',))
lon[:] = data['lon']
lat = ncFile.createVariable('lat', 'f8', ('dim',))
lat[:] = data['lat']
lonc = ncFile.createVariable('lonc', 'f8', ('dim',))
lonc[:] = data['lonc']
latc = ncFile.createVariable('latc', 'f8', ('dim',))
latc[:] = data['latc']
elev = ncFile.createVariable('elev', 'f8', ('dim', 'dim'))
elev[:] = data['elev']
ua = ncFile.createVariable('ua', 'f8', ('dim', 'dim'))
ua[:] = data['ua']
va = ncFile.createVariable('va', 'f8', ('dim', 'dim'))
va[:] = data['va']
node_index = ncFile.createVariable('node_index', 'f8', ('dim',))
node_index[:] = data['node_index']
element_index = ncFile.createVariable('element_index', 'f8', ('dim',))
element_index[:] = data['element_index']
nbe = ncFile.createVariable('nbe', 'f8', ('dim', 'dim'))
nbe[:] = data['nbe']
nv = ncFile.createVariable('nv', 'f8', ('dim', 'dim'))
nv[:] = data['nv']
a1u = ncFile.createVariable('a1u', 'f8', ('dim', 'dim'))
a1u[:] = data['a1u']
a2u = ncFile.createVariable('a2u', 'f8', ('dim', 'dim'))
a2u[:] = data['a2u']
aw0 = ncFile.createVariable('aw0', 'f8', ('dim', 'dim'))
aw0[:] = data['aw0']
awx = ncFile.createVariable('awx', 'f8', ('dim', 'dim'))
awx[:] = data['awx']
awy = ncFile.createVariable('awy', 'f8', ('dim', 'dim'))
awy[:] = data['awy']
siglay = ncFile.createVariable('siglay', 'f8', ('dim', 'dim'))
siglay[:] = data['siglay']
siglev = ncFile.createVariable('siglev', 'f8', ('dim', 'dim'))
siglev[:] = data['siglev']
ncFile.close()
| en | 0.123127 | #ncgrp = ncFile.createGroup('regioned') #ncgrp.createDimension('dim', None) #ncgrp = ncFile.createGroup('regioned') #ncFile.createDimension('dimTest', None) | 2.326468 | 2 |
kaneda/base.py | APSL/kaneda | 59 | 6617041 | from __future__ import absolute_import
from time import time
from functools import wraps
from kaneda.utils import get_kaneda_objects
class Metrics(object):
    """
    Metrics reporting class
    :param backend: instance of kaneda.backends. It is the responsible to store the reported data.
    :param queue: instance of kaneda.queues. It is the responsible to store the reported data asynchronously.
    If none of the parameters are passed it tries get the backend from kaneda settings file.
    """
    def __init__(self, backend=None, queue=None):
        self.backend = backend
        self.queue = queue
        if not self.backend and not self.queue:
            # Neither supplied explicitly: fall back to whatever the kaneda
            # settings file configures (see kaneda.utils.get_kaneda_objects).
            self.backend, self.queue = get_kaneda_objects()
    def gauge(self, name, value, tags=None):
        """
        Record the value of a gauge.
        >>> metrics.gauge('users.notifications', 13, tags=['new_message', 'follow_request'])
        """
        return self._report(name, 'gauge', value, tags)
    def increment(self, name, tags=None):
        """
        Increment a counter.
        >>> metrics.increment('user.profile.views')
        """
        self._report(name, 'counter', 1, tags)
    def decrement(self, name, tags=None):
        """
        Decrement a counter.
        >>> metrics.decrement('hotel.occupation')
        """
        self._report(name, 'counter', -1, tags)
    def timing(self, name, value, tags=None):
        """
        Record a timing.
        >>> metrics.timing('hotel.availability.request_time', 4)
        """
        self._report(name, 'timing', value, tags)
    def event(self, name, text, tags=None):
        """
        Record an event.
        >>> metrics.event('user.signup', 'New user registered')
        """
        self._report(name, 'event', text, tags)
    def custom(self, name, metric, value, tags=None, id_=None):
        """
        Send a custom metric report.
        >>> metrics.custom('hotel.response_data', metric='xml', value={'status': 'ok', 'xml': ...}, id_='2B75D750')
        """
        self._report(name, metric, value, tags, id_)
    class _TimedContextManagerDecorator(object):
        """
        Class that implements the context manager and the decorator for "timed" method.
        """
        def __init__(self, metrics, name=None, tags=None, use_ms=None):
            self.metrics = metrics
            self.name = name
            self.tags = tags
            self.use_ms = use_ms
        def __call__(self, func):
            """
            Decorator which returns the elapsed time of the function call.
            """
            if not self.name:
                # Default metric name: "<module>.<function>".
                self.name = u'{0:s}.{1:s}'.format(func.__module__, func.__name__)
            @wraps(func)
            def wrapped(*args, **kwargs):
                # Reuse the context-manager protocol so the decorator and the
                # with-statement share a single timing implementation.
                with self:
                    return func(*args, **kwargs)
            return wrapped
        def __enter__(self):
            # Wall-clock start; the elapsed time is computed in __exit__.
            self.start = time()
        def __exit__(self, type, value, traceback):
            # Runs even when the body raised, so the timing is always
            # reported (the exception still propagates afterwards).
            elapsed = time() - self.start
            # Report milliseconds as a rounded int when use_ms is set,
            # otherwise fractional seconds.
            elapsed = int(round(1000 * elapsed)) if self.use_ms else elapsed
            self.metrics.timing(self.name, elapsed, self.tags)
    def timed(self, name=None, tags=None, use_ms=None):
        """
        Measure the amount of time of a function (using a decorator) or a piece of
        code (using a context manager). If name is not provided while using the decorator it
        will be used the name of the module and the function.
        ::
            # With decorator
            @metrics.timed('request.response_time')
            def perform_request(params):
                pass
            # With context manager
            with metrics.timed('request.response_time'):
                pass
        """
        return self._TimedContextManagerDecorator(self, name, tags, use_ms)
    def _report(self, name, metric, value, tags, id_=None):
        # Route the measurement: a synchronous backend takes precedence over
        # an asynchronous queue when both are configured.
        if self.backend:
            return self.backend.report(name, metric, value, tags, id_)
        elif self.queue:
            return self.queue.report(name, metric, value, tags, id_)
| from __future__ import absolute_import
from time import time
from functools import wraps
from kaneda.utils import get_kaneda_objects
class Metrics(object):
"""
Metrics reporting class
:param backend: instance of kaneda.backends. It is the responsible to store the reported data.
:param queue: instance of kaneda.queues. It is the responsible to store the reported data asynchronously.
If none of the parameters are passed it tries get the backend from kaneda settings file.
"""
def __init__(self, backend=None, queue=None):
self.backend = backend
self.queue = queue
if not self.backend and not self.queue:
self.backend, self.queue = get_kaneda_objects()
def gauge(self, name, value, tags=None):
"""
Record the value of a gauge.
>>> metrics.gauge('users.notifications', 13, tags=['new_message', 'follow_request'])
"""
return self._report(name, 'gauge', value, tags)
def increment(self, name, tags=None):
"""
Increment a counter.
>>> metrics.increment('user.profile.views')
"""
self._report(name, 'counter', 1, tags)
def decrement(self, name, tags=None):
"""
Decrement a counter.
>>> metrics.decrement('hotel.occupation')
"""
self._report(name, 'counter', -1, tags)
def timing(self, name, value, tags=None):
"""
Record a timing.
>>> metrics.timing('hotel.availability.request_time', 4)
"""
self._report(name, 'timing', value, tags)
def event(self, name, text, tags=None):
"""
Record an event.
>>> metrics.event('user.signup', 'New user registered')
"""
self._report(name, 'event', text, tags)
def custom(self, name, metric, value, tags=None, id_=None):
"""
Send a custom metric report.
>>> metrics.custom('hotel.response_data', metric='xml', value={'status': 'ok', 'xml': ...}, id_='2B75D750')
"""
self._report(name, metric, value, tags, id_)
class _TimedContextManagerDecorator(object):
"""
Class that implements the context manager and the decorator for "timed" method.
"""
def __init__(self, metrics, name=None, tags=None, use_ms=None):
self.metrics = metrics
self.name = name
self.tags = tags
self.use_ms = use_ms
def __call__(self, func):
"""
Decorator which returns the elapsed time of the function call.
"""
if not self.name:
self.name = u'{0:s}.{1:s}'.format(func.__module__, func.__name__)
@wraps(func)
def wrapped(*args, **kwargs):
with self:
return func(*args, **kwargs)
return wrapped
def __enter__(self):
self.start = time()
def __exit__(self, type, value, traceback):
elapsed = time() - self.start
elapsed = int(round(1000 * elapsed)) if self.use_ms else elapsed
self.metrics.timing(self.name, elapsed, self.tags)
def timed(self, name=None, tags=None, use_ms=None):
"""
Measure the amount of time of a function (using a decorator) or a piece of
code (using a context manager). If name is not provided while using the decorator it
will be used the name of the module and the function.
::
# With decorator
@metrics.timed('request.response_time')
def perform_request(params):
pass
# With context manager
with metrics.timed('request.response_time'):
pass
"""
return self._TimedContextManagerDecorator(self, name, tags, use_ms)
def _report(self, name, metric, value, tags, id_=None):
if self.backend:
return self.backend.report(name, metric, value, tags, id_)
elif self.queue:
return self.queue.report(name, metric, value, tags, id_)
| en | 0.578465 | Metrics reporting class :param backend: instance of kaneda.backends. It is the responsible to store the reported data. :param queue: instance of kaneda.queues. It is the responsible to store the reported data asynchronously. If none of the parameters are passed it tries get the backend from kaneda settings file. Record the value of a gauge. >>> metrics.gauge('users.notifications', 13, tags=['new_message', 'follow_request']) Increment a counter. >>> metrics.increment('user.profile.views') Decrement a counter. >>> metrics.decrement('hotel.occupation') Record a timing. >>> metrics.timing('hotel.availability.request_time', 4) Record an event. >>> metrics.event('user.signup', 'New user registered') Send a custom metric report. >>> metrics.custom('hotel.response_data', metric='xml', value={'status': 'ok', 'xml': ...}, id_='2B75D750') Class that implements the context manager and the decorator for "timed" method. Decorator which returns the elapsed time of the function call. Measure the amount of time of a function (using a decorator) or a piece of code (using a context manager). If name is not provided while using the decorator it will be used the name of the module and the function. :: # With decorator @metrics.timed('request.response_time') def perform_request(params): pass # With context manager with metrics.timed('request.response_time'): pass | 2.725569 | 3 |
app/utils/log.py | axiaoxin/flask-ping | 8 | 6617042 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import os
import sys
import time
from functools import wraps
from logging import Logger, raiseExceptions
from logging import FileHandler
import settings
import utils
from services import sentry
class SplitLogger(Logger):
    """Logger that dispatches records to one handler per level.

    Handlers named 'console' keep the standard threshold semantics
    (levelno >= handler level); every other handler only receives records
    whose level matches its own exactly, which is what splits the log
    output into one destination per severity.
    """
    def __init__(self, name, level=logging.NOTSET):
        super(SplitLogger, self).__init__(name, level)
    def callHandlers(self, record):
        """
        Pass a record to all relevant handlers.
        Loop through all handlers for this logger and its parents in the
        logger hierarchy. If no handler was found, output a one-off error
        message to sys.stderr. Stop searching up the hierarchy whenever a
        logger with the "propagate" attribute set to zero is found - that
        will be the last logger whose handlers are called.
        """
        # Adapted from logging.Logger.callHandlers; the only change is the
        # per-handler dispatch rule in the loop below.
        c = self
        found = 0
        while c:
            for hdlr in c.handlers:
                found = found + 1
                if hdlr.name == 'console':
                    # Console handler: usual threshold semantics.
                    if record.levelno >= hdlr.level:
                        hdlr.handle(record)
                else:
                    # Other handlers: exact level match only, so each one
                    # receives a single severity.
                    if record.levelno == hdlr.level:
                        hdlr.handle(record)
            if not c.propagate:
                c = None # break out
            else:
                c = c.parent
        if (
            found == 0
        ) and raiseExceptions and not self.manager.emittedNoHandlerWarning: # noqa
            sys.stderr.write("No handlers could be found for logger"
                             " \"%s\"\n" % self.name)
            self.manager.emittedNoHandlerWarning = 1
class RequestIDLogFilter(logging.Filter):
    """Logging filter that annotates each record with the current request id.

    The id is exposed as ``log_record.request_id`` so formatters can
    reference ``%(request_id)s``.
    """

    def filter(self, log_record):
        # Imported here (not at module level) to avoid a circular import.
        from utils import request

        request_id = request.current_request_id()
        log_record.request_id = request_id
        # Truthy return value keeps the record; this filter never drops one.
        return log_record
def _attach_handler(logger, handler, level, formatter):
    """Configure *handler* (level, formatter, request-id filter) and add it to *logger*."""
    handler.setLevel(level)
    handler.setFormatter(formatter)
    handler.addFilter(RequestIDLogFilter())
    logger.addHandler(handler)


def init_logger(logger_name=settings.LOGGER_NAME,
                logging_level=settings.LOG_LEVEL,
                log_in_console=settings.LOG_IN_CONSOLE,
                log_in_file=settings.LOG_IN_FILE,
                logfile_name=settings.LOGGER_NAME,
                log_path=settings.LOG_PATH,
                split_logfile_by_level=settings.SPLIT_LOGFILE_BY_LEVEL):
    """Create and configure the application logger.

    :param logger_name: name passed to ``logging.getLogger``.
    :param logging_level: textual level name ('debug', 'INFO', ...) set on the logger.
    :param log_in_console: also attach a StreamHandler named 'console'.
    :param log_in_file: attach file handler(s) under ``log_path``.
    :param logfile_name: base name of the log file(s).
    :param split_logfile_by_level: one file per level (via SplitLogger)
        instead of a single combined file.
    :returns: the configured logger.

    NOTE(review): calling this twice with the same name stacks duplicate
    handlers, and calling it with both flags False fails on the final
    ``return logger`` — both behaviours are unchanged from the original.
    """
    formatter = logging.Formatter(
        '[%(asctime)s] [%(process)d] [%(levelname)s] [%(request_id)s] %(message)s') # noqa
    level = logging.getLevelName(logging_level.upper())
    if log_in_file:
        if not os.path.exists(log_path):
            os.makedirs(log_path)
        if split_logfile_by_level:
            # Must be installed before getLogger() so the logger is created
            # as a SplitLogger (exact-level handler dispatch).
            logging.setLoggerClass(SplitLogger)
        logger = logging.getLogger(logger_name)
        logger.setLevel(level)
        if split_logfile_by_level:
            for handler_level, suffix in (
                    (logging.DEBUG, '.debug.log'),
                    (logging.INFO, '.info.log'),
                    (logging.WARNING, '.warning.log'),
                    (logging.ERROR, '.error.log')):
                log_file = os.path.join(log_path, logfile_name + suffix)
                _attach_handler(logger, FileHandler(log_file),
                                handler_level, formatter)
        else:
            log_file = os.path.join(log_path, logfile_name + '.log')
            _attach_handler(logger, FileHandler(log_file),
                            logging.DEBUG, formatter)
    if log_in_console:
        logger = logging.getLogger(logger_name)
        logger.setLevel(level)
        console_handler = logging.StreamHandler()
        # SplitLogger special-cases handlers named 'console': they receive
        # records at-or-above their level instead of exact matches.
        console_handler.name = "console"
        _attach_handler(logger, console_handler, logging.DEBUG, formatter)
    return logger
# Application-wide logger, configured from the settings module.
app_logger = init_logger()
# Optionally mirror peewee's SQL statements into a dedicated DEBUG log file.
if settings.LOG_PEEWEE_SQL:
    pw_logger = init_logger('peewee', logging_level='DEBUG',
                            log_in_file=True, logfile_name='peewee')
def _capture_exception_for_sentry(log_func):
def decorator(msg, *args, **kwargs):
log_func(msg, *args, **kwargs)
if isinstance(msg, Exception):
sentry.captureException()
return decorator
# Wrap every level method of the application logger so that logging an
# Exception instance also forwards it to Sentry.
app_logger.debug = _capture_exception_for_sentry(app_logger.debug)
app_logger.info = _capture_exception_for_sentry(app_logger.info)
app_logger.warning = _capture_exception_for_sentry(app_logger.warning)
app_logger.error = _capture_exception_for_sentry(app_logger.error)
app_logger.critical = _capture_exception_for_sentry(app_logger.critical)
app_logger.exception = _capture_exception_for_sentry(app_logger.exception)
def _log_func_call(func, use_time, *func_args, **func_kwargs):
    """Log one call of *func*: qualified name, arguments and elapsed time (ms).

    Uses ``func.__code__`` / ``func.__defaults__`` (available on Python 2.6+
    and Python 3) instead of the Python-2-only ``func_code`` / ``func_defaults``
    attributes the original relied on.
    """
    code = func.__code__
    arg_names = code.co_varnames[:code.co_argcount]
    args = func_args[:len(arg_names)]
    defaults = func.__defaults__ or ()
    # Fill in trailing parameters that were not supplied positionally
    # with their declared default values.
    args = args + defaults[len(defaults) - (code.co_argcount - len(
        args)):]
    # list() so that the extra ('args', ...) / ('kwargs', ...) entries can be
    # appended on Python 3, where zip() returns an iterator.
    params = list(zip(arg_names, args))
    args = func_args[len(arg_names):]
    if args:
        params.append(('args', args))
    if func_kwargs:
        params.append(('kwargs', func_kwargs))
    func_name = utils.get_func_name(func)
    func_call = u'{func_name}({params}) {use_time}ms'.format(
        func_name=func_name,
        params=', '.join('%s=%r' % p for p in params),
        use_time=use_time * 1000)
    app_logger.info(func_call)
def log_func_call(func):
    '''Decorator that logs each call to *func* when settings.LOG_FUNC_CALL is set.'''
    @wraps(func)
    def wrapper(*func_args, **func_kwargs):
        # Fast path: logging disabled, call straight through.
        if not settings.LOG_FUNC_CALL:
            return func(*func_args, **func_kwargs)
        start_time = time.time()
        data = func(*func_args, **func_kwargs)
        elapsed = time.time() - start_time
        _log_func_call(func, elapsed, *func_args, **func_kwargs)
        return data
    return wrapper
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import os
import sys
import time
from functools import wraps
from logging import Logger, raiseExceptions
from logging import FileHandler
import settings
import utils
from services import sentry
class SplitLogger(Logger):
def __init__(self, name, level=logging.NOTSET):
super(SplitLogger, self).__init__(name, level)
def callHandlers(self, record):
"""
Pass a record to all relevant handlers.
Loop through all handlers for this logger and its parents in the
logger hierarchy. If no handler was found, output a one-off error
message to sys.stderr. Stop searching up the hierarchy whenever a
logger with the "propagate" attribute set to zero is found - that
will be the last logger whose handlers are called.
"""
c = self
found = 0
while c:
for hdlr in c.handlers:
found = found + 1
if hdlr.name == 'console':
if record.levelno >= hdlr.level:
hdlr.handle(record)
else:
if record.levelno == hdlr.level:
hdlr.handle(record)
if not c.propagate:
c = None # break out
else:
c = c.parent
if (
found == 0
) and raiseExceptions and not self.manager.emittedNoHandlerWarning: # noqa
sys.stderr.write("No handlers could be found for logger"
" \"%s\"\n" % self.name)
self.manager.emittedNoHandlerWarning = 1
class RequestIDLogFilter(logging.Filter):
"""
Log filter to inject the current request id of the request
under `log_record.request_id`
"""
def filter(self, log_record):
from utils import request
log_record.request_id = request.current_request_id()
return log_record
def init_logger(logger_name=settings.LOGGER_NAME,
logging_level=settings.LOG_LEVEL,
log_in_console=settings.LOG_IN_CONSOLE,
log_in_file=settings.LOG_IN_FILE,
logfile_name=settings.LOGGER_NAME,
log_path=settings.LOG_PATH,
split_logfile_by_level=settings.SPLIT_LOGFILE_BY_LEVEL):
formatter = logging.Formatter(
'[%(asctime)s] [%(process)d] [%(levelname)s] [%(request_id)s] %(message)s') # noqa
if log_in_file:
if not os.path.exists(log_path):
os.makedirs(log_path)
if split_logfile_by_level:
logging.setLoggerClass(SplitLogger)
logger = logging.getLogger(logger_name)
level = logging.getLevelName(logging_level.upper())
logger.setLevel(level)
log_files = {
logging.DEBUG:
os.path.join(log_path, logfile_name + '.debug.log'),
logging.INFO:
os.path.join(log_path, logfile_name + '.info.log'),
logging.WARNING:
os.path.join(log_path, logfile_name + '.warning.log'),
logging.ERROR:
os.path.join(log_path, logfile_name + '.error.log'),
}
for log_level, log_file in log_files.items():
file_handler = FileHandler(log_file)
file_handler.setLevel(log_level)
file_handler.setFormatter(formatter)
file_handler.addFilter(RequestIDLogFilter())
logger.addHandler(file_handler)
else:
logger = logging.getLogger(logger_name)
level = logging.getLevelName(logging_level.upper())
logger.setLevel(level)
log_file = os.path.join(log_path, logfile_name + '.log')
file_handler = FileHandler(log_file)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
file_handler.addFilter(RequestIDLogFilter())
logger.addHandler(file_handler)
if log_in_console:
logger = logging.getLogger(logger_name)
level = logging.getLevelName(logging_level.upper())
logger.setLevel(level)
console_handler = logging.StreamHandler()
console_handler.name = "console"
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(formatter)
console_handler.addFilter(RequestIDLogFilter())
logger.addHandler(console_handler)
return logger
app_logger = init_logger()
if settings.LOG_PEEWEE_SQL:
pw_logger = init_logger('peewee', logging_level='DEBUG',
log_in_file=True, logfile_name='peewee')
def _capture_exception_for_sentry(log_func):
def decorator(msg, *args, **kwargs):
log_func(msg, *args, **kwargs)
if isinstance(msg, Exception):
sentry.captureException()
return decorator
app_logger.debug = _capture_exception_for_sentry(app_logger.debug)
app_logger.info = _capture_exception_for_sentry(app_logger.info)
app_logger.warning = _capture_exception_for_sentry(app_logger.warning)
app_logger.error = _capture_exception_for_sentry(app_logger.error)
app_logger.critical = _capture_exception_for_sentry(app_logger.critical)
app_logger.exception = _capture_exception_for_sentry(app_logger.exception)
def _log_func_call(func, use_time, *func_args, **func_kwargs):
arg_names = func.func_code.co_varnames[:func.func_code.co_argcount]
args = func_args[:len(arg_names)]
defaults = func.func_defaults or ()
args = args + defaults[len(defaults) - (func.func_code.co_argcount - len(
args)):]
params = zip(arg_names, args)
args = func_args[len(arg_names):]
if args:
params.append(('args', args))
if func_kwargs:
params.append(('kwargs', func_kwargs))
func_name = utils.get_func_name(func)
func_call = u'{func_name}({params}) {use_time}ms'.format(
func_name=func_name,
params=', '.join('%s=%r' % p for p in params),
use_time=use_time * 1000)
app_logger.info(func_call)
def log_func_call(func):
'''Decorator to log function call'''
@wraps(func)
def wrapper(*func_args, **func_kwargs):
if settings.LOG_FUNC_CALL:
start_time = time.time()
data = func(*func_args, **func_kwargs)
use_time = time.time() - start_time
_log_func_call(func, use_time, *func_args, **func_kwargs)
return data
return func(*func_args, **func_kwargs)
return wrapper
| en | 0.839957 | #!/usr/bin/env python # -*- coding: utf-8 -*- Pass a record to all relevant handlers. Loop through all handlers for this logger and its parents in the logger hierarchy. If no handler was found, output a one-off error message to sys.stderr. Stop searching up the hierarchy whenever a logger with the "propagate" attribute set to zero is found - that will be the last logger whose handlers are called. # break out # noqa Log filter to inject the current request id of the request under `log_record.request_id` # noqa Decorator to log function call | 2.428676 | 2 |
cdesf2/__version__.py | emilioschepis/cdesf2 | 0 | 6617043 | """Current version of package cdesf2"""
__version__ = "1.0.0" | """Current version of package cdesf2"""
__version__ = "1.0.0" | en | 0.81954 | Current version of package cdesf2 | 0.82562 | 1 |
http.py | fengpf/py | 0 | 6617044 |
#!/usr/bin/env python
# Using same TCP connection for all HTTP requests
import os
import json
import time
import logging
import requests
from requests.auth import HTTPBasicAuth
# DEBUG logging makes urllib3 connection reuse visible on stderr.
logging.basicConfig(level=logging.DEBUG)
# Wall-clock reference for the elapsed-time report at the bottom.
start_time = time.time()
def get_venmo_data(limit, pages=50):
    """Print messages from Venmo's public feed, reusing one TCP connection.

    :param limit: number of transactions requested per page.
    :param pages: number of paginated requests to issue (default 50,
        matching the original hard-coded loop).

    A ``requests.Session`` keeps the connection alive across requests.
    """
    session = requests.Session()
    url = "https://venmo.com/api/v5/public?limit={}"
    for _ in range(pages):
        response = session.get(url.format(limit))
        response_dict = json.loads(response.text)
        for transaction in response_dict["data"]:
            # json.loads already yields text strings, so the Python-2-only
            # unicode() wrapper was redundant (and a NameError on Python 3).
            print(transaction["message"])
        # Follow the server-provided cursor to the next page.
        url = response_dict["paging"]["next"] + "&limit={}"
if __name__ == "__main__":
    # One transaction per page; report total elapsed wall-clock time.
    limit = 1
    get_venmo_data(limit)
    print("--- %s seconds ---" % (time.time() - start_time))
|
#!/usr/bin/env python
# Using same TCP connection for all HTTP requests
import os
import json
import time
import logging
import requests
from requests.auth import HTTPBasicAuth
logging.basicConfig(level=logging.DEBUG)
start_time = time.time()
def get_venmo_data(limit):
session = requests.Session()
url = "https://venmo.com/api/v5/public?limit={}"
for i in range(50):
response = session.get(url.format(limit))
response_dict = json.loads(response.text)
for transaction in response_dict["data"]:
print(unicode(transaction["message"]))
url = response_dict["paging"]["next"] + "&limit={}"
if __name__ == "__main__":
limit = 1
get_venmo_data(limit)
print("--- %s seconds ---" % (time.time() - start_time))
| en | 0.451371 | #!/usr/bin/env python # Using same TCP connection for all HTTP requests | 2.834095 | 3 |
backend/server/apps/endpoints/migrations/0001_initial.py | BetikuOluwatobi/model_deployment_with_django | 1 | 6617045 | <reponame>BetikuOluwatobi/model_deployment_with_django<filename>backend/server/apps/endpoints/migrations/0001_initial.py
# Generated by Django 3.0.10 on 2020-10-18 02:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the endpoints app.

    Creates the Endpoints, MLAlgorithm, MLRequest and MLAlgorithmStatus
    tables; MLAlgorithm hangs off Endpoints, and MLRequest /
    MLAlgorithmStatus hang off MLAlgorithm.
    """

    initial = True
    dependencies = [
    ]
    operations = [
        # API endpoint grouping one or more ML algorithms.
        migrations.CreateModel(
            name='Endpoints',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=150)),
                ('owner', models.CharField(max_length=120)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        # A versioned algorithm (including its source code) under an endpoint.
        migrations.CreateModel(
            name='MLAlgorithm',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('code', models.CharField(max_length=6000)),
                ('version', models.CharField(max_length=128)),
                ('owner', models.CharField(max_length=128)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('parent_endpoint', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='algorithms', to='endpoints.Endpoints')),
            ],
        ),
        # One prediction request/response pair, with optional user feedback.
        migrations.CreateModel(
            name='MLRequest',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('input_data', models.CharField(max_length=10000)),
                ('full_response', models.CharField(max_length=10000)),
                ('response', models.CharField(max_length=10000)),
                ('feedback', models.CharField(max_length=10000)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('parent_mlalgorithm', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='request', to='endpoints.MLAlgorithm')),
            ],
        ),
        # Lifecycle status history of an algorithm (testing/staging/...).
        migrations.CreateModel(
            name='MLAlgorithmStatus',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status', models.CharField(choices=[('Testing', 'testing'), ('Staging', 'staging'), ('Production', 'production'), ('AB_Test', 'ab_testing')], default='Testing', max_length=50)),
                ('active', models.BooleanField(default=False)),
                ('created_by', models.CharField(max_length=128)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('parent_mlalgorithm', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='status', to='endpoints.MLAlgorithm')),
            ],
        ),
    ]
| # Generated by Django 3.0.10 on 2020-10-18 02:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Endpoints',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=150)),
('owner', models.CharField(max_length=120)),
('created_at', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='MLAlgorithm',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
('code', models.CharField(max_length=6000)),
('version', models.CharField(max_length=128)),
('owner', models.CharField(max_length=128)),
('created_at', models.DateTimeField(auto_now_add=True)),
('parent_endpoint', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='algorithms', to='endpoints.Endpoints')),
],
),
migrations.CreateModel(
name='MLRequest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('input_data', models.CharField(max_length=10000)),
('full_response', models.CharField(max_length=10000)),
('response', models.CharField(max_length=10000)),
('feedback', models.CharField(max_length=10000)),
('created_at', models.DateTimeField(auto_now_add=True)),
('parent_mlalgorithm', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='request', to='endpoints.MLAlgorithm')),
],
),
migrations.CreateModel(
name='MLAlgorithmStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(choices=[('Testing', 'testing'), ('Staging', 'staging'), ('Production', 'production'), ('AB_Test', 'ab_testing')], default='Testing', max_length=50)),
('active', models.BooleanField(default=False)),
('created_by', models.CharField(max_length=128)),
('created_at', models.DateTimeField(auto_now_add=True)),
('parent_mlalgorithm', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='status', to='endpoints.MLAlgorithm')),
],
),
] | en | 0.845985 | # Generated by Django 3.0.10 on 2020-10-18 02:24 | 1.715174 | 2 |
python/sagiri-bot/SAGIRIBOT/basics/tools.py | GG-yuki/bugs | 0 | 6617046 | <reponame>GG-yuki/bugs
import io
import os
import base64
import hashlib
from urllib import parse
from PIL import Image as IMG
from SAGIRIBOT.basics.get_config import get_config
async def img_to_base_64(pic_path: str) -> str:
    """Return the image at *pic_path* as a base64 string, compressing large files.

    Files over 900 KB are resized to a 500px width (aspect ratio kept)
    and re-saved as PNG before encoding; smaller files are encoded as-is.

    Args:
        pic_path: path of the image file.

    Returns:
        str: the base64-encoded image data.
    """
    size_kb = os.path.getsize(pic_path) / 1024
    if size_kb > 900:
        print('>>>>压缩<<<<')
        with IMG.open(pic_path) as img:
            width, height = img.size
            target_width = 500
            target_height = round(target_width / width * height)
            resized = img.resize((target_width, target_height), IMG.ANTIALIAS)
            buffer = io.BytesIO()
            resized.save(buffer, format='PNG', quality=70)
            return base64.b64encode(buffer.getvalue()).decode()
    with open(pic_path, 'rb') as f:
        # Small enough: encode the raw file bytes directly.
        return base64.b64encode(f.read()).decode()
async def curl_md5(src: str) -> str:
    """Return the uppercase hexadecimal MD5 digest of *src* (UTF-8 encoded).

    Args:
        src: the string to hash.

    Returns:
        str: 32-character uppercase hex digest.
    """
    digest = hashlib.md5(src.encode('UTF-8'))
    return digest.hexdigest().upper()
async def get_tx_sign(params: dict) -> str:
    """Compute the request signature for the Tencent AI platform.

    URL-encodes the non-empty params in sorted key order, appends the
    configured app key, and returns the uppercase MD5 of the result.

    Args:
        params: request parameters to sign.

    Returns:
        str: the signature string.
    """
    app_key = await get_config("txAppKey")
    pieces = []
    for key in sorted(params.keys()):
        value = params[key]
        if value != '':
            pieces.append("%s=%s&" % (key, parse.quote(value, safe='')))
    sign = "".join(pieces) + "app_key=%s" % app_key
    sign = await curl_md5(sign)
    print("signMD5:", sign)
    return sign
| import io
import os
import base64
import hashlib
from urllib import parse
from PIL import Image as IMG
from SAGIRIBOT.basics.get_config import get_config
async def img_to_base_64(pic_path: str) -> str:
"""
Compress the image and transcode to Base64
Args:
pic_path: Img path
Examples:
img_base64 = await base_64(path)
Return:
str
"""
size = os.path.getsize(pic_path) / 1024
if size > 900:
print('>>>>压缩<<<<')
with IMG.open(pic_path) as img:
w, h = img.size
new_width = 500
new_height = round(new_width / w * h)
img = img.resize((new_width, new_height), IMG.ANTIALIAS)
img_buffer = io.BytesIO() # 生成buffer
img.save(img_buffer, format='PNG', quality=70)
byte_data = img_buffer.getvalue()
base64_data = base64.b64encode(byte_data)
code = base64_data.decode()
return code
with open(pic_path, 'rb') as f:
coding = base64.b64encode(f.read()) # 读取文件内容,转换为base64编码
return coding.decode()
async def curl_md5(src: str) -> str:
"""
MD5
Args:
src: sign
Examples:
sign = await curl_md5(sign)
Return:
str
"""
m = hashlib.md5(src.encode('UTF-8'))
return m.hexdigest().upper()
async def get_tx_sign(params: dict) -> str:
"""
Get sign of Tencent Ai Platform
Args:
params: Dict to send
Examples:
sign = await get_sign(params)
Return:
str
"""
app_key = await get_config("txAppKey")
params_keys = sorted(params.keys())
sign = ""
for i in params_keys:
if params[i] != '':
sign += "%s=%s&" % (i, parse.quote(params[i], safe=''))
sign += "app_key=%s" % app_key
sign = await curl_md5(sign)
print("signMD5:", sign)
return sign | en | 0.451664 | Compress the image and transcode to Base64 Args: pic_path: Img path Examples: img_base64 = await base_64(path) Return: str # 生成buffer # 读取文件内容,转换为base64编码 MD5 Args: src: sign Examples: sign = await curl_md5(sign) Return: str Get sign of Tencent Ai Platform Args: params: Dict to send Examples: sign = await get_sign(params) Return: str | 2.785546 | 3 |
challenges/foobar-py/3_2_access_codes.py | honno/code-solutions | 0 | 6617047 | #!/usr/bin/env python2.7
def solution(l):
    """Count the "lucky triples" in *l*.

    A lucky triple is an index triple (i, j, k) with i < j < k such that
    l[i] divides l[j] and l[j] divides l[k].  For each middle element the
    number of left-hand divisors is multiplied by the number of right-hand
    multiples.  (The unused ``ix`` local from the original is removed.)
    """
    ntriples = 0
    for iy, y in enumerate(l[1:-1], 1):
        # Divisors of y among elements strictly before position iy.
        left = sum(1 for x in l[:iy] if y % x == 0)
        # Multiples of y among elements strictly after position iy.
        right = sum(1 for z in l[iy + 1:] if z % y == 0)
        ntriples += left * right
    return ntriples
from unittest import TestCase
class Tests(TestCase):
    """Example cases from the challenge statement for solution()."""
    def test1(self):
        assert solution([1, 2, 3, 4, 5, 6]) == 3
    def test2(self):
        assert solution([1, 1, 1]) == 1
    def test3(self):
        assert solution([8, 4, 2, 1]) == 0
    def test4(self):
        assert solution([8, 4, 2, 1, 2, 4]) == 2
| #!/usr/bin/env python2.7
def solution(l):
ntriples = 0
for iy, y in enumerate(l[1:-1], 1):
ix = iy - 1
iz = iy + 1
nfactors_xy = 0
for x in l[:iy]:
if y % x == 0:
nfactors_xy += 1
nfactors_yz = 0
for z in l[iz:]:
if z % y == 0:
nfactors_yz += 1
ntriples += nfactors_xy * nfactors_yz
return ntriples
from unittest import TestCase
class Tests(TestCase):
def test1(self):
assert solution([1, 2, 3, 4, 5, 6]) == 3
def test2(self):
assert solution([1, 1, 1]) == 1
def test3(self):
assert solution([8, 4, 2, 1]) == 0
def test4(self):
assert solution([8, 4, 2, 1, 2, 4]) == 2
| ru | 0.174408 | #!/usr/bin/env python2.7 | 3.057484 | 3 |
marrow/package/tarjan.py | marrow/package | 5 | 6617048 | <filename>marrow/package/tarjan.py<gh_stars>1-10
"""Tarjan's algorithm and topological sorting implementation in Python.
by <NAME>
Public domain, do with it as you will.
From a blog post by <NAME>: http://www.logarithmic.net/pfh/blog/01208083168
Some cleanup was applied, and Python 3 function annotations (typing module, typeguard validation) supplied.
"""
from collections import defaultdict
from typeguard import check_argument_types
from typing import List, Mapping, MutableMapping, Sequence, Tuple, Iterable
Graph = Mapping[str, Iterable[str]]
def strongly_connected_components(graph: Graph) -> List[Tuple[str, ...]]:
    """Find the strongly connected components in a graph using Tarjan's algorithm.
    The `graph` argument should be a dictionary mapping node names to sequences of successor nodes.
    Every node reachable as a successor must also appear as a key of
    `graph`, otherwise `graph[node]` raises KeyError.
    """
    assert check_argument_types()
    result: List[Tuple[str, ...]] = []
    stack: List[str] = []
    # low[node]: the smallest discovery index reachable from node
    # (Tarjan's "low-link" value); also doubles as the visited marker.
    low: MutableMapping[str, int] = {}
    def visit(node: str):
        # Recursive DFS.  NOTE(review): deep graphs may exceed Python's
        # recursion limit — confirm acceptable for expected input sizes.
        if node in low: return
        num = len(low)
        low[node] = num
        stack_pos = len(stack)
        stack.append(node)
        for successor in graph[node]:
            visit(successor)
            low[node] = min(low[node], low[successor])
        if num == low[node]:
            # node is the root of a component: pop everything above it.
            component = tuple(stack[stack_pos:])
            del stack[stack_pos:]
            result.append(component)
            for item in component:
                # Sentinel >= any discovery index: marks the node as
                # assigned to a finished component.
                low[item] = len(graph)
    for node in graph:
        visit(node)
    return result
def topological_sort(graph: Graph) -> list:
    """Return the nodes of an acyclic *graph* in topological order (Kahn's algorithm)."""
    assert check_argument_types()
    # In-degree of every node that appears as a successor.
    indegree: MutableMapping[str, int] = defaultdict(lambda: 0)
    for node in graph:
        for successor in graph[node]:
            indegree[successor] += 1
    ordered = []
    # Seed with every node that has no incoming edges.
    pending = [node for node in graph if indegree[node] == 0]
    while pending:
        node = pending.pop()
        ordered.append(node)
        for successor in graph[node]:
            indegree[successor] -= 1
            if indegree[successor] == 0:
                pending.append(successor)
    return ordered
def robust_topological_sort(graph: Graph) -> list:
    """Identify strongly connected components then perform a topological sort of those components.

    Returns a list of components (tuples of node names); cycles collapse
    into a single component, so the result is always well-defined.
    """
    assert check_argument_types()
    components = strongly_connected_components(graph)
    # Map each node to the component (tuple) that contains it.
    node_component: MutableMapping[str, Tuple[str, ...]] = {}
    # NOTE(review): keys/values here are component tuples, so the str-keyed
    # Graph alias is inaccurate; typeguard's check inside topological_sort
    # may object to this — confirm.
    component_graph: MutableMapping[Tuple[str, ...], List[Tuple[str, ...]]] = {}
    for component in components:
        for node in component:
            node_component[node] = component
    for component in components:
        component_graph[component] = []
    # Add an edge between components whenever an original edge crosses
    # a component boundary (intra-component edges are dropped).
    for node in graph:
        node_c = node_component[node]
        for successor in graph[node]:
            successor_c = node_component[successor]
            if node_c != successor_c:
                component_graph[node_c].append(successor_c)
    return topological_sort(component_graph)
| <filename>marrow/package/tarjan.py<gh_stars>1-10
"""Tarjan's algorithm and topological sorting implementation in Python.
by <NAME>
Public domain, do with it as you will.
From a blog post by <NAME>: http://www.logarithmic.net/pfh/blog/01208083168
Some cleanup was applied, and Python 3 function annotations (typing module, typeguard validation) supplied.
"""
from collections import defaultdict
from typeguard import check_argument_types
from typing import List, Mapping, MutableMapping, Sequence, Tuple, Iterable
Graph = Mapping[str, Iterable[str]]
def strongly_connected_components(graph: Graph) -> List[Tuple[str, ...]]:
"""Find the strongly connected components in a graph using Tarjan's algorithm.
The `graph` argument should be a dictionary mapping node names to sequences of successor nodes.
"""
assert check_argument_types()
result: List[Tuple[str, ...]] = []
stack: List[str] = []
low: MutableMapping[str, int] = {}
def visit(node: str):
if node in low: return
num = len(low)
low[node] = num
stack_pos = len(stack)
stack.append(node)
for successor in graph[node]:
visit(successor)
low[node] = min(low[node], low[successor])
if num == low[node]:
component = tuple(stack[stack_pos:])
del stack[stack_pos:]
result.append(component)
for item in component:
low[item] = len(graph)
for node in graph:
visit(node)
return result
def topological_sort(graph:Graph) -> list:
assert check_argument_types()
count: MutableMapping[str, int] = defaultdict(lambda: 0)
for node in graph:
for successor in graph[node]:
count[successor] += 1
result = []
ready = [node for node in graph if count[node] == 0]
while ready:
node = ready.pop(-1)
result.append(node)
for successor in graph[node]:
count[successor] -= 1
if count[successor] == 0:
ready.append(successor)
return result
def robust_topological_sort(graph: Graph) -> list:
"""Identify strongly connected components then perform a topological sort of those components."""
assert check_argument_types()
components = strongly_connected_components(graph)
node_component = {}
component_graph: Graph = {}
for component in components:
for node in component:
node_component[node] = component
for component in components:
component_graph[component] = []
for node in graph:
node_c = node_component[node]
for successor in graph[node]:
successor_c = node_component[successor]
if node_c != successor_c:
component_graph[node_c].append(successor_c)
return topological_sort(component_graph)
| en | 0.802531 | Tarjan's algorithm and topological sorting implementation in Python. by <NAME> Public domain, do with it as you will. From a blog post by <NAME>: http://www.logarithmic.net/pfh/blog/01208083168 Somee cleanup was applied, and Python 3 function annotations (typing module, typeguard validation) supplied. Find the strongly connected components in a graph using Tarjan's algorithm. The `graph` argument should be a dictionary mapping node names to sequences of successor nodes. Identify strongly connected components then perform a topological sort of those components. | 2.787648 | 3 |
tests/functional/transport/pecan/controllers/test_background_jobs.py | satroutr/poppy | 3 | 6617049 | # Copyright (c) 2015 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import uuid
import ddt
import mock
from tests.functional.transport.pecan import base
@ddt.ddt
class BackgroundJobControllerTest(base.FunctionalTest):
    """Functional tests for the Akamai background-job admin endpoint."""

    def setUp(self):
        # Stub the Akamai SAN certificate cname lookup so tests never hit
        # the real provider; the controller only needs a list of cnames.
        class san_cert_cnames_caller(mock.Mock):
            pass
        san_cert_cnames_caller.return_value = [
            "secure1.test_san.com",
            "secure2.test_san.com"
        ]
        background_job_controller_patcher = mock.patch(
            'poppy.provider.akamai.certificates.'
            'CertificateController.san_cert_cnames',
            new=san_cert_cnames_caller(),
        )
        background_job_controller_patcher.start()
        self.addCleanup(background_job_controller_patcher.stop)
        super(BackgroundJobControllerTest, self).setUp()
        self.project_id = str(uuid.uuid1())
        self.service_name = str(uuid.uuid1())
        self.flavor_id = str(uuid.uuid1())

    @ddt.file_data("data_post_background_jobs_bad_input.json")
    def test_post_background_job_negative(self, background_job_json):
        # Malformed job definitions must be rejected with HTTP 400.
        response = self.app.post('/v1.0/admin/provider/akamai/background_job',
                                 headers={'Content-Type': 'application/json',
                                          'X-Project-ID': self.project_id},
                                 params=json.dumps(background_job_json),
                                 expect_errors=True)
        self.assertEqual(400, response.status_code)

    @ddt.file_data("data_post_background_jobs.json")
    def test_post_background_job_positive(self, background_job_json):
        # Well-formed jobs are accepted for asynchronous processing (202).
        response = self.app.post('/v1.0/admin/provider/akamai/background_job',
                                 headers={'Content-Type': 'application/json',
                                          'X-Project-ID': self.project_id},
                                 params=json.dumps(background_job_json))
        self.assertEqual(202, response.status_code)
| # Copyright (c) 2015 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import uuid
import ddt
import mock
from tests.functional.transport.pecan import base
@ddt.ddt
class BackgroundJobControllerTest(base.FunctionalTest):
def setUp(self):
class san_cert_cnames_caller(mock.Mock):
pass
san_cert_cnames_caller.return_value = [
"secure1.test_san.com",
"secure2.test_san.com"
]
background_job_controller_patcher = mock.patch(
'poppy.provider.akamai.certificates.'
'CertificateController.san_cert_cnames',
new=san_cert_cnames_caller(),
)
background_job_controller_patcher.start()
self.addCleanup(background_job_controller_patcher.stop)
super(BackgroundJobControllerTest, self).setUp()
self.project_id = str(uuid.uuid1())
self.service_name = str(uuid.uuid1())
self.flavor_id = str(uuid.uuid1())
@ddt.file_data("data_post_background_jobs_bad_input.json")
def test_post_background_job_negative(self, background_job_json):
response = self.app.post('/v1.0/admin/provider/akamai/background_job',
headers={'Content-Type': 'application/json',
'X-Project-ID': self.project_id},
params=json.dumps(background_job_json),
expect_errors=True)
self.assertEqual(400, response.status_code)
@ddt.file_data("data_post_background_jobs.json")
def test_post_background_job_positive(self, background_job_json):
response = self.app.post('/v1.0/admin/provider/akamai/background_job',
headers={'Content-Type': 'application/json',
'X-Project-ID': self.project_id},
params=json.dumps(background_job_json))
self.assertEqual(202, response.status_code)
| en | 0.852997 | # Copyright (c) 2015 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. | 1.7118 | 2 |
get_config.py | BTCallahan/super-ds9 | 0 | 6617050 |
from functools import lru_cache
from math import ceil
from typing import Dict, Final
from collections.abc import Mapping
from datetime import timedelta
from dataclasses import dataclass
import re
from global_functions import get_first_group_in_pattern
from data_globals import string_or_int
from coords import Coords
@dataclass(frozen=True)
class ConfigObject:
    """Immutable bundle of game configuration values.

    Built once via ``create_config`` from ``config.ini``, which in turn
    names a secondary configuration file under ``configurations/`` that
    supplies the screen/sector layout values.
    """
    # -- gameplay tuning ---------------------------------------------------
    chances_to_detect_cloak:int
    time_per_turn:timedelta  # in-game time that passes each turn
    energy_cost_per_torpedo:int
    life_support_offline_turn_limit:int
    local_energy_cost:int        # energy cost of moving within a subsector
    sector_energy_cost:int       # energy cost of warping between sectors
    # -- screen / map dimensions -------------------------------------------
    screen_width:int
    screen_height:int
    sector_width:int
    sector_height:int
    subsector_width:int
    subsector_height:int
    # -- display panel coordinates (start / end columns and rows) ----------
    sector_display_x:int
    sector_display_y:int
    subsector_display_x:int
    subsector_display_y:int
    message_display_x:int
    message_display_end_x:int
    message_display_y:int
    message_display_end_y:int
    your_ship_display_x:int
    your_ship_display_end_x:int
    your_ship_display_y:int
    your_ship_display_end_y:int
    other_ship_display_x:int
    other_ship_display_end_x:int
    other_ship_display_y:int
    other_ship_display_end_y:int
    command_display_x:int
    command_display_end_x:int
    command_display_y:int
    command_display_end_y:int
    position_info_x:int
    position_info_end_x:int
    position_info_y:int
    position_info_end_y:int
    # -- misc --------------------------------------------------------------
    graphics:str             # path to the tileset font, prefixed "fonts/"
    max_warp_distance:int    # sector-grid diagonal, rounded up
    max_move_distance:int    # subsector diagonal, rounded up
    max_distance:int         # max(max_warp_distance, max_move_distance)

    @staticmethod
    def _read_int_setting(text:str, name:str) -> int:
        """Return the integer value of the ``name:<digits>`` entry in *text*.

        ``get_first_group_in_pattern`` is expected to raise ``OSError`` with
        the given message when the entry is absent.  (The setting names used
        here contain no regex metacharacters, so plain interpolation into
        the pattern is safe.)
        """
        return get_first_group_in_pattern(
            text, re.compile(r"%s:([\d]+)\n" % name), type_to_convert_to=int,
            error_message=(
                "The file 'config.ini' did not contain an entry for '%s'" % name
            ),
            error_type_to_raise=OSError
        )

    @classmethod
    def create_config(cls) -> "ConfigObject":
        """Read ``config.ini`` plus the configuration file it names and
        return a fully-populated ``ConfigObject``.

        Raises:
            OSError: if a required entry is missing from ``config.ini``.
            ValueError: if ``seconds_per_turn`` or
                ``chances_to_detect_cloak`` is zero.
        """
        with open("config.ini", "r") as f:
            text = f.read()

        config_file:str = get_first_group_in_pattern(
            text, re.compile(r"config_file:([\w.,-]+)\n"),
            error_message="The file 'config.ini' did not contain an entry for 'config_file'",
            error_type_to_raise=OSError
        )
        if config_file is None:
            raise OSError(
                "The file 'config.ini' did not contain an entry for 'config_file'"
            )
        config_file = "configurations/" + config_file.strip()

        seconds_per_turn = cls._read_int_setting(text, "seconds_per_turn")
        if seconds_per_turn is None:
            raise OSError("The file 'config.ini' did not contain an entry for 'seconds_per_turn'")
        if seconds_per_turn == 0:
            raise ValueError(
                "The value of 'seconds_per_turn' is zero, which means that no time will pass between turns"
            )
        time_per_turn = timedelta(seconds=seconds_per_turn)

        chances_to_detect_cloak = cls._read_int_setting(text, "chances_to_detect_cloak")
        if chances_to_detect_cloak is None:
            raise OSError("The file 'config.ini' did not contain an entry for 'chances_to_detect_cloak'")
        if chances_to_detect_cloak == 0:
            raise ValueError("The value of 'chances_to_detect_cloak' is zero, which means that ship will not get any chances to detect a cloaked ship")

        energy_cost_per_torpedo = cls._read_int_setting(text, "energy_cost_per_torpedo")
        if energy_cost_per_torpedo is None:
            raise OSError("The file 'config.ini' did not contain an entry for 'energy_cost_per_torpedo'")

        life_support_offline_turn_limit = cls._read_int_setting(
            text, "life_support_offline_turn_limit"
        )
        local_energy_cost = cls._read_int_setting(text, "local_energy_cost")
        sector_energy_cost = cls._read_int_setting(text, "sector_energy_cost")

        # Parse the "key:value" lines of the secondary configuration file.
        d:Dict[str,string_or_int] = {}
        with open(config_file, "r") as f:
            for line in f:
                if ":" in line and line[0] != "#":
                    # maxsplit=1 so values that themselves contain ':' do
                    # not crash the unpacking (the old code raised
                    # ValueError on such lines).
                    k, v = line.split(":", 1)
                    try:
                        d[k] = int(v)
                    except ValueError:
                        # Strip the trailing newline/whitespace the raw
                        # split leaves on string values (int() already
                        # ignores it for numeric entries).
                        d[k] = v.strip()
        # No explicit f.close() needed: the with-statement closes the file.

        # Longest possible travel distances, derived from grid diagonals.
        origin:Coords = Coords(x=0, y=0)
        max_warp_distance = ceil(origin.distance(x=d["sector_width"], y=d["sector_height"]))
        max_move_distance = ceil(origin.distance(x=d["subsector_width"], y=d["subsector_height"]))
        max_distance = max(max_warp_distance, max_move_distance)

        return cls(
            chances_to_detect_cloak=chances_to_detect_cloak,
            time_per_turn=time_per_turn,
            energy_cost_per_torpedo=energy_cost_per_torpedo,
            life_support_offline_turn_limit=life_support_offline_turn_limit,
            local_energy_cost=local_energy_cost,
            sector_energy_cost=sector_energy_cost,
            screen_width=d['screen_width'],
            screen_height=d['screen_height'],
            sector_width=d['sector_width'],
            sector_height=d['sector_height'],
            subsector_width=d['subsector_width'],
            subsector_height=d['subsector_height'],
            sector_display_x=d['sector_display_x'],
            sector_display_y=d['sector_display_y'],
            subsector_display_x=d['subsector_display_x'],
            subsector_display_y=d['subsector_display_y'],
            message_display_x=d['message_display_x'],
            message_display_end_x=d['message_display_end_x'],
            message_display_y=d['message_display_y'],
            message_display_end_y=d['message_display_end_y'],
            your_ship_display_x=d['your_ship_display_x'],
            your_ship_display_end_x=d['your_ship_display_end_x'],
            your_ship_display_y=d['your_ship_display_y'],
            your_ship_display_end_y=d['your_ship_display_end_y'],
            other_ship_display_x=d['other_ship_display_x'],
            other_ship_display_end_x=d['other_ship_display_end_x'],
            other_ship_display_y=d['other_ship_display_y'],
            other_ship_display_end_y=d['other_ship_display_end_y'],
            command_display_x=d['command_display_x'],
            command_display_end_x=d['command_display_end_x'],
            command_display_y=d['command_display_y'],
            command_display_end_y=d['command_display_end_y'],
            position_info_x=d['position_info_x'],
            position_info_end_x=d['position_info_end_x'],
            position_info_y=d['position_info_y'],
            position_info_end_y=d['position_info_end_y'],
            graphics="fonts/" + d['graphics'],
            max_warp_distance=max_warp_distance,
            max_move_distance=max_move_distance,
            max_distance=max_distance
        )
# Module-level singleton: the configuration is parsed once, at import time,
# from "config.ini" and the configuration file it points at.
CONFIG_OBJECT:Final= ConfigObject.create_config()
@lru_cache
def get_lookup_table(
    *, direction_x:float, direction_y:float, normalise_direction:bool=True, no_dups:bool=True
):
    """Return a cached tuple of ``Coords`` stepping outward along a direction.

    One step of ``(direction_x, direction_y)`` — normalised to unit length
    unless *normalise_direction* is False — is accumulated per iteration
    (up to ``CONFIG_OBJECT.max_distance`` steps) and each position is
    rounded to grid coordinates.  With *no_dups* (the default) consecutive
    identical cells are collapsed to a single entry.
    """
    if normalise_direction:
        step_x, step_y = Coords(x=direction_x, y=direction_y).normalize()
    else:
        step_x, step_y = direction_x, direction_y

    cells = []
    acc_x, acc_y = step_x, step_y
    previous = None
    for _ in range(CONFIG_OBJECT.max_distance):
        cell = Coords(round(acc_x), round(acc_y))
        # Skip a cell only when de-duplicating and it repeats its predecessor.
        if not no_dups or not previous or cell != previous:
            cells.append(cell)
        previous = cell
        acc_x += step_x
        acc_y += step_y
    return tuple(cells)
|
from functools import lru_cache
from math import ceil
from typing import Dict, Final
from collections.abc import Mapping
from datetime import timedelta
from dataclasses import dataclass
import re
from global_functions import get_first_group_in_pattern
from data_globals import string_or_int
from coords import Coords
@dataclass(frozen=True)
class ConfigObject:
    """Immutable bundle of game configuration values.

    Built once via ``create_config`` from ``config.ini``, which in turn
    names a secondary configuration file under ``configurations/`` that
    supplies the screen/sector layout values.
    """
    # -- gameplay tuning ---------------------------------------------------
    chances_to_detect_cloak:int
    time_per_turn:timedelta  # in-game time that passes each turn
    energy_cost_per_torpedo:int
    life_support_offline_turn_limit:int
    local_energy_cost:int        # energy cost of moving within a subsector
    sector_energy_cost:int       # energy cost of warping between sectors
    # -- screen / map dimensions -------------------------------------------
    screen_width:int
    screen_height:int
    sector_width:int
    sector_height:int
    subsector_width:int
    subsector_height:int
    # -- display panel coordinates (start / end columns and rows) ----------
    sector_display_x:int
    sector_display_y:int
    subsector_display_x:int
    subsector_display_y:int
    message_display_x:int
    message_display_end_x:int
    message_display_y:int
    message_display_end_y:int
    your_ship_display_x:int
    your_ship_display_end_x:int
    your_ship_display_y:int
    your_ship_display_end_y:int
    other_ship_display_x:int
    other_ship_display_end_x:int
    other_ship_display_y:int
    other_ship_display_end_y:int
    command_display_x:int
    command_display_end_x:int
    command_display_y:int
    command_display_end_y:int
    position_info_x:int
    position_info_end_x:int
    position_info_y:int
    position_info_end_y:int
    # -- misc --------------------------------------------------------------
    graphics:str             # path to the tileset font, prefixed "fonts/"
    max_warp_distance:int    # sector-grid diagonal, rounded up
    max_move_distance:int    # subsector diagonal, rounded up
    max_distance:int         # max(max_warp_distance, max_move_distance)

    @staticmethod
    def _read_int_setting(text:str, name:str) -> int:
        """Return the integer value of the ``name:<digits>`` entry in *text*.

        ``get_first_group_in_pattern`` is expected to raise ``OSError`` with
        the given message when the entry is absent.  (The setting names used
        here contain no regex metacharacters, so plain interpolation into
        the pattern is safe.)
        """
        return get_first_group_in_pattern(
            text, re.compile(r"%s:([\d]+)\n" % name), type_to_convert_to=int,
            error_message=(
                "The file 'config.ini' did not contain an entry for '%s'" % name
            ),
            error_type_to_raise=OSError
        )

    @classmethod
    def create_config(cls) -> "ConfigObject":
        """Read ``config.ini`` plus the configuration file it names and
        return a fully-populated ``ConfigObject``.

        Raises:
            OSError: if a required entry is missing from ``config.ini``.
            ValueError: if ``seconds_per_turn`` or
                ``chances_to_detect_cloak`` is zero.
        """
        with open("config.ini", "r") as f:
            text = f.read()

        config_file:str = get_first_group_in_pattern(
            text, re.compile(r"config_file:([\w.,-]+)\n"),
            error_message="The file 'config.ini' did not contain an entry for 'config_file'",
            error_type_to_raise=OSError
        )
        if config_file is None:
            raise OSError(
                "The file 'config.ini' did not contain an entry for 'config_file'"
            )
        config_file = "configurations/" + config_file.strip()

        seconds_per_turn = cls._read_int_setting(text, "seconds_per_turn")
        if seconds_per_turn is None:
            raise OSError("The file 'config.ini' did not contain an entry for 'seconds_per_turn'")
        if seconds_per_turn == 0:
            raise ValueError(
                "The value of 'seconds_per_turn' is zero, which means that no time will pass between turns"
            )
        time_per_turn = timedelta(seconds=seconds_per_turn)

        chances_to_detect_cloak = cls._read_int_setting(text, "chances_to_detect_cloak")
        if chances_to_detect_cloak is None:
            raise OSError("The file 'config.ini' did not contain an entry for 'chances_to_detect_cloak'")
        if chances_to_detect_cloak == 0:
            raise ValueError("The value of 'chances_to_detect_cloak' is zero, which means that ship will not get any chances to detect a cloaked ship")

        energy_cost_per_torpedo = cls._read_int_setting(text, "energy_cost_per_torpedo")
        if energy_cost_per_torpedo is None:
            raise OSError("The file 'config.ini' did not contain an entry for 'energy_cost_per_torpedo'")

        life_support_offline_turn_limit = cls._read_int_setting(
            text, "life_support_offline_turn_limit"
        )
        local_energy_cost = cls._read_int_setting(text, "local_energy_cost")
        sector_energy_cost = cls._read_int_setting(text, "sector_energy_cost")

        # Parse the "key:value" lines of the secondary configuration file.
        d:Dict[str,string_or_int] = {}
        with open(config_file, "r") as f:
            for line in f:
                if ":" in line and line[0] != "#":
                    # maxsplit=1 so values that themselves contain ':' do
                    # not crash the unpacking (the old code raised
                    # ValueError on such lines).
                    k, v = line.split(":", 1)
                    try:
                        d[k] = int(v)
                    except ValueError:
                        # Strip the trailing newline/whitespace the raw
                        # split leaves on string values (int() already
                        # ignores it for numeric entries).
                        d[k] = v.strip()
        # No explicit f.close() needed: the with-statement closes the file.

        # Longest possible travel distances, derived from grid diagonals.
        origin:Coords = Coords(x=0, y=0)
        max_warp_distance = ceil(origin.distance(x=d["sector_width"], y=d["sector_height"]))
        max_move_distance = ceil(origin.distance(x=d["subsector_width"], y=d["subsector_height"]))
        max_distance = max(max_warp_distance, max_move_distance)

        return cls(
            chances_to_detect_cloak=chances_to_detect_cloak,
            time_per_turn=time_per_turn,
            energy_cost_per_torpedo=energy_cost_per_torpedo,
            life_support_offline_turn_limit=life_support_offline_turn_limit,
            local_energy_cost=local_energy_cost,
            sector_energy_cost=sector_energy_cost,
            screen_width=d['screen_width'],
            screen_height=d['screen_height'],
            sector_width=d['sector_width'],
            sector_height=d['sector_height'],
            subsector_width=d['subsector_width'],
            subsector_height=d['subsector_height'],
            sector_display_x=d['sector_display_x'],
            sector_display_y=d['sector_display_y'],
            subsector_display_x=d['subsector_display_x'],
            subsector_display_y=d['subsector_display_y'],
            message_display_x=d['message_display_x'],
            message_display_end_x=d['message_display_end_x'],
            message_display_y=d['message_display_y'],
            message_display_end_y=d['message_display_end_y'],
            your_ship_display_x=d['your_ship_display_x'],
            your_ship_display_end_x=d['your_ship_display_end_x'],
            your_ship_display_y=d['your_ship_display_y'],
            your_ship_display_end_y=d['your_ship_display_end_y'],
            other_ship_display_x=d['other_ship_display_x'],
            other_ship_display_end_x=d['other_ship_display_end_x'],
            other_ship_display_y=d['other_ship_display_y'],
            other_ship_display_end_y=d['other_ship_display_end_y'],
            command_display_x=d['command_display_x'],
            command_display_end_x=d['command_display_end_x'],
            command_display_y=d['command_display_y'],
            command_display_end_y=d['command_display_end_y'],
            position_info_x=d['position_info_x'],
            position_info_end_x=d['position_info_end_x'],
            position_info_y=d['position_info_y'],
            position_info_end_y=d['position_info_end_y'],
            graphics="fonts/" + d['graphics'],
            max_warp_distance=max_warp_distance,
            max_move_distance=max_move_distance,
            max_distance=max_distance
        )
# Module-level singleton: the configuration is parsed once, at import time,
# from "config.ini" and the configuration file it points at.
CONFIG_OBJECT:Final= ConfigObject.create_config()
@lru_cache
def get_lookup_table(
    *, direction_x:float, direction_y:float, normalise_direction:bool=True, no_dups:bool=True
):
    """Return a cached tuple of ``Coords`` stepping outward along a direction.

    One step of ``(direction_x, direction_y)`` — normalised to unit length
    unless *normalise_direction* is False — is accumulated per iteration
    (up to ``CONFIG_OBJECT.max_distance`` steps) and each position is
    rounded to grid coordinates.  With *no_dups* (the default) consecutive
    identical cells are collapsed to a single entry.
    """
    if normalise_direction:
        step_x, step_y = Coords(x=direction_x, y=direction_y).normalize()
    else:
        step_x, step_y = direction_x, direction_y

    cells = []
    acc_x, acc_y = step_x, step_y
    previous = None
    for _ in range(CONFIG_OBJECT.max_distance):
        cell = Coords(round(acc_x), round(acc_y))
        # Skip a cell only when de-duplicating and it repeats its predecessor.
        if not no_dups or not previous or cell != previous:
            cells.append(cell)
        previous = cell
        acc_x += step_x
        acc_y += step_y
    return tuple(cells)
| sv | 0.170477 | #lines = f.readlines() | 2.242512 | 2 |