max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
instanotifier/fetcher/scripts/fetcher.py | chaudbak/instanotifier | 0 | 17700 | from instanotifier.fetcher import tests
def run():
    """Entry point for django-extensions' ``runscript`` command.

    Invoked via ``manage.py runscript <scriptname>``; simply triggers the
    RSS fetcher smoke test from the tests module.
    """
    tests.test_rss_fetcher()
| 1.359375 | 1 |
clase_4/populate_alumnos.py | noctilukkas/python-programming | 0 | 17701 | import sqlite3
def main():
    """Populate the ``alumnos`` table in alumnos.db with sample records.

    Assumes the table already exists with five columns
    (id, first name, last name, grade, age).
    """
    # Open a connection to the database and get a cursor
    conn = sqlite3.connect("alumnos.db")
    cursor = conn.cursor()
    # Tuple of tuples holding the rows to insert into the table
    alumnos = (
        (1, "Juan", "Granizado", 8, 25),
        (2, "Esteban", "Quito", 2, 19),
        (3, "Marina", "Cordoba", 10, 25),
    )
    for alumno in alumnos:
        cursor.execute("INSERT INTO alumnos VALUES (?, ?, ?, ?, ?)", alumno)
    # Commit so the inserted rows are actually persisted
    conn.commit()
    print("Datos cargados!")
    # Close the connection
    conn.close()
if __name__ == '__main__':
    main()
| 4 | 4 |
cap11/main.py | felipesch92/livroPython | 0 | 17702 | import sqlite3
# Open (or create) the local contacts database
con = sqlite3.connect('agenda.db')
cursor = con.cursor()
# Create the contacts table on first run
cursor.execute('''
create table if not exists agenda(
nome text,
telefone text)
''')
# Insert one sample contact using a parameterized statement
cursor.execute('''
insert into agenda(nome, telefone)
values(?, ?)
''', ("Tamara", "51-98175-0510"))
# Persist the insert and release resources
con.commit()
cursor.close()
con.close()
| 3.015625 | 3 |
falconcv/data/scraper/flickr_scraper.py | haruiz/FalconCV | 16 | 17703 | import logging
import math
import re
import time
import dask
import numpy as np
import requests
import json
import xml.etree.ElementTree as ET
from falconcv.data.scraper.scraper import ImagesScraper
from falconcv.util import ImageUtil
logger = logging.getLogger(__name__)
FLICKR_ENDPOINT = "https://www.flickr.com/services/rest"
# List of sizes:
# url_o: Original (4520 × 3229)
# url_k: Large 2048 (2048 × 1463)
# url_h: Large 1600 (1600 × 1143)
# url_l=: Large 1024 (1024 × 732)
# url_c: Medium 800 (800 × 572)
# url_z: Medium 640 (640 × 457)
# url_m: Medium 500 (500 × 357)
# url_n: Small 320 (320 × 229)
# url_s: Small 240 (240 × 171)
# url_t: Thumbnail (100 × 71)
# url_q: Square 150 (150 × 150)
# url_sq: Square 75 (75 × 75)
class FlickrScraper(ImagesScraper):
    """Scrape images from Flickr using the public REST API.

    Photo metadata is fetched page by page and the actual images are
    downloaded in parallel through dask delayed tasks.
    """

    def __init__(self, api_key):
        """Create a scraper.

        Args:
            api_key: Flickr REST API key sent with every request.
        """
        super(FlickrScraper, self).__init__()
        self.api_key = api_key

    def _authenticate(self):
        # Public search endpoints only require the API key, no handshake.
        pass

    def _get_total_matches(self, q):
        """Return the total number of photos matching the tag list ``q``.

        Returns 0 on any request or parsing error (errors are logged).
        """
        total_matches = 0
        try:
            response = requests.get(url=FLICKR_ENDPOINT, params={
                "api_key": self.api_key,
                "method": "flickr.photos.search",
                "tags": ",".join(q),
                "tag_mode": "any",
                # "privacy_filter": "1"
                "content_type": 1,
                "media": "photos",
                "per_page": 0,
                "format": "json"
            })
            if response.status_code == 200:
                # With "format": "json" Flickr answers JSONP:
                # jsonFlickrApi({...}); strip the wrapper before parsing.
                json_text = re.search(r'\((.*?)\)', response.text).group(1)
                json_object = json.loads(json_text)
                if json_object["stat"] == "ok":
                    total_matches = int(json_object["photos"]["total"])
        except Exception as ex:
            logger.error("Error making the request : {}".format(ex))
        return total_matches

    def _request_photos(self, q, count, page):
        """Return a list of photo attribute dicts for one result page.

        Args:
            q: list of tags to search for.
            count: photos per page (Flickr caps this at 500).
            page: 1-based page index.
        """
        images = []
        try:
            response = requests.get(url=FLICKR_ENDPOINT, params={
                "api_key": self.api_key,
                "method": "flickr.photos.search",
                "tags": ",".join(q),
                "tag_mode": "any",
                # "privacy_filter": "1"
                "content_type": 1,
                "media": "photos",
                "per_page": count,
                "page": page,
                "extras": ",".join(["url_o", "url_k", "url_h", "url_l", "url_c", "url_m"])
            })
            if response.status_code == 200:
                try:
                    # Without "format": "json" the response is XML.
                    root: ET.Element = ET.fromstring(response.text)
                    stat = root.get("stat")
                    if stat == "ok":
                        for photo in root.iterfind("photos/photo"):
                            photo: ET.Element
                            images.append(photo.attrib)
                except Exception as ex:
                    logger.error("error gathering the response: {}".format(ex))
        except Exception as ex:
            logger.error("Error making the request : {}".format(ex))
        return images

    @dask.delayed
    def _fetch_image(self, image_info, sz):
        """Delayed task: download one image at size key ``sz``.

        Returns None when the requested size is unavailable or the
        download fails.
        """
        try:
            if sz in image_info:
                url = image_info[sz]
                return ImageUtil.url2img(url)
        except Exception as ex:
            # FIX: was '"..." % ex' with no %-placeholder, which raised
            # TypeError instead of logging the download error.
            logger.error("Error fetching the image: {}".format(ex))
        return None

    def fetch(self, q, batch_size: int = 100, timestamp=1, sz="url_m"):
        """Yield batches of downloaded images (numpy arrays), one per page.

        Args:
            q: list of tags to search for.
            batch_size: photos requested per API page (must be <= 500).
            timestamp: seconds to sleep between pages (rate limiting).
            sz: Flickr size key to download (see the size list above).
        """
        try:
            assert batch_size <= 500, "invalid count parameter"
            total_matches = self._get_total_matches(q)
            logger.debug("{} images found ".format(total_matches))
            number_of_pages = math.ceil(total_matches / batch_size)
            # FIX: range's end is exclusive, so the last page was never
            # fetched (and nothing at all when there was a single page).
            for page in range(1, number_of_pages + 1):
                photos = self._request_photos(q, batch_size, page)
                delayed_tasks = list(map(lambda img: self._fetch_image(img, sz), photos))
                compute_result = dask.compute(*delayed_tasks)
                yield [img for img in compute_result if isinstance(img, np.ndarray)]
                time.sleep(timestamp)
        except Exception as ex:
            logger.error("error fetching the images: {}".format(ex))
| 2.390625 | 2 |
argparser.py | geoff-smith/MCplotscripts | 0 | 17704 | <reponame>geoff-smith/MCplotscripts
# argParser
# this class generates a RunParams object from the args passed to the script
from runparams import *
import os.path
import string
## handles args passed to the program
#
class ArgParser(object):
    """Parse command-line arguments (Python 2) into a RunParams object.

    Switches are documented by displayUserInfo(); any non-switch argument
    must be an existing .root input file (paths under /store/ are resolved
    through xrootd when not present locally).
    """
    def parsePtCutString(self, ptCutString):
        # "20,30,50,100" -> [20.0, 30.0, 50.0, 100.0] (Python 2: map returns a list)
        return map(float, string.split(ptCutString,',') )
    def parseEventsString(self, eventsString):
        # "1,5,12" -> [1, 5, 12]
        return map(int, string.split(eventsString,',') )
    def displayUserInfo(self):
        """Print usage examples and the list of supported switches."""
        print ""
        print "o------------------o"
        print "|Extracthistos Info|"
        print "o------------------o"
        print ""
        print "[example usage]"
        print ""
        print "extracthistos inputFile.root"
        print ""
        print "extracthistos inputFile.root /intputDir/*.root --visualize --output outputfile-extracted.root --ptcuts 20,30,50,100 --etacut 2.5 --limit 100"
        print ""
        print "extracthistos inputFile.root /intputDir/*.root -v -o outputfile-extracted.root -p 20,30,50,100 -e 2.5 -l 100"
        print ""
        print "[switches]"
        print " -d   | --debug: Show debug information"
        print " -e   | --etacut: Set etaCut (double)"
        print " -f   | --force: Force overwriting of output file"
        print " -i   | --info: Shows this info"
        print " -l   | --limit: Limit maximum # of events processed"
        print " -o   | --output: Set output file (string)"
        print " -od  | --output-outputdirectory: Set output directory (string)"
        print " -p   | --ptcuts: Set pTcuts (list of doubles seperated by ',')"
        print " -#   | --events: Specify events to processed (list of ints seperated by ',')"
        print " -m   | --multi-processing: create n (int) subprocesses"
        print " -%   | --modulo: process only every nth event (int)"
        print " -%r  | --modulo-rest: process only every nth + r event (int)"
        print " -v   | --visualize: Create visualization(s)"
        print " -vs  | --visualize-skip-copies: Do not render non-physical particle copies"
        print " -vnu | --visualize-no-underlying-event: Do not visualize the underlying event"
        print " -vni | --visualize-no-main-interaction: Do not visualize the main interaction"
        print " -vsj | --visualize-color-special-jets: Color special particle jets"
        print " -vce | --visualize-cutoff-energy: Specify Visualization energy cutoff (double)"
        print " -vcs | --visualize-cutoff-special-jets: Cutoff Special Jets"
        print " -vcr | --visualize-cutoff-radiation: Cutoff ISR/FSR Jets"
        print " -vme | --visualize-mode-energy: Color particles by their energy"
        print " -vmp | --visualize-mode-pt: Color particles by their pT"
        print " -vr  | --visualize-renderer: Specify GraphViz renderer (string), defaults to 'dot'"
        print ""
    def __init__(self, args):
        """Parse ``args`` (a sys.argv-style list) into self.runParams.

        Raises Exception on malformed switches, missing switch values,
        non-existent input files or an already-existing output file
        (unless --force was given).
        """
        self.runParams = RunParams()
        lenArgs = len(args)
        skip = False  # set when a switch has consumed the following argument
        forceOutputOverride = False
        for i in range (0, lenArgs):
            # skip first arg as it's the script's name
            if i == 0 or skip:
                skip = False
                continue
            # provide arg and nextArg (if possible)
            arg = args[i]
            nextArg = None
            if (i < lenArgs - 1):
                nextArg = args[i+1]
            # parse switches
            if ( arg == "-d" ) or ( arg == "--debug" ) :
                self.runParams.useDebugOutput = True
                continue
            if ( arg == "-e" ) or ( arg == "--etacut" ) :
                if nextArg is None or nextArg[0] == '-':
                    raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
                self.runParams.eta = float(nextArg)
                skip = True
                continue
            if ( arg == "-f" ) or ( arg == "--force" ) :
                forceOutputOverride = True
                continue
            if ( arg == "-i" ) or ( arg == "--info" ) :
                self.displayUserInfo()
                self.runParams.run = False
                break
            if ( arg == "-l" ) or ( arg == "--limit" ) :
                if nextArg is None or nextArg[0] == '-':
                    raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
                self.runParams.maxEvents = int(nextArg)
                skip = True
                continue
            if ( arg == "-o" ) or ( arg == "--output" ) :
                if nextArg is None or nextArg[0] == '-':
                    raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
                # output files must carry the '-extracted.root' signature
                # (Python 2 '<>' is the inequality operator)
                if nextArg [-15:] <> '-extracted.root':
                    raise Exception("'" + arg + "': Output file must end with '-extracted.root'!")
                self.runParams.outputFile = nextArg
                skip = True
                continue
            if ( arg == "-p" ) or ( arg == "--ptcuts" ) :
                if nextArg is None or nextArg[0] == '-':
                    raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
                ptCutString = nextArg
                self.runParams.pTCuts = self.parsePtCutString(ptCutString)
                skip = True
                continue
            if ( arg == "-v" ) or ( arg == "--visualize" ) :
                self.runParams.useVisualization = True
                continue
            if ( arg == "-vs" ) or ( arg == "--visualize-skip-copies" ) :
                self.runParams.visualizationSkipCopies = True
                continue
            if ( arg == "-vnu" ) or ( arg == "--visualize-no-underlying-event" ) :
                self.runParams.visualizationShowUnderlyingEvent = False
                continue
            if ( arg == "-vni" ) or ( arg == "--visualize-no-main-interaction" ) :
                self.runParams.visualizationShowMainInteraction = False
                continue
            if ( arg == "-vsj" ) or ( arg == "--visualize-color-special-jets" ) :
                self.runParams.visualizationColorSpecialJets = True
                continue
            if ( arg == "-vme" ) or ( arg == "--visualize-mode-energy" ) :
                self.runParams.visualizationEnergyMode = True
                continue
            if ( arg == "-vmp" ) or ( arg == "--visualize-mode-pt" ) :
                self.runParams.visualizationPtMode = True
                continue
            if ( arg == "-vce" ) or ( arg == "--visualize-cutoff-energy" ) :
                if nextArg is None or nextArg[0] == '-':
                    raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
                self.runParams.visualizationEnergyCutoff = int(nextArg)
                skip = True
                continue
            if ( arg == "-vcr" ) or ( arg == "--visualize-cutoff-radiation" ) :
                self.runParams.visualizationCutoffRadiation = True
                continue
            if ( arg == "-vcs" ) or ( arg == "--visualize-cutoff-special-jets" ) :
                self.runParams.visualizationCutSpecialJets = True
                continue
            #if ( arg == "-vp" ) or ( arg == "--visualize-pt-cutoff" ) :
                #if nextArg is None or nextArg[0] == '-':
                    #raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
                #self.runParams.visualizationPtCutoff = int(nextArg)
                #skip = True
                #continue
            if ( arg == "-vr" ) or ( arg == "--visualize-renderer:" ) :
                if nextArg is None or nextArg[0] == '-':
                    raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
                self.runParams.visualizationRenderer = nextArg
                skip = True
                continue
            #if ( arg == "-z" ) or ( arg == "--zero-jets" ) :
                #self.runParams.zeroAdditionalJets = True
                #continue
            if ( arg == "-#" ) or ( arg == "--events" ) :
                if nextArg is None or nextArg[0] == '-':
                    raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
                eventsString = nextArg
                self.runParams.events = self.parseEventsString(eventsString)
                skip = True
                continue
            if ( arg == "-od" ) or ( arg == "--output-outputdirectory" ) :
                if nextArg is None or nextArg[0] == '-':
                    raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
                self.runParams.outputDir = nextArg
                skip = True
                continue
            if ( arg == "-m" ) or ( arg == "--multi-processing" ) :
                if nextArg is None or nextArg[0] == '-':
                    raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
                self.runParams.multiProcessing = int(nextArg)
                skip = True
                continue
            if ( arg == "-%" ) or ( arg == "--modulo" ) :
                if nextArg is None or nextArg[0] == '-':
                    raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
                self.runParams.modulo = int(nextArg)
                skip = True
                continue
            if ( arg == "-%r" ) or ( arg == "--modulo-rest" ) :
                if nextArg is None or nextArg[0] == '-':
                    raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
                self.runParams.moduloRest = int(nextArg)
                skip = True
                continue
            # anything starting with '-' that fell through is unknown
            if (arg[0] == '-'):
                raise Exception("'" + arg + "' is not a valid switch!")
            # deny input files ending with '-extracted.root', as this is our signature for output files:
            if arg[-15:] == '-extracted.root':
                print "Warning: File '" + arg + "' is being skipped."
                continue
            # parse input files:
            if arg[-5:] == '.root':
                thisFile = arg
                if thisFile[:7] == "/store/":
                    # grid path: fall back to xrootd when not available locally
                    if not os.path.isfile(thisFile):
                        thisFile = "root://xrootd.ba.infn.it/" + thisFile
                else:
                    if not os.path.isfile(thisFile):
                        raise Exception("File '" + thisFile + "' does not exist!")
                self.runParams.inputFileList.append(thisFile)
                continue
            raise Exception("'" + arg + "' is not a valid root file!")
        if self.runParams.useVisualization and len(self.runParams.inputFileList) > 1:
            raise Exception("Visualization is allowed only for exactly one input file.")
        if self.runParams.run:
            # refuse to clobber an existing output file unless --force was given
            if os.path.isfile(self.runParams.outputFile) and not forceOutputOverride:
                raise Exception("'" + self.runParams.outputFile + "' exists. Use the --force switch to force overriding.")
            if len(self.runParams.outputDir) <> 0:
                if not os.path.exists(self.runParams.outputDir):
                    os.makedirs(self.runParams.outputDir)
                self.runParams.outputFilePath = self.runParams.outputDir + "/" + self.runParams.outputFile
            else:
                self.runParams.outputFilePath = self.runParams.outputFile
        #self.displayInfo()
| 2.625 | 3 |
floppy/_surf-garbage.py | hillscott/windows | 0 | 17705 | # pip install -U pywinauto
from pywinauto.application import Application
import subprocess
import time
# Remove the scheduled task that would otherwise re-run this setup
subprocess.run('SCHTASKS /DELETE /TN BuildTasks\\Sites /f')
# Drive Chrome through UI Automation (accessibility must be forced on)
app = Application(backend='uia')
app.start('C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe --force-renderer-accessibility ')
window = app.top_window()
# Allow the registry installed extensions to load...
time.sleep(45)
ch_window = window.child_window(title="Address and search bar", control_type="Edit")
ch_window.type_keys('^a')
ch_window.type_keys('{BACKSPACE}chrome://extensions/{ENTER}')
time.sleep(3)
# Enable Honey (or disable google drive offline)
# NOTE(review): buttonN indices depend on extension load order - fragile.
dlg = window.button6
try:
    dlg.click()
except Exception:
    dlg.close()
# Enable Soccer wallpapers (or Soccer wallpapers)
dlg = window.button9
try:
    dlg.click()
except Exception:
    dlg.close()
# Enable Soccer wallpapers (if it exists)
dlg = window.button12
try:
    dlg.click()
except Exception:
    dlg.close()
time.sleep(5)
ch_window.type_keys('^a')
ch_window.type_keys('{BACKSPACE}https://thepiratebay.org{ENTER}')
time.sleep(10)
# Allow notifications
dlg = window.AllowButton
try:
    dlg.wait_not('visible', timeout=2)
    dlg.click()
except Exception:
    dlg.close()
ch_window.type_keys('^a')
ch_window.type_keys('{BACKSPACE}{BACKSPACE}https://yts.mx{ENTER}')
time.sleep(3)
window.close()
| 2.390625 | 2 |
sla/migrations/0005_slaprobe_workflow.py | prorevizor/noc | 84 | 17706 | # ----------------------------------------------------------------------
# Migrate SLAProbe to workflow
# ----------------------------------------------------------------------
# Copyright (C) 2007-2021 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Third-party modules
from pymongo import UpdateMany
from bson import ObjectId
# NOC modules
from noc.core.migration.base import BaseMigration
class Migration(BaseMigration):
    """Attach workflow state/workflow ids to existing SLA documents."""

    depends_on = [("wf", "0005_slaprobe_default")]

    def migrate(self):
        # Move every existing SLA probe into the "Planned" workflow state
        probe_ops = [
            UpdateMany({}, {"$set": {"state": ObjectId("607a7e1d3d18d4fb3c12032a")}}),
        ]
        self.mongo_db["noc.sla_probes"].bulk_write(probe_ops)
        # Service Profile Workflow: attach the default workflow to profiles
        profile_ops = [
            UpdateMany({}, {"$set": {"workflow": ObjectId("607a7dddff3a857a47600b9b")}})
        ]
        self.mongo_db["noc.sla_profiles"].bulk_write(profile_ops)
| 1.71875 | 2 |
seqpos/lib/python2.7/site-packages/mercurial/dirstateguard.py | guanjue/seqpos | 0 | 17707 | <filename>seqpos/lib/python2.7/site-packages/mercurial/dirstateguard.py<gh_stars>0
# dirstateguard.py - class to allow restoring dirstate after failure
#
# Copyright 2005-2007 <NAME> <<EMAIL>>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
from .i18n import _
from . import (
error,
narrowspec,
util,
)
class dirstateguard(util.transactional):
    '''Restore dirstate at unexpected failure.

    On construction, the current ``repo.dirstate`` is written out and
    ``.hg/dirstate`` (plus the narrowspec) is saved to backup files.

    ``release()`` before ``close()`` restores ``.hg/dirstate`` from the
    backup; ``close()`` before ``release()`` just removes the backups.
    '''

    def __init__(self, repo, name):
        self._repo = repo
        self._active = False
        self._closed = False
        ident = id(self)
        self._backupname = 'dirstate.backup.%s.%d' % (name, ident)
        self._narrowspecbackupname = ('narrowspec.backup.%s.%d' %
                                      (name, ident))
        repo.dirstate.savebackup(repo.currenttransaction(), self._backupname)
        narrowspec.savebackup(repo, self._narrowspecbackupname)
        self._active = True

    def __del__(self):
        # May legitimately trigger even with correct usage: an exception
        # raised by another resource (e.g. a transaction) can prevent
        # ``release`` from ever being reached.
        if self._active:
            self._abort()

    def close(self):
        if not self._active:  # already inactivated
            raise error.Abort(
                _("can't close already inactivated backup: %s")
                % self._backupname)
        tr = self._repo.currenttransaction()
        self._repo.dirstate.clearbackup(tr, self._backupname)
        narrowspec.clearbackup(self._repo, self._narrowspecbackupname)
        self._active = False
        self._closed = True

    def _abort(self):
        narrowspec.restorebackup(self._repo, self._narrowspecbackupname)
        self._repo.dirstate.restorebackup(self._repo.currenttransaction(),
                                          self._backupname)
        self._active = False

    def release(self):
        if self._closed:
            return
        if not self._active:  # already inactivated
            raise error.Abort(
                _("can't release already inactivated backup: %s")
                % self._backupname)
        self._abort()
| 2.265625 | 2 |
config.py | mF2C/UserManagement | 0 | 17708 | """
CONFIGURATION FILE
This is being developed for the MF2C Project: http://www.mf2c-project.eu/
Copyright: <NAME>, Atos Research and Innovation, 2017.
This code is licensed under an Apache 2.0 license. Please, refer to the LICENSE.TXT file for more information
Created on 18 oct. 2018
@author: <NAME> - ATOS
"""
#!/usr/bin/python
# Main configuration dictionary for the User Management module.
dic = { "VERSION": "1.3.10",

        # USER MANAGEMENT MODULE MODE: "DEFAULT", "MF2C" , "STANDALONE"
        "UM_MODE": "MF2C",

        # CIMI
        "CIMI_URL": "http://cimi:8201/api",
        "DEVICE_USER": "rsucasas",

        # SERVER - REST API
        "SERVER_PORT": 46300,
        "HOST_IP": "localhost",
        "API_DOC_URL": "/api/v2/um",

        # working dir: "C://TMP/tmp/mf2c/um/"   "/tmp/mf2c/um/"
        "UM_WORKING_DIR_VOLUME": "/tmp/mf2c/um/",

        # db
        "DB_SHARING_MODEL": "dbt1",
        "DB_USER_PROFILE": "dbt2",

        # VERIFY_SSL controls whether we verify the server's TLS certificate or not
        "VERIFY_SSL": False,

        # for testing the interaction with the lifecycle management
        "ENABLE_ASSESSMENT": True,

        # CIMI RESOURCES managed by this component
        "CIMI_PROFILES": "user-profile",
        "CIMI_SHARING_MODELS": "sharing-model",

        # default user-profile / sharing-model values
        "SERVICE_CONSUMER": True,
        "RESOURCE_CONTRIBUTOR": True,
        "MAX_APPS": 2,
        "BATTERY_LIMIT": 50,
        "GPS_ALLOWED": True,
        "MAX_CPU_USAGE": 50,
        "MAX_MEM_USAGE": 50,
        "MAX_STO_USAGE": 50,
        "MAX_BANDWITH_USAGE": 50,

        # URLs / ports from other components:
        # LIFECYCLE
        "URL_PM_LIFECYCLE": "http://lifecycle:46000/api/v2/lm"
    }
# APPS RUNNING
APPS_RUNNING = 0 | 1.53125 | 2 |
api/base/views/__init__.py | simpsonw/atmosphere | 197 | 17709 | from .version import VersionViewSet, DeployVersionViewSet
__all__ = ["VersionViewSet", "DeployVersionViewSet"]
| 1.0625 | 1 |
amaranth/vendor/xilinx_spartan_3_6.py | psumesh/nmigen | 528 | 17710 | <reponame>psumesh/nmigen<filename>amaranth/vendor/xilinx_spartan_3_6.py
import warnings
from .xilinx import XilinxPlatform
__all__ = ["XilinxSpartan3APlatform", "XilinxSpartan6Platform"]
# Backwards-compatibility aliases: Spartan-3A and Spartan-6 are both
# handled by the unified XilinxPlatform implementation.
XilinxSpartan3APlatform = XilinxPlatform
XilinxSpartan6Platform = XilinxPlatform

# TODO(amaranth-0.4): remove
warnings.warn("instead of amaranth.vendor.xilinx_spartan_3_6.XilinxSpartan3APlatform and "
              ".XilinxSpartan6Platform, use amaranth.vendor.xilinx.XilinxPlatform",
              DeprecationWarning, stacklevel=2)
| 1.078125 | 1 |
spatialtis/_plotting/api/community_map.py | Mr-Milk/SpatialTis | 10 | 17711 | from ast import literal_eval
from collections import Counter
from typing import Dict, Optional
from anndata import AnnData
from spatialtis.config import Config, analysis_list
from ...utils import doc
from ..base import graph_position_interactive, graph_position_static
from .utils import query_df
@doc
def community_map(
    data: AnnData,
    roi: Dict,
    min_cells: int = 10,
    use: str = "static",
    community_key: Optional[str] = None,
    centroid_key: Optional[str] = None,
    neighbors_key: Optional[str] = None,
    **plot_options,
):
    """Visualize cell communities in ROI

    Args:
        data: {adata_plotting}
        roi: {roi}
        min_cells: Show communities contain more than a number of cells
        use: "static" or "interactive" (Default: "static")
        community_key: {community_key}
        centroid_key: {centroid_key}
        neighbors_key: {neighbors_key}
        **plot_options: Pass to :class:`spatialtis._plotting.base.graph_position_static` or
            :class:`spatialtis._plotting.base.graph_position_interactive`

        {pyecharts_tips}

    """
    # Fall back to the last-used analysis key / global Config defaults
    if community_key is None:
        community_key = analysis_list["cell_community"].last_used_key
    if centroid_key is None:
        centroid_key = Config.centroid_key
    if neighbors_key is None:
        neighbors_key = Config.NEIGHBORS_KEY
    df = query_df(data.obs, roi)
    nodes_types = df[community_key].tolist()
    # Keep only communities with at least `min_cells` members
    commus = []
    for commu, count in Counter(nodes_types).items():
        if count >= min_cells:
            commus.append(commu)
    # 'index' column below preserves the pre-filter positional index,
    # which is what the stored neighbor ids refer to
    df = df.reset_index(drop=True)
    xdf = df[df[community_key].isin(commus)]
    xdf = xdf.reset_index()
    if len(xdf) == 0:
        raise ValueError("Seems like there is no cells left to be drawn")
    # Centroids/neighbors may be stored as stringified lists; parse if so
    need_eval_nodes = isinstance(xdf[centroid_key][0], str)
    need_eval_neighs = isinstance(xdf[neighbors_key][0], str)
    if need_eval_nodes:
        nodes = [literal_eval(n) for n in xdf[centroid_key]]
    else:
        nodes = [n for n in xdf[centroid_key]]
    if need_eval_neighs:
        neighs = [literal_eval(n) for n in xdf[neighbors_key]]
    else:
        neighs = [n for n in xdf[neighbors_key]]
    nodes_types = xdf[community_key]
    # Build edges only between neighboring cells of the same community;
    # neighbors filtered out above are silently dropped (lookup misses)
    edges = []
    edges_types = []
    for i, n in zip(xdf.index, neighs):
        for x in n:
            new_x = xdf[xdf["index"] == x].index
            if len(new_x) == 1:
                new_x = new_x[0]
                if nodes_types[i] == nodes_types[new_x]:
                    edges.append((i, new_x))
                    edges_types.append(nodes_types[i])
    # Name the saved figure after the ROI query
    plot_options["saved_name"] = "community_map_" + ",".join(
        [f"{k}={v}" for k, v in roi.items()]
    )
    if use == "interactive":
        return graph_position_interactive(
            nodes, edges, edges_types=edges_types, **plot_options
        )
    else:
        return graph_position_static(
            nodes, edges, edges_types=edges_types, **plot_options
        )
| 2.515625 | 3 |
pynics/binparse/castep_bin_results.py | ThatPerson/pynics | 2 | 17712 | <filename>pynics/binparse/castep_bin_results.py
# Python 2-to-3 compatibility code
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
from pynics.binparse.forbinfile import RecordError
def cbin_results_parse(binfile, results_store, curr_version, params,
                       current_cell, tolerant=False):
    """Parse the results section of a .castep_bin file into results_store.

    Args:
        binfile: sequential binary record reader.
        results_store: dict to populate.
        curr_version: file format version.
        params: previously parsed parameters (needs 'nspins').
        current_cell: previously parsed cell (needs 'nkpts').
        tolerant: when True, unknown optional sections are skipped
            instead of raising.
    """
    # First, parse the elec results part (always present)
    results_store['elec'] = collections.OrderedDict()
    cbin_elec_parse(binfile, results_store[
        'elec'], curr_version, params, current_cell)
    # Then go for the optional stuff. FIX: the tolerant flag was accepted
    # but never forwarded, so tolerant parsing could never take effect.
    cbin_optional_parse(binfile, results_store, curr_version,
                        tolerant=tolerant)
def cbin_elec_parse(binfile, elec_store, curr_version, params, current_cell):
    """Parse the mandatory electronic-results block into ``elec_store``.

    The record order is fixed by the binary layout, so the reads below
    must not be reordered. Relies on ``params['nspins']`` and
    ``current_cell['nkpts']`` having been parsed already.
    """
    # A few informations are stored
    elec_store['found_ground_state_wvfn'] = not (
        binfile.read_record('i')[0] == 0)  # Logical value
    elec_store['found_ground_state_den'] = not (
        binfile.read_record('i')[0] == 0)  # Logical value
    elec_store['total_energy'] = binfile.read_record('d')[0]
    elec_store['fermi_energy'] = binfile.read_record('d')
    # Fermi energy for both spins if we have two. This relies on param being
    # already parsed
    # NOTE(review): both entries take the first value; the second spin's
    # Fermi energy is filled in later by the optional E_FERMI block.
    if params['nspins'] == 2:
        elec_store['fermi_energy'] = (elec_store['fermi_energy'][
                                      0], elec_store['fermi_energy'][0])
    elec_store['wvfn_nbands'], elec_store[
        'wvfn_nspins'] = binfile.read_record('i')
    # Read occupation eigenvalues for the Kohn-Sham states. This relies on
    # cell being already parsed
    elec_store['occupation'] = {}
    for kp_i in range(0, current_cell['nkpts']):
        kp = binfile.read_record('d')  # k-point coordinates, used as dict key
        elec_store['occupation'][kp] = {'occ': [], 'nrg': []}
        for ns_i in range(0, elec_store['wvfn_nspins']):
            elec_store['occupation'][kp]['occ'].append(
                binfile.read_record('d'))  # Occupation
            elec_store['occupation'][kp]['nrg'].append(
                binfile.read_record('d'))  # Energies
    # Why is this here again? Whatever.
    elec_store['found_ground_state_den'] = not (
        binfile.read_record('i')[0] == 0)  # Logical value
    # Read the fine grid size, keep the information because it is of use for
    # various other parsing operations
    elec_store['model_ngx_fine'], elec_store['model_ngy_fine'], elec_store[
        'model_ngz_fine'] = binfile.read_record('i')
    # Finally, dummy read of density (one record per (x, y) column of the
    # fine grid; the contents are discarded)
    for n in range(0, elec_store['model_ngx_fine'] *
                   elec_store['model_ngy_fine']):
        dummy_int = binfile.read_record('i')
def cbin_optional_parse(binfile, results_store, curr_version, tolerant=False):
    """Parse the optional trailing sections of the results block.

    Reads header strings and dispatches each one to the matching parser in
    ``castep_bin_olist`` until the 'END' marker is found. With
    ``tolerant=True``, unrecognized sections are skipped instead of raising
    CastepBinError.
    """
    if (tolerant):  # In this case, unknown sections will simply be ignored
        def skip_optional():
            # Consume records until the next alphabetic header, then rewind
            # one record so the main loop can read that header itself.
            # FIX: this closure referenced 'self.binfile', but this is a
            # module-level function with no 'self' - it raised NameError
            # whenever a section actually had to be skipped.
            while True:
                header = binfile.read_string_record()
                if header.isalpha():
                    binfile.backspace()
                    break
    try:
        while True:
            header = binfile.read_string_record()
            if (header == 'END'):
                break
            try:
                castep_bin_olist[header](binfile, results_store, curr_version)
            except KeyError:
                if (tolerant):
                    print("Skipping unrecognized header " + header)
                    skip_optional()
                else:
                    # The default case, doesn't account for forward
                    # compatibility for now
                    raise CastepBinError('Unknown optional section found')
    except RecordError:
        raise CastepBinError(
            'End of file reached while parsing optional blocks')
# Utility routine
def tensor_reshape(V):
    """Reshape a flat sequence of 9*n values into n row-major 3x3 tensors."""
    tensors = []
    for base in range(0, len(V), 9):
        rows = []
        for row_start in range(base, base + 9, 3):
            rows.append(tuple(V[row_start:row_start + 3]))
        tensors.append(tuple(rows))
    return tuple(tensors)
def opt_e_fermi_parse(binfile, results_store, curr_version):
    """Parse the E_FERMI optional section: the second spin channel's Fermi
    energy, merged with the first-spin value parsed earlier."""
    second_spin = binfile.read_record('d')[0]
    first_spin = results_store['elec']['fermi_energy'][0]
    results_store['elec']['fermi_energy'] = (first_spin, second_spin)
def opt_oep_pot_parse(binfile, results_store, curr_version):
    """Parse the optimized effective potential (OEP) optional section.

    Fills ``results_store['oep_pot']`` with the convergence flag, the OEP
    energy difference, and the potential on the fine grid stored as a flat
    list indexed ``ix + iy*ngx + iz*ngx*ngy`` holding one ``(real, imag)``
    pair per spin channel.
    """
    results_store['oep_pot'] = {}
    results_store['oep_pot']['found_oep_ground_state'] = not (
        binfile.read_record('i') == 0)
    results_store['oep_pot']['oep_energy_difference'] = (binfile
                                                         .read_record('d')[0])
    # We need nspins, we get it indirectly from the parsed Fermi energies
    nspins = len(results_store['elec']['fermi_energy'])
    ngx_fine = results_store['elec']['model_ngx_fine']
    ngy_fine = results_store['elec']['model_ngy_fine']
    ngz_fine = results_store['elec']['model_ngz_fine']
    # FIX: allocate one independent per-spin list per grid point. The old
    # code used list multiplication ([[...]]*n), which aliased a single
    # inner list across every grid point, so each write clobbered all
    # entries of pot_fine.
    results_store['oep_pot']['pot_fine'] = [
        [0.0 for _s in range(nspins)]
        for _g in range(ngx_fine * ngy_fine * ngz_fine)
    ]
    for s_i in range(0, nspins):
        for nx1 in range(0, ngx_fine):
            for ny1 in range(0, ngy_fine):
                nx, ny, grid_charge_r, grid_charge_im = binfile.read_record(
                    'iidd')
                # Fortran convention needs to be used
                for nz in range(1, ngz_fine + 1):
                    # Back to Python convention, arrays count from 0
                    igrid = (nx - 1) + (ny - 1) * ngx_fine + \
                        (nz - 1) * ngx_fine * ngy_fine
                    results_store['oep_pot']['pot_fine'][igrid][
                        s_i] = (grid_charge_r, grid_charge_im)
def opt_de_dloge_parse(binfile, results_store, curr_version):
    """Parse the logarithmic energy derivative dE/d(log E)."""
    record = binfile.read_record('d')
    results_store['de_dloge'] = record[0]
def opt_forces_parse(binfile, results_store, curr_version):
    """Parse atomic forces: a flat record of 3*n doubles reshaped into a
    tuple of (fx, fy, fz) triplets, one per atom."""
    flat = binfile.read_record('d')
    results_store['forces'] = tuple(
        tuple(flat[offset:offset + 3]) for offset in range(0, len(flat), 3))
def opt_stress_parse(binfile, results_store, curr_version):
    """Parse the stress and strain tensors. The stress record is stored
    flat as read; the strain record is reshaped into 3x3 tensors."""
    raw_stress = binfile.read_record('d')
    raw_strain = binfile.read_record('d')
    results_store['stress'] = {
        'stress': raw_stress,
        'strain': tensor_reshape(raw_strain),
    }
def opt_shielding_parse(binfile, results_store, curr_version):
    """Parse NMR shieldings: per-atom magnetic shielding ('ms', reshaped
    into 3x3 tensors) and the susceptibility ('sus', kept flat)."""
    ms_record = binfile.read_record('d')
    sus_record = binfile.read_record('d')
    results_store['shielding'] = {
        'ms': tensor_reshape(ms_record),
        'sus': sus_record,
    }
def opt_efg_parse(binfile, results_store, curr_version):
    """Parse electric field gradient tensors, reshaped into 3x3 blocks."""
    raw = binfile.read_record('d')
    results_store['efg'] = tensor_reshape(raw)
# Dispatch table: maps optional-section header strings found in the binary
# file to their parser functions (consumed by cbin_optional_parse).
castep_bin_olist = {
    'E_FERMI': opt_e_fermi_parse,
    'OEP_POT': opt_oep_pot_parse,
    'DE_DLOGE': opt_de_dloge_parse,
    'FORCES': opt_forces_parse,
    'STRESS': opt_stress_parse,
    'SHIELDING': opt_shielding_parse,
    'EFG': opt_efg_parse,
}
| 2.40625 | 2 |
epi-poc-demo/node-b/node-b.py | onnovalkering/epif-poc | 0 | 17713 | <reponame>onnovalkering/epif-poc
import os
import socket
import threading
# Protocol constants shared with the firewall peer
HEADER = 64  # size (bytes) of the fixed-width length prefix of each message
PORT = 5053
FW = "192.168.101.2"  # firewall address this node listens on
ADDR = (FW, PORT)
FORMAT = 'utf-8'
DISCONNECT_MESSAGE = "!DISCONNECT"
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(ADDR)
def handle_client(conn, addr):
    """Serve one firewall connection: read length-prefixed messages and
    acknowledge each, until the client sends DISCONNECT_MESSAGE."""
    print(f"[FIREWALL CONNECTION] {addr} connected.")
    connected = True
    while connected:
        # First HEADER bytes carry the decimal length of the payload
        msg_length = conn.recv(HEADER).decode(FORMAT)
        if msg_length:
            msg_length = int(msg_length)
            msg = conn.recv(msg_length).decode(FORMAT)
            if msg == DISCONNECT_MESSAGE:
                connected = False
            print(f"[{addr}] {msg}")
            conn.send("Msg received".encode(FORMAT))
    conn.close()
def start():
    """Accept loop: spawn one handler thread per inbound connection."""
    server.listen()
    print(f"[LISTENING] firewall is running on {FW}")
    while True:
        conn, addr = server.accept()
        thread = threading.Thread(target=handle_client, args=(conn, addr))
        thread.start()
        # NOTE: threading.activeCount() is the deprecated camelCase alias
        # of threading.active_count(); kept as-is.
        print(f"[ACTIVE CONNECTIONS] {threading.activeCount() - 1}")
print("[STARTING] server is starting...")
start()
| 2.75 | 3 |
sliding_window/equal_substring.py | sleebapaul/codeforces | 0 | 17714 | """
1208. Get Equal Substrings Within Budget
Straight forward. Asked the max len, so count the max each time.
"""
class Solution:
    def equalSubstring(self, s: str, t: str, maxCost: int) -> int:
        """Return the length of the longest substring of ``s`` that can be
        changed into the corresponding substring of ``t`` with total
        per-character ASCII-distance cost at most ``maxCost``.

        Classic non-shrinking sliding window: the window only ever grows
        or slides right, so its final size is the answer.
        """
        left = 0
        budget_used = 0
        longest = 0
        for right in range(len(s)):
            budget_used += abs(ord(s[right]) - ord(t[right]))
            if budget_used > maxCost:
                # over budget: slide the window right by one
                budget_used -= abs(ord(s[left]) - ord(t[left]))
                left += 1
            longest = max(longest, right - left + 1)
        return longest
test/functional/esperanza_withdraw.py | frolosofsky/unit-e | 0 | 17715 | <gh_stars>0
#!/usr/bin/env python3
# Copyright (c) 2018-2019 The Unit-e developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import UnitETestFramework
from test_framework.util import (
json,
connect_nodes,
disconnect_nodes,
assert_equal,
assert_finalizationstate,
assert_raises_rpc_error,
sync_blocks,
wait_until,
)
from decimal import Decimal
import time
LOGOUT_DYNASTY_DELAY = 3
WITHDRAW_EPOCH_DELAY = 12
class EsperanzaWithdrawTest(UnitETestFramework):
    """Functional test of the finalizer deposit -> logout -> withdraw cycle.

    Drives one proposer and two finalizers through deposit activation,
    logout (delayed by LOGOUT_DYNASTY_DELAY dynasties), withdrawal (delayed
    by WITHDRAW_EPOCH_DELAY epochs), spending the withdrawn funds, and
    finally re-depositing.
    """
    def set_test_params(self):
        """Configure 3 nodes sharing one esperanza config; nodes 1 and 2 validate."""
        self.num_nodes = 3
        esperanza_config = {
            'dynastyLogoutDelay': LOGOUT_DYNASTY_DELAY,
            'withdrawalEpochDelay': WITHDRAW_EPOCH_DELAY
        }
        json_params = json.dumps(esperanza_config)
        finalizer_node_params = ['-esperanzaconfig=' + json_params, '-validating=1']
        proposer_node_params = ['-esperanzaconfig=' + json_params]
        self.extra_args = [
            proposer_node_params,
            finalizer_node_params,
            finalizer_node_params,
        ]
        self.setup_clean_chain = True
    # create topology where arrows denote non-persistent connection
    # finalizer1 → proposer ← finalizer2
    def setup_network(self):
        """Connect both finalizers to the proposer (star topology)."""
        self.setup_nodes()
        proposer = self.nodes[0]
        finalizer1 = self.nodes[1]
        finalizer2 = self.nodes[2]
        connect_nodes(finalizer1, proposer.index)
        connect_nodes(finalizer2, proposer.index)
    def run_test(self):
        """Walk the full finalizer lifecycle and assert finalization state
        (dynasty/epoch/justified/finalized/validator counts) at each step."""
        proposer = self.nodes[0]
        finalizer1 = self.nodes[1]
        finalizer2 = self.nodes[2]
        self.setup_stake_coins(*self.nodes)
        # Leave IBD (initial block download) so nodes accept transactions.
        proposer.generatetoaddress(1, proposer.getnewaddress('', 'bech32'))
        sync_blocks([proposer, finalizer1, finalizer2], timeout=10)
        finalizer1_address = finalizer1.getnewaddress('', 'legacy')
        # create deposits
        #      F
        # e0 - e1
        #      d1
        #      d2
        d1 = finalizer1.deposit(finalizer1_address, 1500)
        d2 = finalizer2.deposit(finalizer2.getnewaddress('', 'legacy'), 1500)
        self.wait_for_transaction(d1, timeout=10)
        self.wait_for_transaction(d2, timeout=10)
        proposer.generatetoaddress(1, proposer.getnewaddress('', 'bech32'))
        sync_blocks([proposer, finalizer1, finalizer2], timeout=10)
        disconnect_nodes(finalizer1, proposer.index)
        disconnect_nodes(finalizer2, proposer.index)
        assert_equal(proposer.getblockcount(), 2)
        assert_finalizationstate(proposer, {'currentDynasty': 0,
                                            'currentEpoch': 1,
                                            'lastJustifiedEpoch': 0,
                                            'lastFinalizedEpoch': 0,
                                            'validators': 0})
        self.log.info('deposits are created')
        # Generate enough blocks to activate deposits
        #      F    F    F    F    J
        # e0 - e1 - e2 - e3 - e4 - e5 - e6[26]
        #      d1
        #      d2
        proposer.generatetoaddress(3 + 5 + 5 + 5 + 5, proposer.getnewaddress('', 'bech32'))
        assert_equal(proposer.getblockcount(), 25)
        assert_finalizationstate(proposer, {'currentDynasty': 2,
                                            'currentEpoch': 5,
                                            'lastJustifiedEpoch': 4,
                                            'lastFinalizedEpoch': 3,
                                            'validators': 0})
        proposer.generatetoaddress(1, proposer.getnewaddress('', 'bech32'))
        assert_equal(proposer.getblockcount(), 26)
        assert_finalizationstate(proposer, {'currentDynasty': 3,
                                            'currentEpoch': 6,
                                            'lastJustifiedEpoch': 4,
                                            'lastFinalizedEpoch': 3,
                                            'validators': 2})
        self.log.info('finalizers are created')
        # Logout finalizer1
        #      F    F    F    F    J
        # e0 - e1 - e2 - e3 - e4 - e5 - e6[26]
        #      d1                       l1
        #      d2
        self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=proposer)
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=proposer)
        # TODO UNIT-E: logout tx can't be created if its vote is not in the block
        # we should check that input of logout tx is in the mempool too
        proposer.generatetoaddress(1, proposer.getnewaddress('', 'bech32'))
        connect_nodes(finalizer1, proposer.index)
        sync_blocks([finalizer1, proposer], timeout=10)
        l1 = finalizer1.logout()
        wait_until(lambda: l1 in proposer.getrawmempool(), timeout=10)
        disconnect_nodes(finalizer1, proposer.index)
        proposer.generatetoaddress(3, proposer.getnewaddress('', 'bech32'))
        assert_equal(proposer.getblockcount(), 30)
        assert_finalizationstate(proposer, {'currentDynasty': 3,
                                            'currentEpoch': 6,
                                            'lastJustifiedEpoch': 5,
                                            'lastFinalizedEpoch': 4,
                                            'validators': 2})
        self.log.info('finalizer1 logged out in dynasty=3')
        # During LOGOUT_DYNASTY_DELAY both finalizers can vote.
        # Since the finalization happens at every epoch,
        # number of dynasties is equal to number of epochs.
        for _ in range(LOGOUT_DYNASTY_DELAY):
            proposer.generatetoaddress(1, proposer.getnewaddress('', 'bech32'))
            self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=proposer)
            self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=proposer)
            proposer.generatetoaddress(4, proposer.getnewaddress('', 'bech32'))
        assert_equal(proposer.getblockcount(), 45)
        assert_finalizationstate(proposer, {'currentDynasty': 6,
                                            'currentEpoch': 9,
                                            'lastJustifiedEpoch': 8,
                                            'lastFinalizedEpoch': 7,
                                            'validators': 2})
        self.log.info('finalizer1 voted during logout delay successfully')
        # During WITHDRAW_DELAY finalizer1 can't vote and can't withdraw
        proposer.generatetoaddress(1, proposer.getnewaddress('', 'bech32'))
        assert_finalizationstate(proposer, {'currentDynasty': 7,
                                            'currentEpoch': 10,
                                            'lastJustifiedEpoch': 8,
                                            'lastFinalizedEpoch': 7,
                                            'validators': 1})
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=proposer)
        proposer.generatetoaddress(1, proposer.getnewaddress('', 'bech32'))
        assert_finalizationstate(proposer, {'currentDynasty': 7,
                                            'currentEpoch': 10,
                                            'lastJustifiedEpoch': 9,
                                            'lastFinalizedEpoch': 8,
                                            'validators': 1})
        # finalizer1 can't vote so we keep it connected
        connect_nodes(finalizer1, proposer.index)
        time.sleep(2)  # ensure no votes from finalizer1
        assert_equal(len(proposer.getrawmempool()), 0)
        proposer.generatetoaddress(3, proposer.getnewaddress('', 'bech32'))
        assert_equal(proposer.getblockcount(), 50)
        assert_finalizationstate(proposer, {'currentDynasty': 7,
                                            'currentEpoch': 10,
                                            'lastJustifiedEpoch': 9,
                                            'lastFinalizedEpoch': 8,
                                            'validators': 1})
        # WITHDRAW_DELAY - 2 is because:
        # -1 as we checked the first loop manually
        # -1 as at this epoch we should be able to withdraw already
        for _ in range(WITHDRAW_EPOCH_DELAY - 2):
            proposer.generatetoaddress(1, proposer.getnewaddress('', 'bech32'))
            self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=proposer)
            proposer.generatetoaddress(4, proposer.getnewaddress('', 'bech32'))
        assert_equal(proposer.getblockcount(), 100)
        assert_finalizationstate(proposer, {'currentDynasty': 17,
                                            'currentEpoch': 20,
                                            'lastJustifiedEpoch': 19,
                                            'lastFinalizedEpoch': 18,
                                            'validators': 1})
        # last block that finalizer1 can't withdraw
        # TODO UNIT-E: allow to create a withdraw tx on checkpoint
        # as it will be added to the block on the next epoch only.
        # We have an known issue https://github.com/dtr-org/unit-e/issues/643
        # that finalizer can't vote after checkpoint is processed, it looks that
        # finalizer can't create any finalizer commits at this point (and only at this point).
        assert_raises_rpc_error(-8, 'Cannot send withdraw transaction.', finalizer1.withdraw, finalizer1_address)
        self.log.info('finalizer1 could not withdraw during WITHDRAW_DELAY period')
        # test that deposit can be withdrawn
        # e0 - e1 - ... - e6 - ... - e21[101, 102]
        #      d1         l1              w1
        #      d2
        proposer.generatetoaddress(1, proposer.getnewaddress('', 'bech32'))
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=proposer)
        assert_equal(proposer.getblockcount(), 101)
        assert_finalizationstate(proposer, {'currentDynasty': 18,
                                            'currentEpoch': 21,
                                            'lastJustifiedEpoch': 19,
                                            'lastFinalizedEpoch': 18,
                                            'validators': 1})
        sync_blocks([proposer, finalizer1], timeout=10)
        w1 = finalizer1.withdraw(finalizer1_address)
        wait_until(lambda: w1 in proposer.getrawmempool(), timeout=10)
        proposer.generatetoaddress(1, proposer.getnewaddress('', 'bech32'))
        sync_blocks([proposer, finalizer1])
        self.log.info('finalizer1 was able to withdraw deposit at dynasty=18')
        # test that withdraw commit can be spent
        # test that deposit can be withdrawn
        # e0 - e1 - ... - e6 - ... - e21[101, 102, 103]
        #      d1         l1              w1   spent_w1
        #      d2
        spent_w1_raw = finalizer1.createrawtransaction(
            [{'txid': w1, 'vout': 0}], {finalizer1_address: Decimal('1499.999')})
        spent_w1_signed = finalizer1.signrawtransaction(spent_w1_raw)
        spent_w1 = finalizer1.sendrawtransaction(spent_w1_signed['hex'])
        self.wait_for_transaction(spent_w1, nodes=[proposer])
        # mine block
        block_hash = proposer.generatetoaddress(1, proposer.getnewaddress('', 'bech32'))[0]
        assert spent_w1 in proposer.getblock(block_hash)['tx']
        self.log.info('finalizer1 was able to spend withdraw commit')
        # Test that after withdraw the node can deposit again
        sync_blocks([proposer, finalizer1], timeout=10)
        assert_equal(proposer.getblockcount(), 103)
        wait_until(lambda: finalizer1.getvalidatorinfo()['validator_status'] == 'NOT_VALIDATING',
                   timeout=5)
        deposit = finalizer1.deposit(finalizer1.getnewaddress('', 'legacy'), 1500)
        wait_until(lambda: finalizer1.getvalidatorinfo()['validator_status'] == 'WAITING_DEPOSIT_CONFIRMATION',
                   timeout=5)
        self.wait_for_transaction(deposit, timeout=10, nodes=[proposer, finalizer1])
        proposer.generate(1)
        sync_blocks([proposer, finalizer1], timeout=10)
        assert_equal(proposer.getblockcount(), 104)
        wait_until(lambda: finalizer1.getvalidatorinfo()['validator_status'] == 'WAITING_DEPOSIT_FINALIZATION',
                   timeout=20)
        self.log.info('finalizer1 deposits again')
        disconnect_nodes(finalizer1, proposer.index)
        proposer.generate(2)
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=proposer)
        assert_equal(proposer.getblockcount(), 106)
        proposer.generate(5)
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=proposer)
        assert_equal(proposer.getblockcount(), 111)
        assert_finalizationstate(proposer, {'currentDynasty': 20,
                                            'currentEpoch': 23,
                                            'lastJustifiedEpoch': 21,
                                            'lastFinalizedEpoch': 20,
                                            'validators': 1})
        proposer.generate(5)
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=proposer)
        assert_equal(proposer.getblockcount(), 116)
        assert_finalizationstate(proposer, {'currentDynasty': 21,
                                            'currentEpoch': 24,
                                            'lastJustifiedEpoch': 22,
                                            'lastFinalizedEpoch': 21,
                                            'validators': 2})
        self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=proposer)
        self.log.info('finalizer1 votes again')
# Standard functional-test entry point: run the test when invoked directly.
if __name__ == '__main__':
    EsperanzaWithdrawTest().main()
| 2.234375 | 2 |
test/geocoders/placefinder.py | gongso1st/geopy | 1 | 17716 | <gh_stars>1-10
import unittest
from geopy.compat import u
from geopy.point import Point
from geopy.geocoders import YahooPlaceFinder
from test.geocoders.util import GeocoderTestBase, env
class YahooPlaceFinderTestCaseUnitTest(GeocoderTestBase):  # pylint: disable=R0904,C0111
    """Offline unit tests for YahooPlaceFinder that need no API credentials."""

    def test_user_agent_custom(self):
        # A user_agent passed to the constructor must be propagated into the
        # HTTP headers the geocoder will send.
        custom_agent = 'my_user_agent/1.0'
        geocoder = YahooPlaceFinder(
            consumer_key='DUMMYKEY1234',
            consumer_secret='DUMMYSECRET',
            user_agent=custom_agent,
        )
        self.assertEqual(geocoder.headers['User-Agent'], custom_agent)
@unittest.skipUnless(  # pylint: disable=R0904,C0111
    bool(env.get('YAHOO_KEY')) and bool(env.get('YAHOO_SECRET')),
    "YAHOO_KEY and YAHOO_SECRET env variables not set"
)
class YahooPlaceFinderTestCase(GeocoderTestBase): # pylint: disable=R0904,C0111
    # Live-network tests; they run only when Yahoo API credentials are
    # provided via the YAHOO_KEY / YAHOO_SECRET environment variables.
    @classmethod
    def setUpClass(cls):
        # One shared geocoder instance for the whole test case.
        cls.geocoder = YahooPlaceFinder(
            env['YAHOO_KEY'],
            env['YAHOO_SECRET']
        )
    def test_geocode(self):
        """
        YahooPlaceFinder.geocode resolves a plain place name.
        """
        self.geocode_run(
            {"query": "nyc"},
            {"latitude": 40.71455, "longitude": -74.00712},
        )
    def test_unicode_name(self):
        """
        YahooPlaceFinder.geocode handles a non-ASCII (CJK) query.
        """
        self.geocode_run(
            {"query": u("\u6545\u5bab")},
            {"latitude": 39.916, "longitude": 116.390},
        )
    def test_reverse_string(self):
        """
        YahooPlaceFinder.reverse accepts a "lat, lon" string.
        """
        self.reverse_run(
            {"query": "40.75376406311989, -73.98489005863667"},
            {"latitude": 40.75376406311989, "longitude": -73.98489005863667}
        )
    def test_reverse_point(self):
        """
        YahooPlaceFinder.reverse accepts a geopy Point.
        """
        self.reverse_run(
            {"query": Point(40.75376406311989, -73.98489005863667)},
            {"latitude": 40.75376406311989, "longitude": -73.98489005863667}
        )
    def test_timezone(self):
        """
        YahooPlaceFinder.geocode with with_timezone=True still geocodes.
        """
        self.geocode_run(
            {"query": "nyc", "with_timezone": True},
            {"latitude": 40.71455, "longitude": -74.00712},
        )
| 2.515625 | 3 |
inetdxmlrpc.py | Leonidas-from-XIV/sandbox | 0 | 17717 | <reponame>Leonidas-from-XIV/sandbox<gh_stars>0
#!/usr/bin/env python2.4
# -*- encoding: latin-1 -*-
"""A small XML-RPC Server running under control
of the internet superserver inetd.
Configuring:
Add this line to your inetd.conf
embedxmlrpc stream tcp nowait user /usr/sbin/tcpd inetdxmlrpc.py
Where user is the user to execute the script and
inetdxmlrpc.py the path to the script.
and this line to your services.conf
embedxmlrpc 7373/tcp # standalone XML-RPC server
there 7373 will be the port
You have to restart your inetd.
"""
import sys, xmlrpclib
def sumAndDifference(a, b):
    # NOTE(review): despite the name, this returns only the sum.  The usual
    # XML-RPC "sumAndDifference" demo returns both a+b and a-b; confirm what
    # clients expect before changing the return value (it would alter the
    # wire response).
    return a + b
funcs = {"sumAndDifference": sumAndDifference}
def inetdcall():
    """Serve a single XML-RPC request over stdin/stdout (inetd style).

    inetd hands us the client socket as stdin/stdout, so we parse the HTTP
    request headers for Content-Length, read exactly that many bytes of
    XML-RPC payload, dispatch the call via ``funcs`` and write the response
    body back.
    """
    # Read the HTTP header block up to the terminating blank line.  The
    # original code looped forever (and raised IndexError on the empty
    # string from readline()) when the peer closed the connection early,
    # and it assumed Content-Length was the last header; both fixed here.
    cl = None
    while True:
        line = sys.stdin.readline()
        if not line:
            # EOF: client disconnected before sending a complete request.
            return
        line = line.rstrip("\r\n")
        if not line:
            # Blank line terminates the header block.
            break
        if "Content-Length:" in line:
            cl = int(line.split()[1])
    if cl is None:
        # Malformed request without Content-Length; nothing we can do.
        return
    request = sys.stdin.read(cl)
    params, method = xmlrpclib.loads(request)
    try:
        result = funcs[method](*params)
        response = xmlrpclib.dumps((result,), methodresponse=True)
    except Exception:
        # Protocol-correct failure: report a Fault instead of dying with a
        # traceback and an empty response (e.g. unknown method name).
        response = xmlrpclib.dumps(
            xmlrpclib.Fault(1, "method %r failed" % method),
            methodresponse=True)
    sys.stdout.write(response)
if __name__ == '__main__':
    # inetd spawns one process per connection; handle exactly one request.
    inetdcall()
| 2.15625 | 2 |
examples/tensorflow/train/crnn_chinese/code_multi/tools/train_shadownet_multi.py | soar-zhengjian/uai-sdk | 38 | 17718 | """
Train shadow net script
"""
import argparse
import functools
import itertools
import os
import os.path as ops
import sys
import time
import numpy as np
import tensorflow as tf
import pprint
import shadownet
import six
from six.moves import xrange # pylint: disable=redefined-builtin
sys.path.append('/data/')
from crnn_model import crnn_model
from local_utils import data_utils, log_utils, tensorboard_vis_summary
from global_configuration import config
from uaitrain.arch.tensorflow import uflag
from typing import List
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import device as pydev
from tensorflow.python.training import device_setter
# Command-line flags: location of the TFRecord dataset and an optional
# checkpoint to warm-start from.
tf.app.flags.DEFINE_string('dataset_dir','/data/data/tfrecords','data path')
tf.app.flags.DEFINE_string('weights_path',None,'weight path')
FLAGS = tf.app.flags.FLAGS
# Module-level logger shared by the training utilities.
logger = log_utils.init_logger()
def local_device_setter(num_devices=1,
                        ps_device_type='cpu',
                        worker_device='/cpu:0',
                        ps_ops=None,
                        ps_strategy=None):
    """Build a device function that places variables on parameter-server
    devices and all other ops on ``worker_device``.

    Args:
        num_devices: number of parameter-server devices variables are
            spread over.
        ps_device_type: device type ('cpu'/'gpu') that hosts variables.
        ps_ops: op type names treated as variables; defaults to the
            standard TF variable op types.
        ps_strategy: callable mapping an op to a ps-device index; defaults
            to round-robin over ``num_devices``.

    Returns:
        A function usable as the argument of ``tf.device(...)``.
    """
    # Fix: compare to None by identity (`is None`), not equality (PEP 8);
    # `== None` can misbehave with objects overriding __eq__.
    if ps_ops is None:
        ps_ops = ['Variable', 'VariableV2', 'VarHandleOp']
    if ps_strategy is None:
        ps_strategy = device_setter._RoundRobinStrategy(num_devices)
    if not six.callable(ps_strategy):
        raise TypeError("ps_strategy must be callable")
    def _local_device_chooser(op):
        # Keep any device already pinned on the op and merge our choice in.
        current_device = pydev.DeviceSpec.from_string(op.device or "")
        node_def = op if isinstance(op, node_def_pb2.NodeDef) else op.node_def
        if node_def.op in ps_ops:
            ps_device_spec = pydev.DeviceSpec.from_string(
                '/{}:{}'.format(ps_device_type, ps_strategy(op)))
            ps_device_spec.merge_from(current_device)
            return ps_device_spec.to_string()
        else:
            worker_device_spec = pydev.DeviceSpec.from_string(worker_device or "")
            worker_device_spec.merge_from(current_device)
            return worker_device_spec.to_string()
    return _local_device_chooser
def get_words_from_chars(characters_list: List[str], sequence_lengths: List[int], name='chars_conversion'):
    """Join per-character tensors back into word strings.

    ``sequence_lengths`` gives the length of each word; characters are
    concatenated slice-by-slice from ``characters_list``.  Returns a string
    tensor of words (graph-mode TF ops, evaluated lazily).
    """
    with tf.name_scope(name=name):
        def join_charcaters_fn(coords):
            # coords = (start, end) indices of one word's characters.
            return tf.reduce_join(characters_list[coords[0]:coords[1]])
        def coords_several_sequences():
            # Cumulative lengths give each word's [start, end) slice.
            end_coords = tf.cumsum(sequence_lengths)
            start_coords = tf.concat([[0], end_coords[:-1]], axis=0)
            coords = tf.stack([start_coords, end_coords], axis=1)
            coords = tf.cast(coords, dtype=tf.int32)
            return tf.map_fn(join_charcaters_fn, coords, dtype=tf.string)
        def coords_single_sequence():
            # Only one word: join everything.
            return tf.reduce_join(characters_list, keep_dims=True)
        words = tf.cond(tf.shape(sequence_lengths)[0] > 1,
                        true_fn=lambda: coords_several_sequences(),
                        false_fn=lambda: coords_single_sequence())
        return words
def get_shadownet_fn(num_gpus, variable_strategy, num_workers):
    """Returns an Estimator model_fn that builds the multi-GPU shadownet."""
    def _shadownet_fun(features, labels, mode, params):
        """model_fn: one tower per GPU, averaged gradients, CTC decoding
        metrics and logging hooks; returns an EstimatorSpec."""
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        tower_features = features
        tower_labels = labels
        tower_losses = []
        tower_gradvars = []
        tower_preds = []
        tower_tensor_dict = []
        tower_seq_len = []
        num_devices = num_gpus
        device_type = 'gpu'
        # The global batch is split evenly across towers.
        tower_batch_size = int(params.batch_size / num_devices)
        for i in range(num_devices):
            worker_device = '/{}:{}'.format(device_type, i)
            device_setter = local_device_setter(worker_device=worker_device)
            # Variables are shared across towers (reuse after the first one).
            with tf.variable_scope('shadownet', reuse=bool(i != 0)):
                with tf.name_scope('tower_%d' % i) as name_scope:
                    with tf.device(device_setter):
                        loss, gradvars, preds, tensor_dict, seq_len = _tower_fn(
                            is_training, tower_features[i], tower_labels[i], tower_batch_size, params.l_size)
                        tower_losses.append(loss)
                        tower_gradvars.append(gradvars)
                        tower_preds.append(preds)
                        tower_tensor_dict.append(tensor_dict)
                        tower_seq_len.append(seq_len)
                        if i == 0:
                            # Only trigger batch_norm moving mean and variance update from
                            # the 1st tower. Ideally, we should grab the updates from all
                            # towers but these stats accumulate extremely fast so we can
                            # ignore the other stats from the other towers without
                            # significant detriment.
                            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
                                                           name_scope)
        # Now compute global loss and gradients.
        gradvars = []
        with tf.name_scope('gradient_averaging'):
            all_grads = {}
            for grad, var in itertools.chain(*tower_gradvars):
                if grad is not None:
                    all_grads.setdefault(var, []).append(grad)
            for var, grads in six.iteritems(all_grads):
                # Average gradients on the same device as the variables
                with tf.device(var.device):
                    if len(grads) == 1:
                        avg_grad = grads[0]
                    else:
                        avg_grad = tf.multiply(tf.add_n(grads), 1. / len(grads))
                gradvars.append((avg_grad, var))
        # Device that runs the ops to apply global gradient updates.
        consolidation_device = '/gpu:0' if variable_strategy == 'GPU' else '/cpu:0'
        with tf.device(consolidation_device):
            global_step = tf.train.get_global_step()
            starter_learning_rate = params.learning_rate
            learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
                                                       params.decay_steps, params.decay_rate,
                                                       staircase=True)
            loss = tf.reduce_mean(tower_losses, name='loss')
            # CTC beam-search decode of the first tower's predictions, used
            # only for the edit-distance monitoring metrics below.
            # NOTE(review): log_prob is unused.
            decoded, log_prob = tf.nn.ctc_beam_search_decoder(tower_preds[0],
                                                              tower_seq_len[0]*np.ones(tower_batch_size),
                                                              merge_repeated=False)
            sequence_dist = tf.reduce_mean(tf.edit_distance(tf.cast(decoded[0], tf.int32), tower_labels[0]))
            sequence_lengths_pred = tf.bincount(tf.cast(decoded[0].indices[:, 0], tf.int32),
                                                minlength=tf.shape(tower_labels[0])[1])
            label_lengths_pred = tf.bincount(tf.cast(labels[0].indices[:, 0], tf.int32),
                                             minlength=tf.shape(tower_labels[0])[1])
            tensors_to_log = {'global_step': global_step, 'learning_rate': learning_rate, 'loss': loss}
            dist_to_log = {'global_step': global_step,
                           'learning_rate': learning_rate,
                           'loss': loss,
                           'train_seq_dist': sequence_dist,
                           'sequence_lengths_pred': sequence_lengths_pred,
                           'label_lengths_pred': label_lengths_pred}
            logging_hook = tf.train.LoggingTensorHook(
                tensors=tensors_to_log, every_n_iter=10)
            dist_hook = tf.train.LoggingTensorHook(
                tensors=dist_to_log, every_n_iter=1000)
            train_hooks = [logging_hook, dist_hook]
            seq_dist_sum = tf.summary.scalar(name='Seq_Dist', tensor=sequence_dist)
            lr_sum = tf.summary.scalar(name='Learning_rate', tensor=learning_rate)
            summaries = [seq_dist_sum, lr_sum]
            # NOTE(review): summary_hook is created but never added to
            # train_hooks, so these summaries are not saved — confirm intent.
            summary_hook = tf.train.SummarySaverHook(
                save_steps=1000,
                output_dir='/data/output/',
                summary_op=summaries)
            optimizer = tf.train.AdadeltaOptimizer(learning_rate=learning_rate)
            if params.sync:
                # Distributed synchronous training: aggregate gradients from
                # num_workers replicas before applying them.
                optimizer = tf.train.SyncReplicasOptimizer(
                    optimizer, replicas_to_aggregate=num_workers)
                sync_replicas_hook = optimizer.make_session_run_hook(params.is_chief)
                train_hooks.append(sync_replicas_hook)
            # Create single grouped train op
            train_op = [
                optimizer.apply_gradients(
                    gradvars, global_step=tf.train.get_global_step())
            ]
            train_op.extend(update_ops)
            train_op = tf.group(*train_op)
            return tf.estimator.EstimatorSpec(
                mode=mode,
                loss=loss,
                train_op=train_op,
                training_hooks=train_hooks)
    return _shadownet_fun
def _tower_fn(is_training, feature, label, batch_size, l_size):
    """Build one training tower: CRNN forward pass, CTC loss plus an L2
    penalty on the LSTM weights, and the per-tower gradients.

    Returns (loss, grad/var pairs, raw network output, tensor dict, seq_len).
    """
    seq_len=l_size
    shadownet = crnn_model.ShadowNet(phase='Train', hidden_nums=256, layers_nums=2, seq_length=seq_len,
                                     num_classes=config.cfg.TRAIN.CLASSES_NUMS, rnn_cell_type='lstm')
    # Images are resized to height 32; width is 4 pixels per output label.
    imgs = tf.image.resize_images(feature, (32, l_size*4), method=0)
    input_imgs = tf.cast(x=imgs, dtype=tf.float32)
    with tf.variable_scope('shadow', reuse=False):
        net_out, tensor_dict = shadownet.build_shadownet(inputdata=input_imgs)
    cost = tf.reduce_mean(tf.nn.ctc_loss(labels=label, inputs=net_out,
                                         sequence_length=seq_len*np.ones(batch_size)))
    #lstm l2 normalization loss
    lstm_tv = tf.trainable_variables(scope='LSTMLayers')
    r_lambda = 0.001
    regularization_cost = r_lambda * tf.reduce_sum([tf.nn.l2_loss(v) for v in lstm_tv])
    cost = cost + regularization_cost
    model_params = tf.trainable_variables()
    tower_grad = tf.gradients(cost, model_params)
    # NOTE(review): on Python 3 zip() is a one-shot iterator; the caller
    # consumes it exactly once (via itertools.chain), which is fine, but it
    # cannot be iterated again.
    return cost, zip(tower_grad, model_params), net_out, tensor_dict, seq_len
def input_fn(data_dir,
             subset,
             num_shards,
             batch_size,
             use_distortion_for_training=True):
    """Create input graph for model.

    Args:
      data_dir: Directory where TFRecords representing the dataset are located.
      subset: one of 'train', 'validate' and 'eval'.
      num_shards: num of towers participating in data-parallel training.
      batch_size: total batch size for training to be divided by the number of
        shards.
      use_distortion_for_training: True to use distortions.
    Returns:
      (feature_shards, label_shards): per-tower image batches and per-tower
      sparse label batches.
    """
    with tf.device('/cpu:0'):
        # Distortions are only applied on the training subset.
        use_distortion = subset == 'train' and use_distortion_for_training
        dataset = shadownet.ShadownetDataSet(data_dir, subset, use_distortion)
        inputdata, input_labels = dataset.make_batch(batch_size)
        if num_shards <= 1:
            # No GPU available or only 1 GPU.
            num_shards = 1
        feature_shards = tf.split(inputdata, num_shards)
        # Labels are sparse (CTC), so they need a sparse split.
        label_shards = tf.sparse_split(sp_input=input_labels, num_split=num_shards, axis=0)
        return feature_shards, label_shards
def get_experiment_fn(data_dir,
                      num_gpus,
                      use_distortion_for_training=True):
    """Return an experiment_fn for tf.contrib.learn.learn_runner.run that
    wires the shadownet model_fn to train/validation input pipelines."""
    def _experiment_fn(run_config, hparams):
        """Returns an Experiment."""
        # Create estimator.
        train_input_fn = functools.partial(
            input_fn,
            data_dir,
            subset='train',
            num_shards=num_gpus,
            batch_size=hparams.batch_size,
            use_distortion_for_training=use_distortion_for_training)
        eval_input_fn = functools.partial(
            input_fn,
            data_dir,
            subset='validation',
            batch_size=hparams.batch_size,
            num_shards=num_gpus)
        train_steps = hparams.steps
        # Evaluate on 2048 examples per eval run.
        eval_steps = 2048 // hparams.batch_size
        variable_strategy = 'CPU'
        classifier = tf.estimator.Estimator(
            model_fn=get_shadownet_fn(num_gpus,
                                      variable_strategy,
                                      run_config.num_worker_replicas or 1),
            config=run_config,
            params=hparams)
        # Create experiment.
        return tf.contrib.learn.Experiment(
            classifier,
            train_input_fn=train_input_fn,
            eval_input_fn=eval_input_fn,
            train_steps=train_steps,
            eval_steps=eval_steps,
            min_eval_frequency=100)
    return _experiment_fn
def main(num_gpus, log_device_placement, num_intra_threads, data_dir, output_dir, tfrecord_dir, **hparams):
    """Configure the TF session/run config and launch the training experiment.

    Remaining CLI options are forwarded as HParams to the model_fn.
    """
    # The env variable is on deprecation path, default is set to off.
    os.environ['TF_SYNC_ON_FINISH'] = '0'
    os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
    data_dir = os.path.join(data_dir, tfrecord_dir)
    # Session configuration.
    sess_config = tf.ConfigProto(
        allow_soft_placement=True,
        log_device_placement=log_device_placement,
        intra_op_parallelism_threads=num_intra_threads,
        gpu_options=tf.GPUOptions(force_gpu_compatible=True))
    config = tf.contrib.learn.RunConfig(session_config=sess_config, model_dir=output_dir)
    tf.contrib.learn.learn_runner.run(
        get_experiment_fn(data_dir, num_gpus),
        run_config=config,
        hparams=tf.contrib.training.HParams(
            is_chief=config.is_chief,
            **hparams))
if __name__ == '__main__':
    # init args
    # args = init_args()
    #if not ops.exists(args.dataset_dir):
    #    raise ValueError('{:s} doesn\'t exist'.format(args.dataset_dir))
    #train_shadownet(args.dataset_dir, args.weights_path)
    # if args.weights_path is not None and 'two_stage' in args.weights_path:
    #     train_shadownet(args.dataset_dir, args.weights_path, restore_from_cnn_subnet_work=False)
    # elif args.weights_path is not None and 'cnnsub' in args.weights_path:
    #     train_shadownet(args.dataset_dir, args.weights_path, restore_from_cnn_subnet_work=True)
    # else:
    #     train_shadownet(args.dataset_dir)
    # Command-line interface; all parsed options are forwarded to main().
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--num_gpus',
        type=int,
        default=1,
        help='UAI-SDK related. The number of gpus used.')
    parser.add_argument(
        '--log-device-placement',
        action='store_true',
        default=False,
        help='Whether to log device placement.')
    parser.add_argument(
        '--num-intra-threads',
        type=int,
        default=0,
        help="""\
        Number of threads to use for intra-op parallelism. When training on CPU
        set to 0 to have the system pick the appropriate number or alternatively
        set it to the number of physical CPU cores.\
        """)
    parser.add_argument(
        '--num-inter-threads',
        type=int,
        default=0,
        help="""\
        Number of threads to use for inter-op parallelism. If set to 0, the
        system will pick an appropriate number.\
        """)
    parser.add_argument(
        '--sync',
        action='store_true',
        default=False,
        help="""\
        If present when running in a distributed environment will run on sync mode.\
        """)
    parser.add_argument(
        '--work_dir',
        type=str,
        default='/data/',
        help='UAI SDK related.')
    parser.add_argument(
        '--data_dir',
        type=str,
        required=True,
        help='UAI-SDK related. The directory where the CIFAR-10 input data is stored.')
    parser.add_argument(
        '--output_dir',
        type=str,
        required=True,
        help='UAI-SDK related. The directory where the model will be stored.')
    parser.add_argument(
        '--log_dir',
        type=str,
        default='/data/data/',
        help='UAI SDK related.')
    parser.add_argument(
        '--l_size',
        type=int,
        default=10,
        help="""l_batch_label, how many labels CNN net work will output into LSTM""")
    parser.add_argument(
        '--learning_rate',
        type=float,
        default=0.1)
    parser.add_argument(
        '--decay_rate',
        type=float,
        default=0.1)
    parser.add_argument(
        '--decay_steps',
        type=int,
        default=40000)
    parser.add_argument(
        '--steps',
        type=int,
        default=200000)
    parser.add_argument(
        '--batch_size',
        type=int,
        default=512)
    parser.add_argument(
        '--tfrecord_dir',
        type=str,
        default='tfrecords')
    args = parser.parse_args()
    main(**vars(args))
    print('Done')
ovs/extensions/hypervisor/hypervisors/vmware.py | mflu/openvstorage_centos | 1 | 17719 | # Copyright 2014 CloudFounders NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for the VMware hypervisor client
"""
import os
from ovs.extensions.hypervisor.apis.vmware.sdk import Sdk
class VMware(object):
    """
    Represents the hypervisor client for VMware.

    Thin wrapper over the vSphere SDK (ovs Sdk class) plus a few pure
    path/name helpers used by the framework.
    """
    def __init__(self, ip, username, password):
        """
        Initializes the object with credentials and connection information
        """
        self.sdk = Sdk(ip, username, password)
        # Maps vSphere power states onto the framework's state names.
        self.state_mapping = {'poweredOn' : 'RUNNING',
                              'poweredOff': 'HALTED',
                              'suspended' : 'PAUSED'}
    def get_state(self, vmid):
        """
        Get the current power state of a virtual machine
        @param vmid: hypervisor id of the virtual machine
        """
        return self.state_mapping[self.sdk.get_power_state(vmid)]
    def create_vm_from_template(self, name, source_vm, disks, ip, mountpoint, wait=True):
        """
        Create a new vmachine from an existing template
        Returns the created VM's identifier when wait=True and the task
        succeeded, otherwise None.
        """
        task = self.sdk.create_vm_from_template(name, source_vm, disks, ip, mountpoint, wait)
        if wait is True:
            if self.sdk.validate_result(task):
                task_info = self.sdk.get_task_info(task)
                return task_info.info.result.value
        return None
    def clone_vm(self, vmid, name, disks, wait=False):
        """
        Clone a vmachine
        @param vmid: hypervisor id of the virtual machine
        @param name: name of the virtual machine
        @param disks: list of disk information
        @param wait: wait for action to complete
        """
        task = self.sdk.clone_vm(vmid, name, disks, wait)
        if wait is True:
            if self.sdk.validate_result(task):
                task_info = self.sdk.get_task_info(task)
                return task_info.info.result.value
        return None
    def delete_vm(self, vmid, storagedriver_mountpoint, storagedriver_storage_ip, devicename, disks_info=None, wait=False):
        """
        Remove the vmachine from the hypervisor
        @param vmid: hypervisor id of the virtual machine
        @param wait: wait for action to complete
        """
        # disks_info is accepted for interface compatibility but unused here.
        if disks_info is None:
            disks_info = []
        _ = disks_info
        self.sdk.delete_vm(vmid, storagedriver_mountpoint, storagedriver_storage_ip, devicename, wait)
    def get_vm_object(self, vmid):
        """
        Gets the VMware virtual machine object from VMware by its identifier
        """
        return self.sdk.get_vm(vmid)
    def get_vm_agnostic_object(self, vmid):
        """
        Gets the VMware virtual machine object from VMware by its identifier,
        converted to the hypervisor-agnostic config format.
        """
        return self.sdk.make_agnostic_config(self.sdk.get_vm(vmid))
    def get_vm_object_by_devicename(self, devicename, ip, mountpoint):
        """
        Gets the VMware virtual machine object from VMware by devicename
        and datastore identifiers
        """
        return self.sdk.make_agnostic_config(self.sdk.get_nfs_datastore_object(ip, mountpoint, devicename)[0])
    def get_vms_by_nfs_mountinfo(self, ip, mountpoint):
        """
        Gets a list of agnostic vm objects for a given ip and mountpoint
        (generator; one agnostic config per VM on that datastore).
        """
        for vm in self.sdk.get_vms(ip, mountpoint):
            yield self.sdk.make_agnostic_config(vm)
    def is_datastore_available(self, ip, mountpoint):
        """
        @param ip : hypervisor ip to query for datastore presence
        @param mountpoint: nfs mountpoint on hypervisor
        @rtype: boolean
        @return: True | False
        """
        return self.sdk.is_datastore_available(ip, mountpoint)
    def set_as_template(self, vmid, disks, wait=False):
        """
        Configure a vm as template
        This lets the machine exist on the hypervisor but configures
        all disks as "Independent Non-persistent"
        @param vmid: hypervisor id of the virtual machine
        """
        return self.sdk.set_disk_mode(vmid, disks, 'independent_nonpersistent', wait)
    def mount_nfs_datastore(self, name, remote_host, remote_path):
        """
        Mounts a given NFS export as a datastore
        """
        return self.sdk.mount_nfs_datastore(name, remote_host, remote_path)
    def test_connection(self):
        """
        Checks whether this node is a vCenter
        """
        return self.sdk.test_connection()
    def clean_backing_disk_filename(self, path):
        """
        Cleans a backing disk filename to the corresponding disk filename
        (pure string helper; no hypervisor call)
        """
        _ = self
        return path.replace('-flat.vmdk', '.vmdk').strip('/')
    def get_backing_disk_path(self, machinename, devicename):
        """
        Builds the path for the file backing a given device/disk
        """
        _ = self
        return '/{}/{}-flat.vmdk'.format(machinename.replace(' ', '_'), devicename)
    def get_disk_path(self, machinename, devicename):
        """
        Builds the path for the file backing a given device/disk
        """
        _ = self
        return '/{}/{}.vmdk'.format(machinename.replace(' ', '_'), devicename)
    def clean_vmachine_filename(self, path):
        """
        Cleans a VM filename
        """
        _ = self
        return path.strip('/')
    def get_vmachine_path(self, machinename, storagerouter_machineid):
        """
        Builds the path for the file representing a given vmachine
        """
        _ = self, storagerouter_machineid  # For compatibility purposes only
        machinename = machinename.replace(' ', '_')
        return '/{}/{}.vmx'.format(machinename, machinename)
    def get_rename_scenario(self, old_name, new_name):
        """
        Gets the rename scenario based on the old and new name
        (.vmx -> .vmx is a RENAME, .vmx~ -> .vmx an UPDATE, anything else
        is UNSUPPORTED)
        """
        _ = self
        if old_name.endswith('.vmx') and new_name.endswith('.vmx'):
            return 'RENAME'
        elif old_name.endswith('.vmx~') and new_name.endswith('.vmx'):
            return 'UPDATE'
        return 'UNSUPPORTED'
    def should_process(self, devicename, machine_ids=None):
        """
        Checks whether a given device should be processed
        (always True for VMware; parameters kept for interface parity)
        """
        _ = self, devicename, machine_ids
        return True
    def file_exists(self, vpool, devicename):
        """
        Check if devicename exists on the given vpool
        """
        _ = self
        filename = '/mnt/{0}/{1}'.format(vpool.name, devicename)
        return os.path.exists(filename) and os.path.isfile(filename)
| 2.03125 | 2 |
fake_switches/dell10g/command_processor/config_interface.py | idjaw/fake-switches | 0 | 17720 | <gh_stars>0
# Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fake_switches.dell.command_processor.config_interface import DellConfigInterfaceCommandProcessor, parse_vlan_list
from fake_switches.switch_configuration import AggregatedPort
class Dell10GConfigInterfaceCommandProcessor(DellConfigInterfaceCommandProcessor):
    """``configure interface`` command processor for the fake Dell 10G switch.

    Specializes the base Dell processor with the 10G prompt format and the
    10G-specific ``switchport`` / ``lldp`` / ``mtu`` command handling.
    Throughout, ``"word".startswith(args[i])`` implements CLI abbreviation
    matching: the user's (possibly shortened) token is a prefix of the
    full keyword.
    """

    def __init__(self, switch_configuration, terminal_controller, logger,
                 piping_processor, port):
        # NOTE(review): super() deliberately names the *parent* class, which
        # skips DellConfigInterfaceCommandProcessor.__init__ and invokes the
        # grandparent's __init__ instead -- confirm this is intentional.
        super(DellConfigInterfaceCommandProcessor, self).__init__(
            switch_configuration, terminal_controller, logger, piping_processor,
            port)
        # Quote characters stripped from interface descriptions.
        self.description_strip_chars = "\"'"

    def get_prompt(self):
        """Return the config-if prompt, e.g. ``sw(config-if-Te1/0/1)#``.

        Aggregated ports use the ``Po`` (port-channel) prefix, physical
        ports use ``Te`` (ten-gigabit).
        """
        short_name = self.port.name.split(' ')[1]
        return "{}(config-if-{}{})#".format(
            self.switch_configuration.name,
            "Po" if isinstance(self.port, AggregatedPort) else "Te",
            short_name)

    def configure_lldp_port(self, args, target_value):
        """Apply an ``lldp`` sub-command to the port.

        ``args`` are the words following ``lldp``; ``target_value`` is True
        for the plain form and False for the ``no`` form.
        """
        if "transmit".startswith(args[0]):
            self.port.lldp_transmit = target_value
        elif "receive".startswith(args[0]):
            self.port.lldp_receive = target_value
        elif "med".startswith(args[0]):
            if len(args) == 1:
                # "lldp med" with no sub-option toggles med as a whole.
                self.port.lldp_med = target_value
            elif "transmit-tlv".startswith(args[1]):
                if "capabilities".startswith(args[2]):
                    self.port.lldp_med_transmit_capabilities = target_value
                elif "network-policy".startswith(args[2]):
                    self.port.lldp_med_transmit_network_policy = target_value

    def do_switchport(self, *args):
        """Handle the ``switchport ...`` family of commands."""
        if "access".startswith(args[0]) and "vlan".startswith(args[1]):
            self.set_access_vlan(int(args[2]))
        elif "mode".startswith(args[0]):
            self.set_switchport_mode(args[1])
        elif ("general".startswith(args[0]) or "trunk".startswith(args[0])) and "allowed".startswith(args[1]):
            if "vlan".startswith(args[2]) and args[0] == "general":
                if len(args) > 5:
                    # Mimics the real CLI's error for extra tokens.
                    self.write_line(" ^")
                    self.write_line("% Invalid input detected at '^' marker.")
                else:
                    operation = args[3]
                    vlan_range = args[4]
                    self.update_trunk_vlans(operation, vlan_range)
                # The "general" path writes no trailing blank line.
                return
            elif "vlan".startswith(args[2]) and args[0] == "trunk":
                if len(args) > 5:
                    self.write_line(" ^")
                    self.write_line("% Invalid input detected at '^' marker.")
                else:
                    if args[0:4] == ("trunk", "allowed", "vlan", "add"):
                        # None means "all vlans allowed"; adding to "all" is a no-op.
                        if self.port.trunk_vlans is not None:
                            self.port.trunk_vlans = sorted(list(set(self.port.trunk_vlans + parse_vlan_list(args[4]))))
                    elif args[0:4] == ("trunk", "allowed", "vlan", "remove"):
                        if self.port.trunk_vlans is None:
                            # NOTE(review): relies on Python 2 where range()
                            # returns a list with .remove() -- confirm if
                            # this code ever runs under Python 3.
                            self.port.trunk_vlans = range(1, 4097)
                        for v in parse_vlan_list(args[4]):
                            if v in self.port.trunk_vlans:
                                self.port.trunk_vlans.remove(v)
                        if len(self.port.trunk_vlans) == 0:
                            self.port.trunk_vlans = None
                    elif args[0:4] == ("trunk", "allowed", "vlan", "none"):
                        self.port.trunk_vlans = []
                    elif args[0:4] == ("trunk", "allowed", "vlan", "all"):
                        self.port.trunk_vlans = None
                    elif args[0:3] == ("trunk", "allowed", "vlan"):
                        self.port.trunk_vlans = parse_vlan_list(args[3])
        elif args[0:3] == ("trunk", "native", "vlan"):
            self.port.trunk_native_vlan = int(args[3])
        elif "general".startswith(args[0]) and "pvid".startswith(args[1]):
            self.set_trunk_native_vlan(int(args[2]))

        self.write_line("")

    def do_no_switchport(self, *args):
        """Handle the ``no switchport ...`` family of commands."""
        if "mode".startswith(args[0]):
            # "no switchport mode" reverts to access mode.
            self.set_switchport_mode("access")
        elif "access".startswith(args[0]):
            if "vlan".startswith(args[1]):
                self.print_vlan_warning()
                self.port.access_vlan = None
        elif args[0] in ("trunk", "general") and args[1:3] == ("allowed", "vlan"):
            # Back to the default "all vlans allowed".
            self.port.trunk_vlans = None
        elif "general".startswith(args[0]):
            if "pvid".startswith(args[1]):
                self.port.trunk_native_vlan = None

        self.write_line("")

    def do_mtu(self, *args):
        # The 10G model rejects per-interface mtu configuration.
        self.write_line(" ^")
        self.write_line("% Invalid input detected at '^' marker.")
        self.write_line("")

    def do_no_mtu(self, *args):
        # Same rejection for the "no" form.
        self.write_line(" ^")
        self.write_line("% Invalid input detected at '^' marker.")
        self.write_line("")

    def set_switchport_mode(self, mode):
        """Set the port's switchport mode; only access/trunk/general exist."""
        if mode not in ("access", "trunk", "general"):
            self.write_line(" ^")
            self.write_line("% Invalid input detected at '^' marker.")
        else:
            self.port.mode = mode

    def set_trunk_native_vlan(self, native_vlan):
        """Set the native (pvid) vlan; the vlan must already be configured."""
        vlan = self.switch_configuration.get_vlan(native_vlan)
        if vlan is None:
            self.write_line("Could not configure pvid.")
        else:
            self.port.trunk_native_vlan = vlan.number

    def print_vlan_warning(self):
        # Overrides the parent hook: the 10G model prints no warning here.
        pass
| 1.945313 | 2 |
setup.py | Spredzy/python-memsource | 0 | 17721 | <filename>setup.py
#!/usr/bin/env python
import setuptools
from memsource import version
# The README doubles as the PyPI long description.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

# Packaging metadata for the memsource client library.
setuptools.setup(
    name="memsource",
    # Single-sourced from memsource/version.py.
    version=version.__version__,
    author="<NAME>",
    author_email="<EMAIL>",
    description="Python bindings for Memsource",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Spredzy/python-memsource",
    packages=setuptools.find_packages(),
    # Runtime dependency: HTTP calls to the Memsource API.
    install_requires=[
        "requests"
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "Intended Audience :: Information Technology",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: POSIX :: Linux",
    ],
    python_requires=">=3.6",
)
| 1.289063 | 1 |
sktime/transformations/series/func_transform.py | marcio55afr/sktime | 2 | 17722 | <reponame>marcio55afr/sktime
#!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
"""Implements FunctionTransformer, a class to create custom transformers."""
__author__ = ["<NAME>"]
__all__ = ["FunctionTransformer"]
import numpy as np
from sktime.transformations.base import _SeriesToSeriesTransformer
from sktime.utils.validation.series import check_series
def _identity(X):
"""Return X."""
return X
class FunctionTransformer(_SeriesToSeriesTransformer):
    r"""Constructs a transformer from an arbitrary callable.

    A FunctionTransformer forwards its y (and optionally X) arguments to a
    user-defined function or function object and returns the result of this
    function. This is useful for stateless transformations such as taking the
    log of frequencies, doing custom scaling, etc.

    Note: If a lambda is used as the function, then the resulting
    transformer will not be pickleable.

    Parameters
    ----------
    func : callable, default=None
        The callable to use for the transformation. This will be passed
        the same arguments as transform, with args and kwargs forwarded.
        If func is None, then func will be the identity function.
    inverse_func : callable, default=None
        The callable to use for the inverse transformation. This will be
        passed the same arguments as inverse transform, with args and
        kwargs forwarded. If inverse_func is None, then inverse_func
        will be the identity function.
    check_inverse : bool, default=True
        Whether to check that ``func`` followed by ``inverse_func`` leads to
        the original inputs. It can be used for a sanity check, raising a
        warning when the condition is not fulfilled.
    kw_args : dict, default=None
        Dictionary of additional keyword arguments to pass to func.
    inv_kw_args : dict, default=None
        Dictionary of additional keyword arguments to pass to inverse_func.

    See Also
    --------
    sktime.transformations.series.boxcox.LogTransformer :
        Transformer input data using natural log. Can help normalize data and
        compress variance of the series.
    sktime.transformations.series.exponent.ExponentTransformer :
        Transform input data by raising it to an exponent. Can help compress
        variance of series if a fractional exponent is supplied.
    sktime.transformations.series.exponent.SqrtTransformer :
        Transform input data by taking its square root. Can help compress
        variance of input series.

    Examples
    --------
    >>> import numpy as np
    >>> from sktime.transformations.series.func_transform import FunctionTransformer
    >>> transformer = FunctionTransformer(np.log1p, np.expm1)
    >>> X = np.array([[0, 1], [2, 3]])
    >>> transformer.fit_transform(X)
    array([[0.        , 0.69314718],
           [1.09861229, 1.38629436]])
    """

    _tags = {
        "handles-missing-data": True,
        "fit-in-transform": False,
    }

    def __init__(
        self,
        func=None,
        inverse_func=None,
        *,
        check_inverse=True,
        kw_args=None,
        inv_kw_args=None,
    ):
        # Parameters are stored untouched (sklearn/sktime convention: no
        # validation or mutation in __init__; defaults resolved at call time).
        self.func = func
        self.inverse_func = inverse_func
        self.check_inverse = check_inverse
        self.kw_args = kw_args
        self.inv_kw_args = inv_kw_args
        super(FunctionTransformer, self).__init__()

    def _check_inverse_transform(self, Z):
        """Check that func and inverse_func are each other's inverse."""
        Z_round_trip = self.inverse_func(self.func(Z))
        # NOTE(review): this *raises* a UserWarning instance rather than
        # issuing warnings.warn -- the message says "warning" but callers
        # actually get an exception; confirm this is the intended contract.
        if not np.allclose(Z_round_trip, Z, equal_nan=True):
            raise UserWarning(
                "The provided functions are not strictly"
                " inverse of each other. If you are sure you"
                " want to proceed regardless, set"
                " 'check_inverse=False'."
            )

    def fit(self, Z, X=None):
        """Fit data.

        Parameters
        ----------
        Z : pd.Series / pd.DataFrame
            Series / DataFrame to fit.
        X : pd.DataFrame, optional (default=None)
            Exogenous data used in transformation.

        Returns
        -------
        self
        """
        # The round-trip check only makes sense when both callables are set;
        # a None on either side means "identity", which is trivially exact.
        if self.check_inverse and not (self.func is None or self.inverse_func is None):
            self._check_inverse_transform(Z)
        self._is_fitted = True
        return self

    def transform(self, Z, X=None):
        """Transform data.

        Parameters
        ----------
        Z : pd.Series / pd.DataFrame
            Series / DataFrame to transform.
        X : pd.DataFrame, optional (default=None)
            Exogenous data used in transformation.

        Returns
        -------
        Zt : pd.Series / pd.DataFrame
            Transformed data.
        """
        self.check_is_fitted()
        Z = check_series(Z)
        return self._apply_function(Z, func=self.func, kw_args=self.kw_args)

    def inverse_transform(self, Z, X=None):
        """Inverse transform data.

        Parameters
        ----------
        Z : pd.Series / pd.DataFrame
            Series / DataFrame to transform.
        X : pd.DataFrame, optional (default=None)
            Exogenous data used in transformation.

        Returns
        -------
        Zt : pd.Series / pd.DataFrame
            Inverse transformed data.
        """
        self.check_is_fitted()
        Z = check_series(Z)
        return self._apply_function(Z, func=self.inverse_func, kw_args=self.inv_kw_args)

    def _apply_function(self, Z, func=None, kw_args=None):
        # None falls back to the identity, matching the documented defaults.
        if func is None:
            func = _identity
        return func(Z, **(kw_args if kw_args else {}))
| 3.109375 | 3 |
regparser/tree/xml_parser/reg_text.py | cfpb/regulations-parser | 36 | 17723 | # vim: set encoding=utf-8
import re
from lxml import etree
import logging
from regparser import content
from regparser.tree.depth import heuristics, rules, markers as mtypes
from regparser.tree.depth.derive import derive_depths
from regparser.tree.struct import Node
from regparser.tree.paragraph import p_level_of
from regparser.tree.xml_parser.appendices import build_non_reg_text
from regparser.tree import reg_text
from regparser.tree.xml_parser import tree_utils
from settings import PARAGRAPH_HIERARCHY
def get_reg_part(reg_doc):
    """
    Depending on source, the CFR part number exists in different places.
    Gather every candidate in priority order and return the first non-blank
    one (None when nothing usable is found).
    """
    candidates = []
    # FR notice: PART attribute on REGTEXT
    candidates += [node.attrib['PART'] for node in reg_doc.xpath('//REGTEXT')]
    # e-CFR XML, under PART/EAR
    candidates += [node.text.replace('Pt.', '').strip()
                   for node in reg_doc.xpath('//PART/EAR')
                   if 'Pt.' in node.text]
    # e-CFR XML, under FDSYS/HEADING
    candidates += [node.text.replace('PART', '').strip()
                   for node in reg_doc.xpath('//FDSYS/HEADING')
                   if 'PART' in node.text]
    # e-CFR XML, under FDSYS/GRANULENUM
    candidates += [node.text.strip()
                   for node in reg_doc.xpath('//FDSYS/GRANULENUM')]
    for candidate in candidates:
        if candidate.strip():
            return candidate
def get_title(reg_doc):
    """Return the regulation's title: the text of the first PART/HD node."""
    return reg_doc.xpath('//PART/HD')[0].text
def preprocess_xml(xml):
    """This transforms the read XML through macros. Each macro consists of
    an xpath and a replacement xml string.

    Mutates ``xml`` in place: every node matching a macro's xpath is removed
    and the macro's replacement elements are spliced in at its position.
    """
    for path, replacement in content.Macros():
        # Wrap in a dummy ROOT so a replacement may contain multiple
        # top-level elements and still parse.
        replacement = etree.fromstring('<ROOT>' + replacement + '</ROOT>')
        for node in xml.xpath(path):
            parent = node.getparent()
            idx = parent.index(node)
            parent.remove(node)
            # Insert the replacement children one by one, preserving order
            # at the removed node's original index.
            for repl in replacement:
                parent.insert(idx, repl)
                idx += 1
def build_tree(reg_xml):
    """Build the regulation Node tree from XML (a string or parsed element).

    NOTE: Python 2 code -- the ``unicode`` check below has no Python 3
    equivalent here.
    """
    if isinstance(reg_xml, str) or isinstance(reg_xml, unicode):
        doc = etree.fromstring(reg_xml)
    else:
        doc = reg_xml
    # Apply content macros before any structural parsing.
    preprocess_xml(doc)

    reg_part = get_reg_part(doc)
    title = get_title(doc)

    # Root node: empty text, label is just the part number.
    tree = Node("", [], [reg_part], title)

    part = doc.xpath('//PART')[0]

    # Regulations either group sections under SUBPARTs or list them
    # directly; in the latter case we synthesize an "empty" subpart.
    subpart_xmls = [c for c in part.getchildren() if c.tag == 'SUBPART']
    if len(subpart_xmls) > 0:
        subparts = [build_subpart(reg_part, s) for s in subpart_xmls]
        tree.children = subparts
    else:
        section_xmls = [c for c in part.getchildren() if c.tag == 'SECTION']
        sections = []
        for section_xml in section_xmls:
            sections.extend(build_from_section(reg_part, section_xml))
        empty_part = reg_text.build_empty_part(reg_part)
        empty_part.children = sections
        tree.children = [empty_part]

    # Appendices and interpretations live outside the main section list.
    non_reg_sections = build_non_reg_text(doc, reg_part)

    tree.children += non_reg_sections

    return tree
def get_subpart_title(subpart_xml):
    """Return the subpart's title: text of its first HD or RESERVED child.

    Raises IndexError when the subpart has neither (same behavior as before).
    """
    hds = subpart_xml.xpath('./HD|./RESERVED')
    # Take the first node's text directly rather than materializing a full
    # list of all texts just to index element 0.
    return hds[0].text
def build_subpart(reg_part, subpart_xml):
    """Build a subpart Node: title from HD/RESERVED, children from SECTIONs."""
    subpart_title = get_subpart_title(subpart_xml)
    subpart = reg_text.build_subpart(subpart_title, reg_part)

    sections = []
    for ch in subpart_xml.getchildren():
        if ch.tag == 'SECTION':
            # A single SECTION element can expand to several section nodes
            # (e.g. a "§§ a-b" span), hence extend rather than append.
            sections.extend(build_from_section(reg_part, ch))

    subpart.children = sections
    return subpart
# @profile
def get_markers(text):
    """Extract all paragraph markers from ``text``.

    Returns the explicit markers followed by any "collapsed" markers
    (markers embedded mid-paragraph), after filtering collapsed markers
    that are not at least one level below the last explicit marker.
    """
    markers = tree_utils.get_paragraph_markers(text)
    collapsed_markers = tree_utils.get_collapsed_markers(text)

    #   Check that the collapsed markers make sense (i.e. are at least one
    #   level below the initial marker)
    if markers and collapsed_markers:
        initial_marker_levels = p_level_of(markers[-1])
        # Keep only collapsed markers whose level sits strictly below some
        # level of the last explicit marker.
        collapsed_markers = [
            cm for cm in collapsed_markers
            if any(c > f for f in initial_marker_levels
                   for c in p_level_of(cm))]

    # list() instead of a copying comprehension (same result, clearer intent)
    return list(markers) + list(collapsed_markers)
def get_markers_and_text(node, markers_list):
    """Pair each marker with its (plain text, tagged text) slice of ``node``.

    Returns a list of ``(marker, (plain, tagged))`` pairs. NOTE: Python 2
    code -- ``zip`` returns a list here.
    """
    node_text = tree_utils.get_node_text(node, add_spaces=True)
    text_with_tags = tree_utils.get_node_text_tags_preserved(node)

    if len(markers_list) > 1:
        # Split the node's text at each "(marker)" occurrence; the tagged
        # variant keeps <E> emphasis tags, the plain variant strips them.
        actual_markers = ['(%s)' % m for m in markers_list]
        plain_markers = [m.replace('<E T="03">', '').replace('</E>', '')
                         for m in actual_markers]
        node_texts = tree_utils.split_text(node_text, plain_markers)
        tagged_texts = tree_utils.split_text(text_with_tags, actual_markers)
        node_text_list = zip(node_texts, tagged_texts)
    elif markers_list:
        # Single marker: the whole node text belongs to it.
        node_text_list = [(node_text, text_with_tags)]
    else:
        # No markers at all: one empty pair so zip yields nothing... actually
        # yields one pair with empty text; callers rely on this shape.
        node_text_list = [('', '')]

    return zip(markers_list, node_text_list)
def next_marker(xml_node, remaining_markers):
    """Try to determine the marker following the current xml_node. Remaining
    markers is a list of other marks *within* the xml_node. May return
    None"""
    #   More markers in this xml node
    if remaining_markers:
        return remaining_markers[0][0]

    #   Check the next xml node; skip over stars
    sib = xml_node.getnext()
    while sib is not None and sib.tag in ('STARS', 'PRTPAGE'):
        sib = sib.getnext()
    if sib is not None:
        next_text = tree_utils.get_node_text(sib)
        next_markers = get_markers(next_text)
        if next_markers:
            return next_markers[0]
    # Falls through to an implicit None when no sibling carries a marker.
def build_from_section(reg_part, section_xml):
    """Build the Node subtree(s) for one SECTION element.

    Returns a list of section Nodes -- usually one, but a "§§ a-b" span
    expands to one node per section number. Paragraph depth is derived via
    constraint programming unless a manual hierarchy is configured (in
    settings.PARAGRAPH_HIERARCHY or via per-paragraph "depth" attributes).
    """
    section_texts = []
    nodes = []
    # e.g. "§ 1005.10" -> "1005.10"
    section_no = section_xml.xpath('SECTNO')[0].text
    section_no_without_marker = re.search(r'[0-9]+\.[0-9]+',
                                          section_no).group(0)
    subject_xml = section_xml.xpath('SUBJECT')
    if not subject_xml:
        subject_xml = section_xml.xpath('RESERVED')
    subject_text = subject_xml[0].text

    manual_hierarchy = []
    if (reg_part in PARAGRAPH_HIERARCHY
            and section_no_without_marker in PARAGRAPH_HIERARCHY[reg_part]):
        manual_hierarchy = PARAGRAPH_HIERARCHY[reg_part][
            section_no_without_marker]

    # Collect paragraph markers and section text (intro text for the
    # section)
    i = 0
    children = [ch for ch in section_xml.getchildren()
                if ch.tag in ['P', 'STARS']]
    for ch in children:
        text = tree_utils.get_node_text(ch, add_spaces=True)
        tagged_text = tree_utils.get_node_text_tags_preserved(ch)
        markers_list = get_markers(tagged_text.strip())

        # If the child has a 'DEPTH' attribute, we're in manual
        # hierarchy mode, just constructed from the XML instead of
        # specified in configuration.
        # This presumes that every child in the section has DEPTH
        # specified, if not, things will break in and around
        # derive_depths below.
        if ch.get("depth") is not None:
            manual_hierarchy.append(int(ch.get("depth")))

        if ch.tag == 'STARS':
            nodes.append(Node(label=[mtypes.STARS_TAG]))
        elif not markers_list and manual_hierarchy:
            # is this a bunch of definitions that don't have numbers next to
            # them?
            if len(nodes) > 0:
                # BUG FIX: str.find returns -1 (truthy) when absent, so the
                # original bare "...find(...)" made this condition almost
                # always true; compare against -1 as the sibling check does.
                if (subject_text.find('Definitions.') > -1
                        or nodes[-1].text.find(
                            'For the purposes of this section') > -1):
                    # TODO: create a grammar for definitions
                    if text.find('means') > -1:
                        def_marker = text.split('means')[0].strip().split()
                        def_marker = ''.join([word[0].upper() + word[1:]
                                              for word in def_marker])
                    elif text.find('shall have the same meaning') > -1:
                        def_marker = text.split('shall')[0].strip().split()
                        def_marker = ''.join([word[0].upper() + word[1:]
                                              for word in def_marker])
                    else:
                        def_marker = 'def{0}'.format(i)
                        i += 1
                    n = Node(text, label=[def_marker], source_xml=ch)
                    n.tagged_text = tagged_text
                    nodes.append(n)
                else:
                    section_texts.append((text, tagged_text))
            else:
                if len(children) > 1:
                    def_marker = 'def{0}'.format(i)
                    n = Node(text, [], [def_marker], source_xml=ch)
                    n.tagged_text = tagged_text
                    i += 1
                    nodes.append(n)
                else:
                    # this is the only node around
                    section_texts.append((text, tagged_text))
        elif not markers_list and not manual_hierarchy:
            # No manual hierarchy specified, append to the section.
            section_texts.append((text, tagged_text))
        else:
            for m, node_text in get_markers_and_text(ch, markers_list):
                n = Node(node_text[0], [], [m], source_xml=ch)
                n.tagged_text = unicode(node_text[1])
                nodes.append(n)
            # NOTE(review): relies on node_text keeping its value from the
            # last loop iteration -- confirm paragraphs never reach here
            # with an empty markers_list.
            if node_text[0].endswith('* * *'):
                nodes.append(Node(label=[mtypes.INLINE_STARS]))

    # Trailing stars don't matter; slightly more efficient to ignore them
    while nodes and nodes[-1].label[0] in mtypes.stars:
        nodes = nodes[:-1]

    m_stack = tree_utils.NodeStack()

    # Use constraint programming to figure out possible depth assignments
    if not manual_hierarchy:
        depths = derive_depths(
            [node.label[0] for node in nodes],
            [rules.depth_type_order([mtypes.lower, mtypes.ints, mtypes.roman,
                                     mtypes.upper, mtypes.em_ints,
                                     mtypes.em_roman])])

    if not manual_hierarchy and depths:
        # Find the assignment which violates the least of our heuristics
        depths = heuristics.prefer_multiple_children(depths, 0.5)
        depths = sorted(depths, key=lambda d: d.weight, reverse=True)
        depths = depths[0]

        for node, par in zip(nodes, depths):
            if par.typ != mtypes.stars:
                last = m_stack.peek()
                node.label = [l.replace('<E T="03">', '').replace('</E>', '')
                              for l in node.label]
                if len(last) == 0:
                    m_stack.push_last((1 + par.depth, node))
                else:
                    m_stack.add(1 + par.depth, node)
    elif nodes and manual_hierarchy:
        logging.warning('Using manual depth hierarchy.')
        depths = manual_hierarchy
        if len(nodes) == len(depths):
            for node, spec in zip(nodes, depths):
                if isinstance(spec, int):
                    depth = spec
                elif isinstance(spec, tuple):
                    # (depth, marker) pairs override the node's marker
                    depth, marker = spec
                    node.marker = marker
                last = m_stack.peek()
                node.label = [l.replace('<E T="03">', '').replace('</E>', '')
                              for l in node.label]
                if len(last) == 0:
                    m_stack.push_last((1 + depth, node))
                else:
                    m_stack.add(1 + depth, node)
        else:
            logging.error('Manual hierarchy length does not match node '
                          'list length! ({0} nodes but {1} provided, '
                          '{2})'.format(
                              len(nodes),
                              len(depths),
                              [x.label[0] for x in nodes]))
    elif nodes and not manual_hierarchy:
        # Constraint solver failed: flatten everything to a fixed depth.
        logging.warning(
            'Could not determine depth when parsing {0}:\n{1}'.format(
                section_no_without_marker, [node.label[0] for node in nodes]))
        for node in nodes:
            last = m_stack.peek()
            node.label = [l.replace('<E T="03">', '').replace('</E>', '')
                          for l in node.label]
            if len(last) == 0:
                m_stack.push_last((3, node))
            else:
                m_stack.add(3, node)

    nodes = []
    section_nums = []
    for match in re.finditer(r'%s\.(\d+)' % reg_part, section_no):
        section_nums.append(int(match.group(1)))

    #  Span of section numbers, e.g. "§§ 1005.1-1005.4"
    if u'§§' == section_no[:2] and '-' in section_no:
        first, last = section_nums
        section_nums = []
        for i in range(first, last + 1):
            section_nums.append(i)

    for section_number in section_nums:
        section_number = str(section_number)
        plain_sect_texts = [s[0] for s in section_texts]
        tagged_sect_texts = [s[1] for s in section_texts]

        section_title = u"§ " + reg_part + "." + section_number
        if subject_text:
            section_title += " " + subject_text

        section_text = ' '.join([section_xml.text] + plain_sect_texts)
        tagged_section_text = ' '.join([section_xml.text] + tagged_sect_texts)

        sect_node = Node(section_text, label=[reg_part, section_number],
                         title=section_title)
        sect_node.tagged_text = tagged_section_text

        m_stack.add_to_bottom((1, sect_node))

        while m_stack.size() > 1:
            m_stack.unwind()

        nodes.append(m_stack.pop()[0][1])

    return nodes
| 2.265625 | 2 |
setup.py | mark-dawn/stytra | 0 | 17724 | from distutils.core import setup
from setuptools import find_packages
# Packaging metadata for stytra (behavior tracking / stimulation toolkit).
setup(
    name="stytra",
    version="0.1",
    author="<NAME>, <NAME> @portugueslab",
    author_email="<EMAIL>",
    license="MIT",
    packages=find_packages(),
    # GUI (pyqtgraph/qdarkstyle), numerics (numpy/numba), data IO
    # (deepdish/pims), and infrastructure (pymongo/GitPython) dependencies.
    install_requires=[
        "pyqtgraph>=0.10.0",
        "numpy",
        "numba",
        "matplotlib",
        "pandas",
        "qdarkstyle",
        "qimage2ndarray",
        "deepdish",
        "param",
        "pims",
        "GitPython",
        "pymongo",
        "colorspacious",
        "arrayqueues",
    ],
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Science/Research",
        # Pick your license as you wish (should match "license" above)
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
    ],
    keywords="tracking processing",
    description="A modular package to control stimulation and track behaviour in zebrafish experiments.",
    project_urls={
        "Source": "https://github.com/portugueslab/stytra",
        "Tracker": "https://github.com/portugueslab/stytra/issues",
    },
)
| 1.179688 | 1 |
checkTicTacToe/checkTicTacToe.py | nate-ar-williams/coding-questions | 0 | 17725 | #!/usr/bin/python3
# let board be 3x3 bool array
def isWin(board):
    """Return True when the 3x3 boolean board contains a completed line.

    ``board`` is a 3x3 array of bools where True marks one player's pieces
    (per the module comment). A win is any full row, full column, or either
    diagonal whose three cells are all True.

    The previous body was unfinished and not valid Python (``!win`` and a
    dangling ``while``); this is a working implementation with the same
    name and signature.
    """
    lines = list(board)                                            # rows
    lines += [[board[r][c] for r in range(3)] for c in range(3)]   # columns
    lines.append([board[i][i] for i in range(3)])                  # main diagonal
    lines.append([board[i][2 - i] for i in range(3)])              # anti-diagonal
    return any(all(line) for line in lines)
def main():
    """Entry point placeholder; intentionally does nothing yet."""


if __name__ == '__main__':
    main()
| 3.65625 | 4 |
openerp/exceptions.py | ntiufalara/openerp7 | 3 | 17726 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" OpenERP core exceptions.
This module defines a few exception types. Those types are understood by the
RPC layer. Any other exception type bubbling until the RPC layer will be
treated as a 'Server error'.
"""
class Warning(Exception):
    """User-level warning surfaced to the client through the RPC layer.

    NOTE(review): intentionally shadows the builtin ``Warning`` inside this
    module -- the RPC layer dispatches on this exact type; confirm before
    renaming.
    """
    pass
class AccessDenied(Exception):
    """ Login/password error. No message, no traceback. """
    def __init__(self):
        super(AccessDenied, self).__init__('Access denied.')
        # Blank out the exc_info-style triple so no detail (paths, frames,
        # credentials) can leak to the RPC client on a failed login.
        self.traceback = ('', '', '')
class AccessError(Exception):
    """ Access rights error. """
    # Raised when a user lacks the access rights for an operation; the RPC
    # layer recognizes this type and reports it as an access-rights error.
class DeferredException(Exception):
    """ Exception object holding a traceback for asynchronous reporting.

    Some RPC calls (database creation and report generation) happen with
    an initial request followed by multiple, polling requests. This class
    is used to store the possible exception occuring in the thread serving
    the first request, and is then sent to a polling request.

    ('Traceback' is misleading, this is really a exc_info() triple.)
    """
    def __init__(self, msg, tb):
        # msg: human-readable description; tb: the exc_info() triple
        # captured in the worker thread, replayed to the polling client.
        self.message = msg
        self.traceback = tb
self.traceback = tb
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 1.820313 | 2 |
MainUi.py | james646-hs/Fgo_teamup | 18 | 17727 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'MainUi.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.setEnabled(True)
MainWindow.resize(1070, 837)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
MainWindow.setMinimumSize(QtCore.QSize(0, 0))
MainWindow.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout_2.setContentsMargins(5, 10, 5, 5)
self.verticalLayout_2.setSpacing(5)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.groupBox = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox.setTitle("")
self.groupBox.setObjectName("groupBox")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.groupBox)
self.verticalLayout_3.setContentsMargins(5, 5, 5, 5)
self.verticalLayout_3.setSpacing(5)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.gridLayout_3 = QtWidgets.QGridLayout()
self.gridLayout_3.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.gridLayout_3.setObjectName("gridLayout_3")
self.label_costume_state_4 = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_costume_state_4.sizePolicy().hasHeightForWidth())
self.label_costume_state_4.setSizePolicy(sizePolicy)
self.label_costume_state_4.setMaximumSize(QtCore.QSize(16777215, 28))
self.label_costume_state_4.setObjectName("label_costume_state_4")
self.gridLayout_3.addWidget(self.label_costume_state_4, 4, 4, 1, 1)
self.label_servant_state_2 = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_servant_state_2.sizePolicy().hasHeightForWidth())
self.label_servant_state_2.setSizePolicy(sizePolicy)
self.label_servant_state_2.setMinimumSize(QtCore.QSize(110, 65))
self.label_servant_state_2.setMaximumSize(QtCore.QSize(16777215, 65))
self.label_servant_state_2.setObjectName("label_servant_state_2")
self.gridLayout_3.addWidget(self.label_servant_state_2, 2, 1, 1, 1)
self.line_7 = QtWidgets.QFrame(self.groupBox)
self.line_7.setFrameShape(QtWidgets.QFrame.VLine)
self.line_7.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_7.setObjectName("line_7")
self.gridLayout_3.addWidget(self.line_7, 0, 7, 1, 1)
self.label_costume_state_1 = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_costume_state_1.sizePolicy().hasHeightForWidth())
self.label_costume_state_1.setSizePolicy(sizePolicy)
self.label_costume_state_1.setMaximumSize(QtCore.QSize(16777212, 28))
self.label_costume_state_1.setObjectName("label_costume_state_1")
self.gridLayout_3.addWidget(self.label_costume_state_1, 4, 0, 1, 1)
self.box_skill_confirm = QtWidgets.QCheckBox(self.groupBox)
self.box_skill_confirm.setObjectName("box_skill_confirm")
self.gridLayout_3.addWidget(self.box_skill_confirm, 4, 8, 1, 1)
self.horizontalLayout_29 = QtWidgets.QHBoxLayout()
self.horizontalLayout_29.setObjectName("horizontalLayout_29")
self.btn_select_servant_5 = QtWidgets.QPushButton(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btn_select_servant_5.sizePolicy().hasHeightForWidth())
self.btn_select_servant_5.setSizePolicy(sizePolicy)
self.btn_select_servant_5.setMinimumSize(QtCore.QSize(92, 100))
self.btn_select_servant_5.setMaximumSize(QtCore.QSize(92, 100))
self.btn_select_servant_5.setText("")
self.btn_select_servant_5.setIconSize(QtCore.QSize(92, 100))
self.btn_select_servant_5.setObjectName("btn_select_servant_5")
self.horizontalLayout_29.addWidget(self.btn_select_servant_5)
self.gridLayout_3.addLayout(self.horizontalLayout_29, 0, 5, 1, 1)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.label = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setObjectName("label")
self.horizontalLayout_3.addWidget(self.label)
self.spinbox_required_prob = QtWidgets.QSpinBox(self.groupBox)
self.spinbox_required_prob.setMaximum(100)
self.spinbox_required_prob.setProperty("value", 100)
self.spinbox_required_prob.setObjectName("spinbox_required_prob")
self.horizontalLayout_3.addWidget(self.spinbox_required_prob)
self.gridLayout_3.addLayout(self.horizontalLayout_3, 3, 8, 1, 1)
self.horizontalLayout_24 = QtWidgets.QHBoxLayout()
self.horizontalLayout_24.setObjectName("horizontalLayout_24")
self.btn_select_servant_1 = QtWidgets.QPushButton(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btn_select_servant_1.sizePolicy().hasHeightForWidth())
self.btn_select_servant_1.setSizePolicy(sizePolicy)
self.btn_select_servant_1.setMinimumSize(QtCore.QSize(92, 100))
self.btn_select_servant_1.setMaximumSize(QtCore.QSize(92, 100))
self.btn_select_servant_1.setText("")
self.btn_select_servant_1.setIconSize(QtCore.QSize(92, 100))
self.btn_select_servant_1.setObjectName("btn_select_servant_1")
self.horizontalLayout_24.addWidget(self.btn_select_servant_1)
self.gridLayout_3.addLayout(self.horizontalLayout_24, 0, 0, 1, 1)
self.label_costume_state_5 = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_costume_state_5.sizePolicy().hasHeightForWidth())
self.label_costume_state_5.setSizePolicy(sizePolicy)
self.label_costume_state_5.setMaximumSize(QtCore.QSize(16777215, 28))
self.label_costume_state_5.setObjectName("label_costume_state_5")
self.gridLayout_3.addWidget(self.label_costume_state_5, 4, 5, 1, 1)
self.label_costume_state_6 = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_costume_state_6.sizePolicy().hasHeightForWidth())
self.label_costume_state_6.setSizePolicy(sizePolicy)
self.label_costume_state_6.setMaximumSize(QtCore.QSize(16777215, 28))
self.label_costume_state_6.setObjectName("label_costume_state_6")
self.gridLayout_3.addWidget(self.label_costume_state_6, 4, 6, 1, 1)
self.horizontalLayout_26 = QtWidgets.QHBoxLayout()
self.horizontalLayout_26.setObjectName("horizontalLayout_26")
self.btn_select_servant_2 = QtWidgets.QPushButton(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btn_select_servant_2.sizePolicy().hasHeightForWidth())
self.btn_select_servant_2.setSizePolicy(sizePolicy)
self.btn_select_servant_2.setMinimumSize(QtCore.QSize(92, 100))
self.btn_select_servant_2.setMaximumSize(QtCore.QSize(92, 100))
self.btn_select_servant_2.setText("")
self.btn_select_servant_2.setIconSize(QtCore.QSize(92, 100))
self.btn_select_servant_2.setObjectName("btn_select_servant_2")
self.horizontalLayout_26.addWidget(self.btn_select_servant_2)
self.gridLayout_3.addLayout(self.horizontalLayout_26, 0, 1, 1, 1)
self.horizontalLayout_23 = QtWidgets.QHBoxLayout()
self.horizontalLayout_23.setObjectName("horizontalLayout_23")
self.btn_select_master = QtWidgets.QPushButton(self.groupBox)
self.btn_select_master.setMinimumSize(QtCore.QSize(92, 100))
self.btn_select_master.setMaximumSize(QtCore.QSize(92, 100))
self.btn_select_master.setText("")
self.btn_select_master.setIconSize(QtCore.QSize(100, 100))
self.btn_select_master.setObjectName("btn_select_master")
self.horizontalLayout_23.addWidget(self.btn_select_master)
self.gridLayout_3.addLayout(self.horizontalLayout_23, 0, 8, 1, 1)
self.label_servant_state_1 = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_servant_state_1.sizePolicy().hasHeightForWidth())
self.label_servant_state_1.setSizePolicy(sizePolicy)
self.label_servant_state_1.setMinimumSize(QtCore.QSize(110, 65))
self.label_servant_state_1.setMaximumSize(QtCore.QSize(16777215, 65))
self.label_servant_state_1.setObjectName("label_servant_state_1")
self.gridLayout_3.addWidget(self.label_servant_state_1, 2, 0, 1, 1)
self.label_master_state = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_master_state.sizePolicy().hasHeightForWidth())
self.label_master_state.setSizePolicy(sizePolicy)
self.label_master_state.setMinimumSize(QtCore.QSize(0, 0))
self.label_master_state.setMaximumSize(QtCore.QSize(16777215, 28))
self.label_master_state.setObjectName("label_master_state")
self.gridLayout_3.addWidget(self.label_master_state, 2, 8, 1, 1)
self.horizontalLayout_38 = QtWidgets.QHBoxLayout()
self.horizontalLayout_38.setObjectName("horizontalLayout_38")
self.btn_select_costume_3 = QtWidgets.QPushButton(self.groupBox)
self.btn_select_costume_3.setMinimumSize(QtCore.QSize(100, 45))
self.btn_select_costume_3.setMaximumSize(QtCore.QSize(100, 45))
self.btn_select_costume_3.setText("")
self.btn_select_costume_3.setIconSize(QtCore.QSize(100, 150))
self.btn_select_costume_3.setObjectName("btn_select_costume_3")
self.horizontalLayout_38.addWidget(self.btn_select_costume_3)
self.gridLayout_3.addLayout(self.horizontalLayout_38, 3, 2, 1, 1)
self.label_costume_state_2 = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_costume_state_2.sizePolicy().hasHeightForWidth())
self.label_costume_state_2.setSizePolicy(sizePolicy)
self.label_costume_state_2.setMaximumSize(QtCore.QSize(16777215, 28))
self.label_costume_state_2.setObjectName("label_costume_state_2")
self.gridLayout_3.addWidget(self.label_costume_state_2, 4, 1, 1, 1)
self.horizontalLayout_28 = QtWidgets.QHBoxLayout()
self.horizontalLayout_28.setObjectName("horizontalLayout_28")
self.btn_select_servant_4 = QtWidgets.QPushButton(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btn_select_servant_4.sizePolicy().hasHeightForWidth())
self.btn_select_servant_4.setSizePolicy(sizePolicy)
self.btn_select_servant_4.setMinimumSize(QtCore.QSize(92, 100))
self.btn_select_servant_4.setMaximumSize(QtCore.QSize(92, 100))
self.btn_select_servant_4.setText("")
self.btn_select_servant_4.setIconSize(QtCore.QSize(92, 100))
self.btn_select_servant_4.setObjectName("btn_select_servant_4")
self.horizontalLayout_28.addWidget(self.btn_select_servant_4)
self.gridLayout_3.addLayout(self.horizontalLayout_28, 0, 4, 1, 1)
self.horizontalLayout_36 = QtWidgets.QHBoxLayout()
self.horizontalLayout_36.setObjectName("horizontalLayout_36")
self.btn_select_costume_2 = QtWidgets.QPushButton(self.groupBox)
self.btn_select_costume_2.setMinimumSize(QtCore.QSize(100, 45))
self.btn_select_costume_2.setMaximumSize(QtCore.QSize(100, 45))
self.btn_select_costume_2.setText("")
self.btn_select_costume_2.setIconSize(QtCore.QSize(100, 150))
self.btn_select_costume_2.setObjectName("btn_select_costume_2")
self.horizontalLayout_36.addWidget(self.btn_select_costume_2)
self.gridLayout_3.addLayout(self.horizontalLayout_36, 3, 1, 1, 1)
self.horizontalLayout_46 = QtWidgets.QHBoxLayout()
self.horizontalLayout_46.setObjectName("horizontalLayout_46")
self.btn_select_costume_1 = QtWidgets.QPushButton(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btn_select_costume_1.sizePolicy().hasHeightForWidth())
self.btn_select_costume_1.setSizePolicy(sizePolicy)
self.btn_select_costume_1.setMinimumSize(QtCore.QSize(100, 45))
self.btn_select_costume_1.setMaximumSize(QtCore.QSize(100, 45))
self.btn_select_costume_1.setText("")
self.btn_select_costume_1.setIconSize(QtCore.QSize(100, 150))
self.btn_select_costume_1.setObjectName("btn_select_costume_1")
self.horizontalLayout_46.addWidget(self.btn_select_costume_1)
self.gridLayout_3.addLayout(self.horizontalLayout_46, 3, 0, 1, 1)
self.label_servant_state_3 = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_servant_state_3.sizePolicy().hasHeightForWidth())
self.label_servant_state_3.setSizePolicy(sizePolicy)
self.label_servant_state_3.setMinimumSize(QtCore.QSize(110, 65))
self.label_servant_state_3.setMaximumSize(QtCore.QSize(16777215, 65))
self.label_servant_state_3.setObjectName("label_servant_state_3")
self.gridLayout_3.addWidget(self.label_servant_state_3, 2, 2, 1, 1)
self.label_servant_state_5 = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_servant_state_5.sizePolicy().hasHeightForWidth())
self.label_servant_state_5.setSizePolicy(sizePolicy)
self.label_servant_state_5.setMinimumSize(QtCore.QSize(110, 65))
self.label_servant_state_5.setMaximumSize(QtCore.QSize(16777215, 65))
self.label_servant_state_5.setObjectName("label_servant_state_5")
self.gridLayout_3.addWidget(self.label_servant_state_5, 2, 5, 1, 1)
self.horizontalLayout_44 = QtWidgets.QHBoxLayout()
self.horizontalLayout_44.setObjectName("horizontalLayout_44")
self.btn_select_costume_6 = QtWidgets.QPushButton(self.groupBox)
self.btn_select_costume_6.setMinimumSize(QtCore.QSize(100, 45))
self.btn_select_costume_6.setMaximumSize(QtCore.QSize(100, 45))
self.btn_select_costume_6.setText("")
self.btn_select_costume_6.setIconSize(QtCore.QSize(100, 150))
self.btn_select_costume_6.setObjectName("btn_select_costume_6")
self.horizontalLayout_44.addWidget(self.btn_select_costume_6)
self.gridLayout_3.addLayout(self.horizontalLayout_44, 3, 6, 1, 1)
self.horizontalLayout_27 = QtWidgets.QHBoxLayout()
self.horizontalLayout_27.setObjectName("horizontalLayout_27")
self.btn_select_servant_3 = QtWidgets.QPushButton(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btn_select_servant_3.sizePolicy().hasHeightForWidth())
self.btn_select_servant_3.setSizePolicy(sizePolicy)
self.btn_select_servant_3.setMinimumSize(QtCore.QSize(92, 100))
self.btn_select_servant_3.setMaximumSize(QtCore.QSize(92, 100))
self.btn_select_servant_3.setText("")
self.btn_select_servant_3.setIconSize(QtCore.QSize(92, 100))
self.btn_select_servant_3.setObjectName("btn_select_servant_3")
self.horizontalLayout_27.addWidget(self.btn_select_servant_3)
self.gridLayout_3.addLayout(self.horizontalLayout_27, 0, 2, 1, 1)
self.label_costume_state_3 = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_costume_state_3.sizePolicy().hasHeightForWidth())
self.label_costume_state_3.setSizePolicy(sizePolicy)
self.label_costume_state_3.setMaximumSize(QtCore.QSize(16777215, 28))
self.label_costume_state_3.setObjectName("label_costume_state_3")
self.gridLayout_3.addWidget(self.label_costume_state_3, 4, 2, 1, 1)
self.label_servant_state_4 = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_servant_state_4.sizePolicy().hasHeightForWidth())
self.label_servant_state_4.setSizePolicy(sizePolicy)
self.label_servant_state_4.setMinimumSize(QtCore.QSize(110, 65))
self.label_servant_state_4.setMaximumSize(QtCore.QSize(16777215, 65))
self.label_servant_state_4.setObjectName("label_servant_state_4")
self.gridLayout_3.addWidget(self.label_servant_state_4, 2, 4, 1, 1)
self.line_8 = QtWidgets.QFrame(self.groupBox)
self.line_8.setFrameShape(QtWidgets.QFrame.VLine)
self.line_8.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_8.setObjectName("line_8")
self.gridLayout_3.addWidget(self.line_8, 3, 7, 1, 1)
self.horizontalLayout_40 = QtWidgets.QHBoxLayout()
self.horizontalLayout_40.setObjectName("horizontalLayout_40")
self.btn_select_costume_4 = QtWidgets.QPushButton(self.groupBox)
self.btn_select_costume_4.setMinimumSize(QtCore.QSize(100, 45))
self.btn_select_costume_4.setMaximumSize(QtCore.QSize(100, 45))
self.btn_select_costume_4.setText("")
self.btn_select_costume_4.setIconSize(QtCore.QSize(100, 150))
self.btn_select_costume_4.setObjectName("btn_select_costume_4")
self.horizontalLayout_40.addWidget(self.btn_select_costume_4)
self.gridLayout_3.addLayout(self.horizontalLayout_40, 3, 4, 1, 1)
self.label_servant_state_6 = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_servant_state_6.sizePolicy().hasHeightForWidth())
self.label_servant_state_6.setSizePolicy(sizePolicy)
self.label_servant_state_6.setMinimumSize(QtCore.QSize(110, 65))
self.label_servant_state_6.setMaximumSize(QtCore.QSize(16777215, 65))
self.label_servant_state_6.setObjectName("label_servant_state_6")
self.gridLayout_3.addWidget(self.label_servant_state_6, 2, 6, 1, 1)
self.line_3 = QtWidgets.QFrame(self.groupBox)
self.line_3.setFrameShape(QtWidgets.QFrame.VLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.gridLayout_3.addWidget(self.line_3, 0, 3, 1, 1)
self.horizontalLayout_42 = QtWidgets.QHBoxLayout()
self.horizontalLayout_42.setObjectName("horizontalLayout_42")
self.btn_select_costume_5 = QtWidgets.QPushButton(self.groupBox)
self.btn_select_costume_5.setMinimumSize(QtCore.QSize(100, 45))
self.btn_select_costume_5.setMaximumSize(QtCore.QSize(100, 45))
self.btn_select_costume_5.setText("")
self.btn_select_costume_5.setIconSize(QtCore.QSize(100, 150))
self.btn_select_costume_5.setObjectName("btn_select_costume_5")
self.horizontalLayout_42.addWidget(self.btn_select_costume_5)
self.gridLayout_3.addLayout(self.horizontalLayout_42, 3, 5, 1, 1)
self.horizontalLayout_30 = QtWidgets.QHBoxLayout()
self.horizontalLayout_30.setObjectName("horizontalLayout_30")
self.btn_select_servant_6 = QtWidgets.QPushButton(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btn_select_servant_6.sizePolicy().hasHeightForWidth())
self.btn_select_servant_6.setSizePolicy(sizePolicy)
self.btn_select_servant_6.setMinimumSize(QtCore.QSize(92, 100))
self.btn_select_servant_6.setMaximumSize(QtCore.QSize(92, 100))
self.btn_select_servant_6.setText("")
self.btn_select_servant_6.setIconSize(QtCore.QSize(92, 100))
self.btn_select_servant_6.setObjectName("btn_select_servant_6")
self.horizontalLayout_30.addWidget(self.btn_select_servant_6)
self.gridLayout_3.addLayout(self.horizontalLayout_30, 0, 6, 1, 1)
self.line_4 = QtWidgets.QFrame(self.groupBox)
self.line_4.setFrameShape(QtWidgets.QFrame.VLine)
self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_4.setObjectName("line_4")
self.gridLayout_3.addWidget(self.line_4, 3, 3, 1, 1)
self.verticalLayout_3.addLayout(self.gridLayout_3)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.btn_set_progress = QtWidgets.QPushButton(self.groupBox)
self.btn_set_progress.setObjectName("btn_set_progress")
self.horizontalLayout_4.addWidget(self.btn_set_progress)
self.btn_choose_level = QtWidgets.QPushButton(self.groupBox)
self.btn_choose_level.setObjectName("btn_choose_level")
self.horizontalLayout_4.addWidget(self.btn_choose_level)
self.btn_confirm_team = QtWidgets.QPushButton(self.groupBox)
self.btn_confirm_team.setObjectName("btn_confirm_team")
self.horizontalLayout_4.addWidget(self.btn_confirm_team)
self.btn_change_team = QtWidgets.QPushButton(self.groupBox)
self.btn_change_team.setEnabled(False)
self.btn_change_team.setObjectName("btn_change_team")
self.horizontalLayout_4.addWidget(self.btn_change_team)
self.btn_round_reset = QtWidgets.QPushButton(self.groupBox)
self.btn_round_reset.setEnabled(False)
self.btn_round_reset.setObjectName("btn_round_reset")
self.horizontalLayout_4.addWidget(self.btn_round_reset)
self.verticalLayout_3.addLayout(self.horizontalLayout_4)
self.verticalLayout_2.addWidget(self.groupBox)
self.horizontalLayout_15 = QtWidgets.QHBoxLayout()
self.horizontalLayout_15.setObjectName("horizontalLayout_15")
self.groupBox_2 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_2.setTitle("")
self.groupBox_2.setObjectName("groupBox_2")
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.groupBox_2)
self.verticalLayout_6.setContentsMargins(5, 5, 5, 5)
self.verticalLayout_6.setSpacing(5)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.verticalLayout_4 = QtWidgets.QVBoxLayout()
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.gridLayout_7 = QtWidgets.QGridLayout()
self.gridLayout_7.setObjectName("gridLayout_7")
self.round1_enemy3_class = QtWidgets.QLabel(self.groupBox_2)
self.round1_enemy3_class.setMinimumSize(QtCore.QSize(150, 0))
self.round1_enemy3_class.setMaximumSize(QtCore.QSize(150, 16777215))
self.round1_enemy3_class.setText("")
self.round1_enemy3_class.setObjectName("round1_enemy3_class")
self.gridLayout_7.addWidget(self.round1_enemy3_class, 1, 0, 1, 1)
self.round3_enemy1_class = QtWidgets.QLabel(self.groupBox_2)
self.round3_enemy1_class.setMinimumSize(QtCore.QSize(150, 0))
self.round3_enemy1_class.setMaximumSize(QtCore.QSize(150, 16777215))
self.round3_enemy1_class.setText("")
self.round3_enemy1_class.setObjectName("round3_enemy1_class")
self.gridLayout_7.addWidget(self.round3_enemy1_class, 7, 2, 1, 1)
self.horizontalLayout_10 = QtWidgets.QHBoxLayout()
self.horizontalLayout_10.setObjectName("horizontalLayout_10")
self.round2_enemy1_pic = QtWidgets.QPushButton(self.groupBox_2)
self.round2_enemy1_pic.setEnabled(False)
self.round2_enemy1_pic.setMinimumSize(QtCore.QSize(64, 64))
self.round2_enemy1_pic.setMaximumSize(QtCore.QSize(64, 64))
self.round2_enemy1_pic.setText("")
self.round2_enemy1_pic.setIconSize(QtCore.QSize(64, 64))
self.round2_enemy1_pic.setObjectName("round2_enemy1_pic")
self.horizontalLayout_10.addWidget(self.round2_enemy1_pic)
self.gridLayout_7.addLayout(self.horizontalLayout_10, 3, 2, 1, 1)
self.round3_enemy1_health = QtWidgets.QLabel(self.groupBox_2)
self.round3_enemy1_health.setMinimumSize(QtCore.QSize(150, 0))
self.round3_enemy1_health.setMaximumSize(QtCore.QSize(150, 16777215))
self.round3_enemy1_health.setText("")
self.round3_enemy1_health.setObjectName("round3_enemy1_health")
self.gridLayout_7.addWidget(self.round3_enemy1_health, 8, 2, 1, 1)
self.horizontalLayout_20 = QtWidgets.QHBoxLayout()
self.horizontalLayout_20.setObjectName("horizontalLayout_20")
self.round3_enemy2_pic = QtWidgets.QPushButton(self.groupBox_2)
self.round3_enemy2_pic.setEnabled(False)
self.round3_enemy2_pic.setMinimumSize(QtCore.QSize(64, 64))
self.round3_enemy2_pic.setMaximumSize(QtCore.QSize(64, 64))
self.round3_enemy2_pic.setText("")
self.round3_enemy2_pic.setIconSize(QtCore.QSize(64, 64))
self.round3_enemy2_pic.setObjectName("round3_enemy2_pic")
self.horizontalLayout_20.addWidget(self.round3_enemy2_pic)
self.gridLayout_7.addLayout(self.horizontalLayout_20, 6, 1, 1, 1)
self.round2_enemy3_class = QtWidgets.QLabel(self.groupBox_2)
self.round2_enemy3_class.setMinimumSize(QtCore.QSize(150, 0))
self.round2_enemy3_class.setMaximumSize(QtCore.QSize(150, 16777215))
self.round2_enemy3_class.setText("")
self.round2_enemy3_class.setObjectName("round2_enemy3_class")
self.gridLayout_7.addWidget(self.round2_enemy3_class, 4, 0, 1, 1)
self.round2_enemy2_class = QtWidgets.QLabel(self.groupBox_2)
self.round2_enemy2_class.setMinimumSize(QtCore.QSize(150, 0))
self.round2_enemy2_class.setMaximumSize(QtCore.QSize(150, 16777215))
self.round2_enemy2_class.setText("")
self.round2_enemy2_class.setObjectName("round2_enemy2_class")
self.gridLayout_7.addWidget(self.round2_enemy2_class, 4, 1, 1, 1)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.round2_enemy3_pic = QtWidgets.QPushButton(self.groupBox_2)
self.round2_enemy3_pic.setEnabled(False)
self.round2_enemy3_pic.setMinimumSize(QtCore.QSize(64, 64))
self.round2_enemy3_pic.setMaximumSize(QtCore.QSize(64, 64))
self.round2_enemy3_pic.setText("")
self.round2_enemy3_pic.setIconSize(QtCore.QSize(64, 64))
self.round2_enemy3_pic.setObjectName("round2_enemy3_pic")
self.horizontalLayout_6.addWidget(self.round2_enemy3_pic)
self.gridLayout_7.addLayout(self.horizontalLayout_6, 3, 0, 1, 1)
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.round2_enemy2_pic = QtWidgets.QPushButton(self.groupBox_2)
self.round2_enemy2_pic.setEnabled(False)
self.round2_enemy2_pic.setMinimumSize(QtCore.QSize(64, 64))
self.round2_enemy2_pic.setMaximumSize(QtCore.QSize(64, 64))
self.round2_enemy2_pic.setText("")
self.round2_enemy2_pic.setIconSize(QtCore.QSize(64, 64))
self.round2_enemy2_pic.setObjectName("round2_enemy2_pic")
self.horizontalLayout_7.addWidget(self.round2_enemy2_pic)
self.gridLayout_7.addLayout(self.horizontalLayout_7, 3, 1, 1, 1)
self.horizontalLayout_21 = QtWidgets.QHBoxLayout()
self.horizontalLayout_21.setObjectName("horizontalLayout_21")
self.round3_enemy3_pic = QtWidgets.QPushButton(self.groupBox_2)
self.round3_enemy3_pic.setEnabled(False)
self.round3_enemy3_pic.setMinimumSize(QtCore.QSize(64, 64))
self.round3_enemy3_pic.setMaximumSize(QtCore.QSize(64, 64))
self.round3_enemy3_pic.setText("")
self.round3_enemy3_pic.setIconSize(QtCore.QSize(64, 64))
self.round3_enemy3_pic.setObjectName("round3_enemy3_pic")
self.horizontalLayout_21.addWidget(self.round3_enemy3_pic)
self.gridLayout_7.addLayout(self.horizontalLayout_21, 6, 0, 1, 1)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.round1_enemy2_pic = QtWidgets.QPushButton(self.groupBox_2)
self.round1_enemy2_pic.setEnabled(False)
self.round1_enemy2_pic.setMinimumSize(QtCore.QSize(64, 64))
self.round1_enemy2_pic.setMaximumSize(QtCore.QSize(64, 64))
self.round1_enemy2_pic.setText("")
self.round1_enemy2_pic.setIconSize(QtCore.QSize(64, 64))
self.round1_enemy2_pic.setObjectName("round1_enemy2_pic")
self.horizontalLayout_2.addWidget(self.round1_enemy2_pic)
self.gridLayout_7.addLayout(self.horizontalLayout_2, 0, 1, 1, 1)
self.round3_enemy3_class = QtWidgets.QLabel(self.groupBox_2)
self.round3_enemy3_class.setMinimumSize(QtCore.QSize(150, 0))
self.round3_enemy3_class.setMaximumSize(QtCore.QSize(150, 28))
self.round3_enemy3_class.setText("")
self.round3_enemy3_class.setObjectName("round3_enemy3_class")
self.gridLayout_7.addWidget(self.round3_enemy3_class, 7, 0, 1, 1)
self.round1_enemy2_class = QtWidgets.QLabel(self.groupBox_2)
self.round1_enemy2_class.setMinimumSize(QtCore.QSize(150, 0))
self.round1_enemy2_class.setMaximumSize(QtCore.QSize(150, 28))
self.round1_enemy2_class.setText("")
self.round1_enemy2_class.setObjectName("round1_enemy2_class")
self.gridLayout_7.addWidget(self.round1_enemy2_class, 1, 1, 1, 1)
self.round3_enemy3_health = QtWidgets.QLabel(self.groupBox_2)
self.round3_enemy3_health.setMinimumSize(QtCore.QSize(150, 0))
self.round3_enemy3_health.setMaximumSize(QtCore.QSize(150, 16777215))
self.round3_enemy3_health.setText("")
self.round3_enemy3_health.setObjectName("round3_enemy3_health")
self.gridLayout_7.addWidget(self.round3_enemy3_health, 8, 0, 1, 1)
self.round1_enemy3_health = QtWidgets.QLabel(self.groupBox_2)
self.round1_enemy3_health.setMinimumSize(QtCore.QSize(150, 0))
self.round1_enemy3_health.setMaximumSize(QtCore.QSize(150, 16777215))
self.round1_enemy3_health.setText("")
self.round1_enemy3_health.setObjectName("round1_enemy3_health")
self.gridLayout_7.addWidget(self.round1_enemy3_health, 2, 0, 1, 1)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.round1_enemy1_pic = QtWidgets.QPushButton(self.groupBox_2)
self.round1_enemy1_pic.setEnabled(False)
self.round1_enemy1_pic.setMinimumSize(QtCore.QSize(64, 64))
self.round1_enemy1_pic.setMaximumSize(QtCore.QSize(64, 64))
self.round1_enemy1_pic.setText("")
self.round1_enemy1_pic.setIconSize(QtCore.QSize(64, 64))
self.round1_enemy1_pic.setObjectName("round1_enemy1_pic")
self.horizontalLayout.addWidget(self.round1_enemy1_pic)
self.gridLayout_7.addLayout(self.horizontalLayout, 0, 2, 1, 1)
self.round2_enemy3_health = QtWidgets.QLabel(self.groupBox_2)
self.round2_enemy3_health.setMinimumSize(QtCore.QSize(150, 0))
self.round2_enemy3_health.setMaximumSize(QtCore.QSize(150, 16777215))
self.round2_enemy3_health.setText("")
self.round2_enemy3_health.setObjectName("round2_enemy3_health")
self.gridLayout_7.addWidget(self.round2_enemy3_health, 5, 0, 1, 1)
self.round2_enemy2_health = QtWidgets.QLabel(self.groupBox_2)
self.round2_enemy2_health.setMinimumSize(QtCore.QSize(150, 0))
self.round2_enemy2_health.setMaximumSize(QtCore.QSize(150, 16777215))
self.round2_enemy2_health.setText("")
self.round2_enemy2_health.setObjectName("round2_enemy2_health")
self.gridLayout_7.addWidget(self.round2_enemy2_health, 5, 1, 1, 1)
self.round3_enemy2_health = QtWidgets.QLabel(self.groupBox_2)
self.round3_enemy2_health.setMinimumSize(QtCore.QSize(150, 0))
self.round3_enemy2_health.setMaximumSize(QtCore.QSize(150, 16777215))
self.round3_enemy2_health.setText("")
self.round3_enemy2_health.setObjectName("round3_enemy2_health")
self.gridLayout_7.addWidget(self.round3_enemy2_health, 8, 1, 1, 1)
self.round1_enemy2_health = QtWidgets.QLabel(self.groupBox_2)
self.round1_enemy2_health.setMinimumSize(QtCore.QSize(150, 0))
self.round1_enemy2_health.setMaximumSize(QtCore.QSize(150, 16777215))
self.round1_enemy2_health.setText("")
self.round1_enemy2_health.setObjectName("round1_enemy2_health")
self.gridLayout_7.addWidget(self.round1_enemy2_health, 2, 1, 1, 1)
self.round2_enemy1_class = QtWidgets.QLabel(self.groupBox_2)
self.round2_enemy1_class.setMinimumSize(QtCore.QSize(150, 0))
self.round2_enemy1_class.setMaximumSize(QtCore.QSize(150, 16777215))
self.round2_enemy1_class.setText("")
self.round2_enemy1_class.setObjectName("round2_enemy1_class")
self.gridLayout_7.addWidget(self.round2_enemy1_class, 4, 2, 1, 1)
self.round1_enemy1_class = QtWidgets.QLabel(self.groupBox_2)
self.round1_enemy1_class.setMinimumSize(QtCore.QSize(150, 0))
self.round1_enemy1_class.setMaximumSize(QtCore.QSize(150, 28))
self.round1_enemy1_class.setText("")
self.round1_enemy1_class.setObjectName("round1_enemy1_class")
self.gridLayout_7.addWidget(self.round1_enemy1_class, 1, 2, 1, 1)
self.round1_enemy1_health = QtWidgets.QLabel(self.groupBox_2)
self.round1_enemy1_health.setMinimumSize(QtCore.QSize(150, 0))
self.round1_enemy1_health.setMaximumSize(QtCore.QSize(150, 16777215))
self.round1_enemy1_health.setText("")
self.round1_enemy1_health.setObjectName("round1_enemy1_health")
self.gridLayout_7.addWidget(self.round1_enemy1_health, 2, 2, 1, 1)
self.round2_enemy1_health = QtWidgets.QLabel(self.groupBox_2)
self.round2_enemy1_health.setMinimumSize(QtCore.QSize(150, 0))
self.round2_enemy1_health.setMaximumSize(QtCore.QSize(150, 16777215))
self.round2_enemy1_health.setText("")
self.round2_enemy1_health.setObjectName("round2_enemy1_health")
self.gridLayout_7.addWidget(self.round2_enemy1_health, 5, 2, 1, 1)
self.horizontalLayout_12 = QtWidgets.QHBoxLayout()
self.horizontalLayout_12.setObjectName("horizontalLayout_12")
self.round3_enemy1_pic = QtWidgets.QPushButton(self.groupBox_2)
self.round3_enemy1_pic.setEnabled(False)
self.round3_enemy1_pic.setMinimumSize(QtCore.QSize(64, 64))
self.round3_enemy1_pic.setMaximumSize(QtCore.QSize(64, 64))
self.round3_enemy1_pic.setText("")
self.round3_enemy1_pic.setIconSize(QtCore.QSize(64, 64))
self.round3_enemy1_pic.setObjectName("round3_enemy1_pic")
self.horizontalLayout_12.addWidget(self.round3_enemy1_pic)
self.gridLayout_7.addLayout(self.horizontalLayout_12, 6, 2, 1, 1)
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.round1_enemy3_pic = QtWidgets.QPushButton(self.groupBox_2)
self.round1_enemy3_pic.setEnabled(False)
self.round1_enemy3_pic.setMinimumSize(QtCore.QSize(64, 64))
self.round1_enemy3_pic.setMaximumSize(QtCore.QSize(64, 64))
self.round1_enemy3_pic.setText("")
self.round1_enemy3_pic.setIconSize(QtCore.QSize(64, 64))
self.round1_enemy3_pic.setObjectName("round1_enemy3_pic")
self.horizontalLayout_5.addWidget(self.round1_enemy3_pic)
self.gridLayout_7.addLayout(self.horizontalLayout_5, 0, 0, 1, 1)
self.round3_enemy2_class = QtWidgets.QLabel(self.groupBox_2)
self.round3_enemy2_class.setMinimumSize(QtCore.QSize(150, 0))
self.round3_enemy2_class.setMaximumSize(QtCore.QSize(150, 28))
self.round3_enemy2_class.setText("")
self.round3_enemy2_class.setObjectName("round3_enemy2_class")
self.gridLayout_7.addWidget(self.round3_enemy2_class, 7, 1, 1, 1)
self.verticalLayout_4.addLayout(self.gridLayout_7)
self.verticalLayout_6.addLayout(self.verticalLayout_4)
self.horizontalLayout_15.addWidget(self.groupBox_2)
self.groupBox_3 = QtWidgets.QGroupBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_3.sizePolicy().hasHeightForWidth())
self.groupBox_3.setSizePolicy(sizePolicy)
self.groupBox_3.setTitle("")
self.groupBox_3.setObjectName("groupBox_3")
self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.groupBox_3)
self.verticalLayout_7.setContentsMargins(5, 5, 5, 5)
self.verticalLayout_7.setSpacing(5)
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.gridLayout_2 = QtWidgets.QGridLayout()
self.gridLayout_2.setObjectName("gridLayout_2")
self.verticalLayout_5 = QtWidgets.QVBoxLayout()
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.round1_label_random = QtWidgets.QLabel(self.groupBox_3)
self.round1_label_random.setEnabled(False)
self.round1_label_random.setMaximumSize(QtCore.QSize(100, 16777215))
self.round1_label_random.setObjectName("round1_label_random")
self.verticalLayout_5.addWidget(self.round1_label_random)
self.round1_bar_random = QtWidgets.QSlider(self.groupBox_3)
self.round1_bar_random.setEnabled(False)
self.round1_bar_random.setMaximumSize(QtCore.QSize(100, 16777215))
self.round1_bar_random.setMinimum(90)
self.round1_bar_random.setMaximum(110)
self.round1_bar_random.setProperty("value", 90)
self.round1_bar_random.setOrientation(QtCore.Qt.Horizontal)
self.round1_bar_random.setObjectName("round1_bar_random")
self.verticalLayout_5.addWidget(self.round1_bar_random)
self.gridLayout_2.addLayout(self.verticalLayout_5, 1, 8, 1, 1)
self.round1_servant2_np = QtWidgets.QPushButton(self.groupBox_3)
self.round1_servant2_np.setEnabled(False)
self.round1_servant2_np.setObjectName("round1_servant2_np")
self.gridLayout_2.addWidget(self.round1_servant2_np, 4, 5, 1, 1)
self.round1_servant3_np = QtWidgets.QPushButton(self.groupBox_3)
self.round1_servant3_np.setEnabled(False)
self.round1_servant3_np.setObjectName("round1_servant3_np")
self.gridLayout_2.addWidget(self.round1_servant3_np, 4, 6, 1, 1)
self.horizontalLayout_16 = QtWidgets.QHBoxLayout()
self.horizontalLayout_16.setObjectName("horizontalLayout_16")
self.round1_servant2_pic = QtWidgets.QPushButton(self.groupBox_3)
self.round1_servant2_pic.setEnabled(False)
self.round1_servant2_pic.setMinimumSize(QtCore.QSize(64, 70))
self.round1_servant2_pic.setMaximumSize(QtCore.QSize(64, 70))
self.round1_servant2_pic.setText("")
self.round1_servant2_pic.setIconSize(QtCore.QSize(64, 70))
self.round1_servant2_pic.setObjectName("round1_servant2_pic")
self.horizontalLayout_16.addWidget(self.round1_servant2_pic)
self.gridLayout_2.addLayout(self.horizontalLayout_16, 1, 5, 1, 1)
self.horizontalLayout_9 = QtWidgets.QHBoxLayout()
self.horizontalLayout_9.setObjectName("horizontalLayout_9")
self.round1_servant1_pic = QtWidgets.QPushButton(self.groupBox_3)
self.round1_servant1_pic.setEnabled(False)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.round1_servant1_pic.sizePolicy().hasHeightForWidth())
self.round1_servant1_pic.setSizePolicy(sizePolicy)
self.round1_servant1_pic.setMinimumSize(QtCore.QSize(64, 70))
self.round1_servant1_pic.setMaximumSize(QtCore.QSize(64, 70))
self.round1_servant1_pic.setText("")
self.round1_servant1_pic.setIconSize(QtCore.QSize(64, 70))
self.round1_servant1_pic.setObjectName("round1_servant1_pic")
self.horizontalLayout_9.addWidget(self.round1_servant1_pic)
self.gridLayout_2.addLayout(self.horizontalLayout_9, 1, 4, 1, 1)
self.horizontalLayout_19 = QtWidgets.QHBoxLayout()
self.horizontalLayout_19.setObjectName("horizontalLayout_19")
self.round1_master_pic = QtWidgets.QPushButton(self.groupBox_3)
self.round1_master_pic.setEnabled(False)
self.round1_master_pic.setMinimumSize(QtCore.QSize(64, 70))
self.round1_master_pic.setMaximumSize(QtCore.QSize(64, 70))
self.round1_master_pic.setText("")
self.round1_master_pic.setIconSize(QtCore.QSize(64, 64))
self.round1_master_pic.setObjectName("round1_master_pic")
self.horizontalLayout_19.addWidget(self.round1_master_pic)
self.gridLayout_2.addLayout(self.horizontalLayout_19, 1, 7, 1, 1)
self.horizontalLayout_11 = QtWidgets.QHBoxLayout()
self.horizontalLayout_11.setObjectName("horizontalLayout_11")
self.round1_servant1_skill1 = QtWidgets.QPushButton(self.groupBox_3)
self.round1_servant1_skill1.setEnabled(False)
self.round1_servant1_skill1.setMinimumSize(QtCore.QSize(30, 30))
self.round1_servant1_skill1.setMaximumSize(QtCore.QSize(30, 30))
self.round1_servant1_skill1.setText("")
self.round1_servant1_skill1.setIconSize(QtCore.QSize(30, 30))
self.round1_servant1_skill1.setObjectName("round1_servant1_skill1")
self.horizontalLayout_11.addWidget(self.round1_servant1_skill1)
self.round1_servant1_skill2 = QtWidgets.QPushButton(self.groupBox_3)
self.round1_servant1_skill2.setEnabled(False)
self.round1_servant1_skill2.setMinimumSize(QtCore.QSize(30, 30))
self.round1_servant1_skill2.setMaximumSize(QtCore.QSize(30, 30))
self.round1_servant1_skill2.setText("")
self.round1_servant1_skill2.setIconSize(QtCore.QSize(30, 30))
self.round1_servant1_skill2.setObjectName("round1_servant1_skill2")
self.horizontalLayout_11.addWidget(self.round1_servant1_skill2)
self.round1_servant1_skill3 = QtWidgets.QPushButton(self.groupBox_3)
self.round1_servant1_skill3.setEnabled(False)
self.round1_servant1_skill3.setMinimumSize(QtCore.QSize(30, 30))
self.round1_servant1_skill3.setMaximumSize(QtCore.QSize(30, 30))
self.round1_servant1_skill3.setText("")
self.round1_servant1_skill3.setIconSize(QtCore.QSize(30, 30))
self.round1_servant1_skill3.setObjectName("round1_servant1_skill3")
self.horizontalLayout_11.addWidget(self.round1_servant1_skill3)
self.gridLayout_2.addLayout(self.horizontalLayout_11, 3, 4, 1, 1)
self.horizontalLayout_17 = QtWidgets.QHBoxLayout()
self.horizontalLayout_17.setObjectName("horizontalLayout_17")
self.round1_servant3_pic = QtWidgets.QPushButton(self.groupBox_3)
self.round1_servant3_pic.setEnabled(False)
self.round1_servant3_pic.setMinimumSize(QtCore.QSize(64, 70))
self.round1_servant3_pic.setMaximumSize(QtCore.QSize(64, 70))
self.round1_servant3_pic.setText("")
self.round1_servant3_pic.setIconSize(QtCore.QSize(64, 70))
self.round1_servant3_pic.setObjectName("round1_servant3_pic")
self.horizontalLayout_17.addWidget(self.round1_servant3_pic)
self.gridLayout_2.addLayout(self.horizontalLayout_17, 1, 6, 1, 1)
self.btn_round1_next = QtWidgets.QPushButton(self.groupBox_3)
self.btn_round1_next.setEnabled(False)
self.btn_round1_next.setMinimumSize(QtCore.QSize(0, 30))
self.btn_round1_next.setMaximumSize(QtCore.QSize(16777215, 30))
self.btn_round1_next.setObjectName("btn_round1_next")
self.gridLayout_2.addWidget(self.btn_round1_next, 3, 8, 1, 1)
self.round1_servant1_np = QtWidgets.QPushButton(self.groupBox_3)
self.round1_servant1_np.setEnabled(False)
self.round1_servant1_np.setObjectName("round1_servant1_np")
self.gridLayout_2.addWidget(self.round1_servant1_np, 4, 4, 1, 1)
self.horizontalLayout_14 = QtWidgets.QHBoxLayout()
self.horizontalLayout_14.setObjectName("horizontalLayout_14")
self.round1_servant3_skill1 = QtWidgets.QPushButton(self.groupBox_3)
self.round1_servant3_skill1.setEnabled(False)
self.round1_servant3_skill1.setMinimumSize(QtCore.QSize(30, 30))
self.round1_servant3_skill1.setMaximumSize(QtCore.QSize(30, 30))
self.round1_servant3_skill1.setText("")
self.round1_servant3_skill1.setIconSize(QtCore.QSize(30, 30))
self.round1_servant3_skill1.setObjectName("round1_servant3_skill1")
self.horizontalLayout_14.addWidget(self.round1_servant3_skill1)
self.round1_servant3_skill2 = QtWidgets.QPushButton(self.groupBox_3)
self.round1_servant3_skill2.setEnabled(False)
self.round1_servant3_skill2.setMinimumSize(QtCore.QSize(30, 30))
self.round1_servant3_skill2.setMaximumSize(QtCore.QSize(30, 30))
self.round1_servant3_skill2.setText("")
self.round1_servant3_skill2.setIconSize(QtCore.QSize(30, 30))
self.round1_servant3_skill2.setObjectName("round1_servant3_skill2")
self.horizontalLayout_14.addWidget(self.round1_servant3_skill2)
self.round1_servant3_skill3 = QtWidgets.QPushButton(self.groupBox_3)
self.round1_servant3_skill3.setEnabled(False)
self.round1_servant3_skill3.setMinimumSize(QtCore.QSize(30, 30))
self.round1_servant3_skill3.setMaximumSize(QtCore.QSize(30, 30))
self.round1_servant3_skill3.setText("")
self.round1_servant3_skill3.setIconSize(QtCore.QSize(30, 30))
self.round1_servant3_skill3.setObjectName("round1_servant3_skill3")
self.horizontalLayout_14.addWidget(self.round1_servant3_skill3)
self.gridLayout_2.addLayout(self.horizontalLayout_14, 3, 6, 1, 1)
self.horizontalLayout_18 = QtWidgets.QHBoxLayout()
self.horizontalLayout_18.setObjectName("horizontalLayout_18")
self.round1_master_skill1 = QtWidgets.QPushButton(self.groupBox_3)
self.round1_master_skill1.setEnabled(False)
self.round1_master_skill1.setMinimumSize(QtCore.QSize(30, 30))
self.round1_master_skill1.setMaximumSize(QtCore.QSize(30, 30))
self.round1_master_skill1.setText("")
self.round1_master_skill1.setIconSize(QtCore.QSize(30, 30))
self.round1_master_skill1.setObjectName("round1_master_skill1")
self.horizontalLayout_18.addWidget(self.round1_master_skill1)
self.round1_master_skill2 = QtWidgets.QPushButton(self.groupBox_3)
self.round1_master_skill2.setEnabled(False)
self.round1_master_skill2.setMinimumSize(QtCore.QSize(30, 30))
self.round1_master_skill2.setMaximumSize(QtCore.QSize(30, 30))
self.round1_master_skill2.setText("")
self.round1_master_skill2.setIconSize(QtCore.QSize(30, 30))
self.round1_master_skill2.setObjectName("round1_master_skill2")
self.horizontalLayout_18.addWidget(self.round1_master_skill2)
self.round1_master_skill3 = QtWidgets.QPushButton(self.groupBox_3)
self.round1_master_skill3.setEnabled(False)
self.round1_master_skill3.setMinimumSize(QtCore.QSize(30, 30))
self.round1_master_skill3.setMaximumSize(QtCore.QSize(30, 30))
self.round1_master_skill3.setText("")
self.round1_master_skill3.setIconSize(QtCore.QSize(30, 30))
self.round1_master_skill3.setObjectName("round1_master_skill3")
self.horizontalLayout_18.addWidget(self.round1_master_skill3)
self.gridLayout_2.addLayout(self.horizontalLayout_18, 3, 7, 1, 1)
self.horizontalLayout_13 = QtWidgets.QHBoxLayout()
self.horizontalLayout_13.setObjectName("horizontalLayout_13")
self.round1_servant2_skill1 = QtWidgets.QPushButton(self.groupBox_3)
self.round1_servant2_skill1.setEnabled(False)
self.round1_servant2_skill1.setMinimumSize(QtCore.QSize(30, 30))
self.round1_servant2_skill1.setMaximumSize(QtCore.QSize(30, 30))
self.round1_servant2_skill1.setText("")
self.round1_servant2_skill1.setIconSize(QtCore.QSize(30, 30))
self.round1_servant2_skill1.setObjectName("round1_servant2_skill1")
self.horizontalLayout_13.addWidget(self.round1_servant2_skill1)
self.round1_servant2_skill2 = QtWidgets.QPushButton(self.groupBox_3)
self.round1_servant2_skill2.setEnabled(False)
self.round1_servant2_skill2.setMinimumSize(QtCore.QSize(30, 30))
self.round1_servant2_skill2.setMaximumSize(QtCore.QSize(30, 30))
self.round1_servant2_skill2.setText("")
self.round1_servant2_skill2.setIconSize(QtCore.QSize(30, 30))
self.round1_servant2_skill2.setObjectName("round1_servant2_skill2")
self.horizontalLayout_13.addWidget(self.round1_servant2_skill2)
self.round1_servant2_skill3 = QtWidgets.QPushButton(self.groupBox_3)
self.round1_servant2_skill3.setEnabled(False)
self.round1_servant2_skill3.setMinimumSize(QtCore.QSize(30, 30))
self.round1_servant2_skill3.setMaximumSize(QtCore.QSize(30, 30))
self.round1_servant2_skill3.setText("")
self.round1_servant2_skill3.setIconSize(QtCore.QSize(30, 30))
self.round1_servant2_skill3.setObjectName("round1_servant2_skill3")
self.horizontalLayout_13.addWidget(self.round1_servant2_skill3)
self.gridLayout_2.addLayout(self.horizontalLayout_13, 3, 5, 1, 1)
self.verticalLayout_7.addLayout(self.gridLayout_2)
self.gridLayout_4 = QtWidgets.QGridLayout()
self.gridLayout_4.setObjectName("gridLayout_4")
self.round2_servant3_np = QtWidgets.QPushButton(self.groupBox_3)
self.round2_servant3_np.setEnabled(False)
self.round2_servant3_np.setObjectName("round2_servant3_np")
self.gridLayout_4.addWidget(self.round2_servant3_np, 3, 5, 1, 1)
self.horizontalLayout_181 = QtWidgets.QHBoxLayout()
self.horizontalLayout_181.setObjectName("horizontalLayout_181")
self.round2_master_skill1 = QtWidgets.QPushButton(self.groupBox_3)
self.round2_master_skill1.setEnabled(False)
self.round2_master_skill1.setMinimumSize(QtCore.QSize(30, 30))
self.round2_master_skill1.setMaximumSize(QtCore.QSize(30, 30))
self.round2_master_skill1.setText("")
self.round2_master_skill1.setIconSize(QtCore.QSize(30, 30))
self.round2_master_skill1.setObjectName("round2_master_skill1")
self.horizontalLayout_181.addWidget(self.round2_master_skill1)
self.round2_master_skill2 = QtWidgets.QPushButton(self.groupBox_3)
self.round2_master_skill2.setEnabled(False)
self.round2_master_skill2.setMinimumSize(QtCore.QSize(30, 30))
self.round2_master_skill2.setMaximumSize(QtCore.QSize(30, 30))
self.round2_master_skill2.setText("")
self.round2_master_skill2.setIconSize(QtCore.QSize(30, 30))
self.round2_master_skill2.setObjectName("round2_master_skill2")
self.horizontalLayout_181.addWidget(self.round2_master_skill2)
self.round2_master_skill3 = QtWidgets.QPushButton(self.groupBox_3)
self.round2_master_skill3.setEnabled(False)
self.round2_master_skill3.setMinimumSize(QtCore.QSize(30, 30))
self.round2_master_skill3.setMaximumSize(QtCore.QSize(30, 30))
self.round2_master_skill3.setText("")
self.round2_master_skill3.setIconSize(QtCore.QSize(30, 30))
self.round2_master_skill3.setObjectName("round2_master_skill3")
self.horizontalLayout_181.addWidget(self.round2_master_skill3)
self.gridLayout_4.addLayout(self.horizontalLayout_181, 1, 6, 1, 1)
self.round2_servant1_np = QtWidgets.QPushButton(self.groupBox_3)
self.round2_servant1_np.setEnabled(False)
self.round2_servant1_np.setObjectName("round2_servant1_np")
self.gridLayout_4.addWidget(self.round2_servant1_np, 3, 3, 1, 1)
self.horizontalLayout_171 = QtWidgets.QHBoxLayout()
self.horizontalLayout_171.setObjectName("horizontalLayout_171")
self.round2_servant3_pic = QtWidgets.QPushButton(self.groupBox_3)
self.round2_servant3_pic.setEnabled(False)
self.round2_servant3_pic.setMinimumSize(QtCore.QSize(64, 70))
self.round2_servant3_pic.setMaximumSize(QtCore.QSize(64, 70))
self.round2_servant3_pic.setText("")
self.round2_servant3_pic.setIconSize(QtCore.QSize(64, 70))
self.round2_servant3_pic.setObjectName("round2_servant3_pic")
self.horizontalLayout_171.addWidget(self.round2_servant3_pic)
self.gridLayout_4.addLayout(self.horizontalLayout_171, 0, 5, 1, 1)
self.horizontalLayout_161 = QtWidgets.QHBoxLayout()
self.horizontalLayout_161.setObjectName("horizontalLayout_161")
self.round2_servant2_pic = QtWidgets.QPushButton(self.groupBox_3)
self.round2_servant2_pic.setEnabled(False)
self.round2_servant2_pic.setMinimumSize(QtCore.QSize(64, 70))
self.round2_servant2_pic.setMaximumSize(QtCore.QSize(64, 70))
self.round2_servant2_pic.setText("")
self.round2_servant2_pic.setIconSize(QtCore.QSize(64, 70))
self.round2_servant2_pic.setObjectName("round2_servant2_pic")
self.horizontalLayout_161.addWidget(self.round2_servant2_pic)
self.gridLayout_4.addLayout(self.horizontalLayout_161, 0, 4, 1, 1)
self.horizontalLayout_131 = QtWidgets.QHBoxLayout()
self.horizontalLayout_131.setObjectName("horizontalLayout_131")
self.round2_servant2_skill1 = QtWidgets.QPushButton(self.groupBox_3)
self.round2_servant2_skill1.setEnabled(False)
self.round2_servant2_skill1.setMinimumSize(QtCore.QSize(30, 30))
self.round2_servant2_skill1.setMaximumSize(QtCore.QSize(30, 30))
self.round2_servant2_skill1.setText("")
self.round2_servant2_skill1.setIconSize(QtCore.QSize(30, 30))
self.round2_servant2_skill1.setObjectName("round2_servant2_skill1")
self.horizontalLayout_131.addWidget(self.round2_servant2_skill1)
self.round2_servant2_skill2 = QtWidgets.QPushButton(self.groupBox_3)
self.round2_servant2_skill2.setEnabled(False)
self.round2_servant2_skill2.setMinimumSize(QtCore.QSize(30, 30))
self.round2_servant2_skill2.setMaximumSize(QtCore.QSize(30, 30))
self.round2_servant2_skill2.setText("")
self.round2_servant2_skill2.setIconSize(QtCore.QSize(30, 30))
self.round2_servant2_skill2.setObjectName("round2_servant2_skill2")
self.horizontalLayout_131.addWidget(self.round2_servant2_skill2)
self.round2_servant2_skill3 = QtWidgets.QPushButton(self.groupBox_3)
self.round2_servant2_skill3.setEnabled(False)
self.round2_servant2_skill3.setMinimumSize(QtCore.QSize(30, 30))
self.round2_servant2_skill3.setMaximumSize(QtCore.QSize(30, 30))
self.round2_servant2_skill3.setText("")
self.round2_servant2_skill3.setIconSize(QtCore.QSize(30, 30))
self.round2_servant2_skill3.setObjectName("round2_servant2_skill3")
self.horizontalLayout_131.addWidget(self.round2_servant2_skill3)
self.gridLayout_4.addLayout(self.horizontalLayout_131, 1, 4, 1, 1)
self.horizontalLayout_141 = QtWidgets.QHBoxLayout()
self.horizontalLayout_141.setObjectName("horizontalLayout_141")
self.round2_servant3_skill1 = QtWidgets.QPushButton(self.groupBox_3)
self.round2_servant3_skill1.setEnabled(False)
self.round2_servant3_skill1.setMinimumSize(QtCore.QSize(30, 30))
self.round2_servant3_skill1.setMaximumSize(QtCore.QSize(30, 30))
self.round2_servant3_skill1.setText("")
self.round2_servant3_skill1.setIconSize(QtCore.QSize(30, 30))
self.round2_servant3_skill1.setObjectName("round2_servant3_skill1")
self.horizontalLayout_141.addWidget(self.round2_servant3_skill1)
self.round2_servant3_skill2 = QtWidgets.QPushButton(self.groupBox_3)
self.round2_servant3_skill2.setEnabled(False)
self.round2_servant3_skill2.setMinimumSize(QtCore.QSize(30, 30))
self.round2_servant3_skill2.setMaximumSize(QtCore.QSize(30, 30))
self.round2_servant3_skill2.setText("")
self.round2_servant3_skill2.setIconSize(QtCore.QSize(30, 30))
self.round2_servant3_skill2.setObjectName("round2_servant3_skill2")
self.horizontalLayout_141.addWidget(self.round2_servant3_skill2)
self.round2_servant3_skill3 = QtWidgets.QPushButton(self.groupBox_3)
self.round2_servant3_skill3.setEnabled(False)
self.round2_servant3_skill3.setMinimumSize(QtCore.QSize(30, 30))
self.round2_servant3_skill3.setMaximumSize(QtCore.QSize(30, 30))
self.round2_servant3_skill3.setText("")
self.round2_servant3_skill3.setIconSize(QtCore.QSize(30, 30))
self.round2_servant3_skill3.setObjectName("round2_servant3_skill3")
self.horizontalLayout_141.addWidget(self.round2_servant3_skill3)
self.gridLayout_4.addLayout(self.horizontalLayout_141, 1, 5, 1, 1)
self.btn_round2_next = QtWidgets.QPushButton(self.groupBox_3)
self.btn_round2_next.setEnabled(False)
self.btn_round2_next.setMinimumSize(QtCore.QSize(0, 30))
self.btn_round2_next.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.btn_round2_next.setObjectName("btn_round2_next")
self.gridLayout_4.addWidget(self.btn_round2_next, 1, 7, 1, 1)
self.horizontalLayout_191 = QtWidgets.QHBoxLayout()
self.horizontalLayout_191.setObjectName("horizontalLayout_191")
self.round2_master_pic = QtWidgets.QPushButton(self.groupBox_3)
self.round2_master_pic.setEnabled(False)
self.round2_master_pic.setMinimumSize(QtCore.QSize(64, 70))
self.round2_master_pic.setMaximumSize(QtCore.QSize(64, 70))
self.round2_master_pic.setText("")
self.round2_master_pic.setIconSize(QtCore.QSize(64, 64))
self.round2_master_pic.setObjectName("round2_master_pic")
self.horizontalLayout_191.addWidget(self.round2_master_pic)
self.gridLayout_4.addLayout(self.horizontalLayout_191, 0, 6, 1, 1)
self.round2_servant2_np = QtWidgets.QPushButton(self.groupBox_3)
self.round2_servant2_np.setEnabled(False)
self.round2_servant2_np.setObjectName("round2_servant2_np")
self.gridLayout_4.addWidget(self.round2_servant2_np, 3, 4, 1, 1)
self.horizontalLayout_91 = QtWidgets.QHBoxLayout()
self.horizontalLayout_91.setObjectName("horizontalLayout_91")
self.round2_servant1_pic = QtWidgets.QPushButton(self.groupBox_3)
self.round2_servant1_pic.setEnabled(False)
self.round2_servant1_pic.setMinimumSize(QtCore.QSize(64, 70))
self.round2_servant1_pic.setMaximumSize(QtCore.QSize(64, 70))
self.round2_servant1_pic.setText("")
self.round2_servant1_pic.setIconSize(QtCore.QSize(64, 70))
self.round2_servant1_pic.setObjectName("round2_servant1_pic")
self.horizontalLayout_91.addWidget(self.round2_servant1_pic)
self.gridLayout_4.addLayout(self.horizontalLayout_91, 0, 3, 1, 1)
self.verticalLayout_12 = QtWidgets.QVBoxLayout()
self.verticalLayout_12.setObjectName("verticalLayout_12")
self.round2_label_random = QtWidgets.QLabel(self.groupBox_3)
self.round2_label_random.setEnabled(False)
self.round2_label_random.setObjectName("round2_label_random")
self.verticalLayout_12.addWidget(self.round2_label_random)
self.round2_bar_random = QtWidgets.QSlider(self.groupBox_3)
self.round2_bar_random.setEnabled(False)
self.round2_bar_random.setMaximumSize(QtCore.QSize(100, 16777215))
self.round2_bar_random.setMinimum(90)
self.round2_bar_random.setMaximum(110)
self.round2_bar_random.setProperty("value", 90)
self.round2_bar_random.setOrientation(QtCore.Qt.Horizontal)
self.round2_bar_random.setObjectName("round2_bar_random")
self.verticalLayout_12.addWidget(self.round2_bar_random)
self.gridLayout_4.addLayout(self.verticalLayout_12, 0, 7, 1, 1)
self.horizontalLayout_111 = QtWidgets.QHBoxLayout()
self.horizontalLayout_111.setObjectName("horizontalLayout_111")
self.round2_servant1_skill1 = QtWidgets.QPushButton(self.groupBox_3)
self.round2_servant1_skill1.setEnabled(False)
self.round2_servant1_skill1.setMinimumSize(QtCore.QSize(30, 30))
self.round2_servant1_skill1.setMaximumSize(QtCore.QSize(30, 30))
self.round2_servant1_skill1.setText("")
self.round2_servant1_skill1.setIconSize(QtCore.QSize(30, 30))
self.round2_servant1_skill1.setObjectName("round2_servant1_skill1")
self.horizontalLayout_111.addWidget(self.round2_servant1_skill1)
self.round2_servant1_skill2 = QtWidgets.QPushButton(self.groupBox_3)
self.round2_servant1_skill2.setEnabled(False)
self.round2_servant1_skill2.setMinimumSize(QtCore.QSize(30, 30))
self.round2_servant1_skill2.setMaximumSize(QtCore.QSize(30, 30))
self.round2_servant1_skill2.setText("")
self.round2_servant1_skill2.setIconSize(QtCore.QSize(30, 30))
self.round2_servant1_skill2.setObjectName("round2_servant1_skill2")
self.horizontalLayout_111.addWidget(self.round2_servant1_skill2)
self.round2_servant1_skill3 = QtWidgets.QPushButton(self.groupBox_3)
self.round2_servant1_skill3.setEnabled(False)
self.round2_servant1_skill3.setMinimumSize(QtCore.QSize(30, 30))
self.round2_servant1_skill3.setMaximumSize(QtCore.QSize(30, 30))
self.round2_servant1_skill3.setText("")
self.round2_servant1_skill3.setIconSize(QtCore.QSize(30, 30))
self.round2_servant1_skill3.setObjectName("round2_servant1_skill3")
self.horizontalLayout_111.addWidget(self.round2_servant1_skill3)
self.gridLayout_4.addLayout(self.horizontalLayout_111, 1, 3, 1, 1)
self.verticalLayout_7.addLayout(self.gridLayout_4)
self.gridLayout_5 = QtWidgets.QGridLayout()
self.gridLayout_5.setObjectName("gridLayout_5")
self.round3_servant3_np = QtWidgets.QPushButton(self.groupBox_3)
self.round3_servant3_np.setEnabled(False)
self.round3_servant3_np.setObjectName("round3_servant3_np")
self.gridLayout_5.addWidget(self.round3_servant3_np, 3, 6, 1, 1)
self.horizontalLayout_192 = QtWidgets.QHBoxLayout()
self.horizontalLayout_192.setObjectName("horizontalLayout_192")
self.round3_master_pic = QtWidgets.QPushButton(self.groupBox_3)
self.round3_master_pic.setEnabled(False)
self.round3_master_pic.setMinimumSize(QtCore.QSize(64, 70))
self.round3_master_pic.setMaximumSize(QtCore.QSize(64, 70))
self.round3_master_pic.setText("")
self.round3_master_pic.setIconSize(QtCore.QSize(64, 64))
self.round3_master_pic.setObjectName("round3_master_pic")
self.horizontalLayout_192.addWidget(self.round3_master_pic)
self.gridLayout_5.addLayout(self.horizontalLayout_192, 0, 7, 1, 1)
self.horizontalLayout_92 = QtWidgets.QHBoxLayout()
self.horizontalLayout_92.setObjectName("horizontalLayout_92")
self.round3_servant1_pic = QtWidgets.QPushButton(self.groupBox_3)
self.round3_servant1_pic.setEnabled(False)
self.round3_servant1_pic.setMinimumSize(QtCore.QSize(64, 70))
self.round3_servant1_pic.setMaximumSize(QtCore.QSize(64, 70))
self.round3_servant1_pic.setText("")
self.round3_servant1_pic.setIconSize(QtCore.QSize(64, 70))
self.round3_servant1_pic.setObjectName("round3_servant1_pic")
self.horizontalLayout_92.addWidget(self.round3_servant1_pic)
self.gridLayout_5.addLayout(self.horizontalLayout_92, 0, 3, 1, 1)
self.round3_servant1_np = QtWidgets.QPushButton(self.groupBox_3)
self.round3_servant1_np.setEnabled(False)
self.round3_servant1_np.setObjectName("round3_servant1_np")
self.gridLayout_5.addWidget(self.round3_servant1_np, 3, 3, 1, 1)
self.round3_servant2_np = QtWidgets.QPushButton(self.groupBox_3)
self.round3_servant2_np.setEnabled(False)
self.round3_servant2_np.setObjectName("round3_servant2_np")
self.gridLayout_5.addWidget(self.round3_servant2_np, 3, 5, 1, 1)
self.horizontalLayout_172 = QtWidgets.QHBoxLayout()
self.horizontalLayout_172.setObjectName("horizontalLayout_172")
self.round3_servant3_pic = QtWidgets.QPushButton(self.groupBox_3)
self.round3_servant3_pic.setEnabled(False)
self.round3_servant3_pic.setMinimumSize(QtCore.QSize(64, 70))
self.round3_servant3_pic.setMaximumSize(QtCore.QSize(64, 70))
self.round3_servant3_pic.setText("")
self.round3_servant3_pic.setIconSize(QtCore.QSize(64, 70))
self.round3_servant3_pic.setObjectName("round3_servant3_pic")
self.horizontalLayout_172.addWidget(self.round3_servant3_pic)
self.gridLayout_5.addLayout(self.horizontalLayout_172, 0, 6, 1, 1)
self.horizontalLayout_162 = QtWidgets.QHBoxLayout()
self.horizontalLayout_162.setObjectName("horizontalLayout_162")
self.round3_servant2_pic = QtWidgets.QPushButton(self.groupBox_3)
self.round3_servant2_pic.setEnabled(False)
self.round3_servant2_pic.setMinimumSize(QtCore.QSize(64, 70))
self.round3_servant2_pic.setMaximumSize(QtCore.QSize(64, 70))
self.round3_servant2_pic.setText("")
self.round3_servant2_pic.setIconSize(QtCore.QSize(64, 70))
self.round3_servant2_pic.setObjectName("round3_servant2_pic")
self.horizontalLayout_162.addWidget(self.round3_servant2_pic)
self.gridLayout_5.addLayout(self.horizontalLayout_162, 0, 5, 1, 1)
self.btn_output_strategy = QtWidgets.QPushButton(self.groupBox_3)
self.btn_output_strategy.setEnabled(False)
self.btn_output_strategy.setObjectName("btn_output_strategy")
self.gridLayout_5.addWidget(self.btn_output_strategy, 2, 9, 1, 1)
self.horizontalLayout_132 = QtWidgets.QHBoxLayout()
self.horizontalLayout_132.setObjectName("horizontalLayout_132")
self.round3_servant2_skill1 = QtWidgets.QPushButton(self.groupBox_3)
self.round3_servant2_skill1.setEnabled(False)
self.round3_servant2_skill1.setMinimumSize(QtCore.QSize(30, 30))
self.round3_servant2_skill1.setMaximumSize(QtCore.QSize(30, 30))
self.round3_servant2_skill1.setText("")
self.round3_servant2_skill1.setIconSize(QtCore.QSize(30, 30))
self.round3_servant2_skill1.setObjectName("round3_servant2_skill1")
self.horizontalLayout_132.addWidget(self.round3_servant2_skill1)
self.round3_servant2_skill2 = QtWidgets.QPushButton(self.groupBox_3)
self.round3_servant2_skill2.setEnabled(False)
self.round3_servant2_skill2.setMinimumSize(QtCore.QSize(30, 30))
self.round3_servant2_skill2.setMaximumSize(QtCore.QSize(30, 30))
self.round3_servant2_skill2.setText("")
self.round3_servant2_skill2.setIconSize(QtCore.QSize(30, 30))
self.round3_servant2_skill2.setObjectName("round3_servant2_skill2")
self.horizontalLayout_132.addWidget(self.round3_servant2_skill2)
self.round3_servant2_skill3 = QtWidgets.QPushButton(self.groupBox_3)
self.round3_servant2_skill3.setEnabled(False)
self.round3_servant2_skill3.setMinimumSize(QtCore.QSize(30, 30))
self.round3_servant2_skill3.setMaximumSize(QtCore.QSize(30, 30))
self.round3_servant2_skill3.setText("")
self.round3_servant2_skill3.setIconSize(QtCore.QSize(30, 30))
self.round3_servant2_skill3.setObjectName("round3_servant2_skill3")
self.horizontalLayout_132.addWidget(self.round3_servant2_skill3)
self.gridLayout_5.addLayout(self.horizontalLayout_132, 2, 5, 1, 1)
self.verticalLayout_13 = QtWidgets.QVBoxLayout()
self.verticalLayout_13.setObjectName("verticalLayout_13")
self.round3_label_random = QtWidgets.QLabel(self.groupBox_3)
self.round3_label_random.setEnabled(False)
self.round3_label_random.setObjectName("round3_label_random")
self.verticalLayout_13.addWidget(self.round3_label_random)
self.round3_bar_random = QtWidgets.QSlider(self.groupBox_3)
self.round3_bar_random.setEnabled(False)
self.round3_bar_random.setMaximumSize(QtCore.QSize(100, 16777215))
self.round3_bar_random.setMinimum(90)
self.round3_bar_random.setMaximum(110)
self.round3_bar_random.setProperty("value", 90)
self.round3_bar_random.setOrientation(QtCore.Qt.Horizontal)
self.round3_bar_random.setObjectName("round3_bar_random")
self.verticalLayout_13.addWidget(self.round3_bar_random)
self.gridLayout_5.addLayout(self.verticalLayout_13, 0, 9, 1, 1)
self.horizontalLayout_182 = QtWidgets.QHBoxLayout()
self.horizontalLayout_182.setObjectName("horizontalLayout_182")
self.round3_master_skill1 = QtWidgets.QPushButton(self.groupBox_3)
self.round3_master_skill1.setEnabled(False)
self.round3_master_skill1.setMinimumSize(QtCore.QSize(30, 30))
self.round3_master_skill1.setMaximumSize(QtCore.QSize(30, 30))
self.round3_master_skill1.setText("")
self.round3_master_skill1.setIconSize(QtCore.QSize(30, 30))
self.round3_master_skill1.setObjectName("round3_master_skill1")
self.horizontalLayout_182.addWidget(self.round3_master_skill1)
self.round3_master_skill2 = QtWidgets.QPushButton(self.groupBox_3)
self.round3_master_skill2.setEnabled(False)
self.round3_master_skill2.setMinimumSize(QtCore.QSize(30, 30))
self.round3_master_skill2.setMaximumSize(QtCore.QSize(30, 30))
self.round3_master_skill2.setText("")
self.round3_master_skill2.setIconSize(QtCore.QSize(30, 30))
self.round3_master_skill2.setObjectName("round3_master_skill2")
self.horizontalLayout_182.addWidget(self.round3_master_skill2)
self.round3_master_skill3 = QtWidgets.QPushButton(self.groupBox_3)
self.round3_master_skill3.setEnabled(False)
self.round3_master_skill3.setMinimumSize(QtCore.QSize(30, 30))
self.round3_master_skill3.setMaximumSize(QtCore.QSize(30, 30))
self.round3_master_skill3.setText("")
self.round3_master_skill3.setIconSize(QtCore.QSize(30, 30))
self.round3_master_skill3.setObjectName("round3_master_skill3")
self.horizontalLayout_182.addWidget(self.round3_master_skill3)
self.gridLayout_5.addLayout(self.horizontalLayout_182, 2, 7, 1, 1)
self.horizontalLayout_142 = QtWidgets.QHBoxLayout()
self.horizontalLayout_142.setObjectName("horizontalLayout_142")
self.round3_servant3_skill1 = QtWidgets.QPushButton(self.groupBox_3)
self.round3_servant3_skill1.setEnabled(False)
self.round3_servant3_skill1.setMinimumSize(QtCore.QSize(30, 30))
self.round3_servant3_skill1.setMaximumSize(QtCore.QSize(30, 30))
self.round3_servant3_skill1.setText("")
self.round3_servant3_skill1.setIconSize(QtCore.QSize(30, 30))
self.round3_servant3_skill1.setObjectName("round3_servant3_skill1")
self.horizontalLayout_142.addWidget(self.round3_servant3_skill1)
self.round3_servant3_skill2 = QtWidgets.QPushButton(self.groupBox_3)
self.round3_servant3_skill2.setEnabled(False)
self.round3_servant3_skill2.setMinimumSize(QtCore.QSize(30, 30))
self.round3_servant3_skill2.setMaximumSize(QtCore.QSize(30, 30))
self.round3_servant3_skill2.setText("")
self.round3_servant3_skill2.setIconSize(QtCore.QSize(30, 30))
self.round3_servant3_skill2.setObjectName("round3_servant3_skill2")
self.horizontalLayout_142.addWidget(self.round3_servant3_skill2)
self.round3_servant3_skill3 = QtWidgets.QPushButton(self.groupBox_3)
self.round3_servant3_skill3.setEnabled(False)
self.round3_servant3_skill3.setMinimumSize(QtCore.QSize(30, 30))
self.round3_servant3_skill3.setMaximumSize(QtCore.QSize(30, 30))
self.round3_servant3_skill3.setText("")
self.round3_servant3_skill3.setIconSize(QtCore.QSize(30, 30))
self.round3_servant3_skill3.setObjectName("round3_servant3_skill3")
self.horizontalLayout_142.addWidget(self.round3_servant3_skill3)
self.gridLayout_5.addLayout(self.horizontalLayout_142, 2, 6, 1, 1)
self.horizontalLayout_112 = QtWidgets.QHBoxLayout()
self.horizontalLayout_112.setObjectName("horizontalLayout_112")
self.round3_servant1_skill1 = QtWidgets.QPushButton(self.groupBox_3)
self.round3_servant1_skill1.setEnabled(False)
self.round3_servant1_skill1.setMinimumSize(QtCore.QSize(30, 30))
self.round3_servant1_skill1.setMaximumSize(QtCore.QSize(30, 30))
self.round3_servant1_skill1.setText("")
self.round3_servant1_skill1.setIconSize(QtCore.QSize(30, 30))
self.round3_servant1_skill1.setObjectName("round3_servant1_skill1")
self.horizontalLayout_112.addWidget(self.round3_servant1_skill1)
self.round3_servant1_skill2 = QtWidgets.QPushButton(self.groupBox_3)
self.round3_servant1_skill2.setEnabled(False)
self.round3_servant1_skill2.setMinimumSize(QtCore.QSize(30, 30))
self.round3_servant1_skill2.setMaximumSize(QtCore.QSize(30, 30))
self.round3_servant1_skill2.setText("")
self.round3_servant1_skill2.setIconSize(QtCore.QSize(30, 30))
self.round3_servant1_skill2.setObjectName("round3_servant1_skill2")
self.horizontalLayout_112.addWidget(self.round3_servant1_skill2)
self.round3_servant1_skill3 = QtWidgets.QPushButton(self.groupBox_3)
self.round3_servant1_skill3.setEnabled(False)
self.round3_servant1_skill3.setMinimumSize(QtCore.QSize(30, 30))
self.round3_servant1_skill3.setMaximumSize(QtCore.QSize(30, 30))
self.round3_servant1_skill3.setText("")
self.round3_servant1_skill3.setIconSize(QtCore.QSize(30, 30))
self.round3_servant1_skill3.setObjectName("round3_servant1_skill3")
self.horizontalLayout_112.addWidget(self.round3_servant1_skill3)
self.gridLayout_5.addLayout(self.horizontalLayout_112, 2, 3, 1, 1)
self.verticalLayout_7.addLayout(self.gridLayout_5)
self.horizontalLayout_15.addWidget(self.groupBox_3)
self.verticalLayout_2.addLayout(self.horizontalLayout_15)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1070, 26))
self.menubar.setObjectName("menubar")
self.menu = QtWidgets.QMenu(self.menubar)
self.menu.setObjectName("menu")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.action_update = QtWidgets.QAction(MainWindow)
self.action_update.setObjectName("action_update")
self.action_mooncell = QtWidgets.QAction(MainWindow)
self.action_mooncell.setObjectName("action_mooncell")
self.action_support = QtWidgets.QAction(MainWindow)
self.action_support.setObjectName("action_support")
self.action_kazemai = QtWidgets.QAction(MainWindow)
self.action_kazemai.setObjectName("action_kazemai")
self.action_about = QtWidgets.QAction(MainWindow)
self.action_about.setObjectName("action_about")
self.menu.addAction(self.action_update)
self.menu.addAction(self.action_support)
self.menu.addAction(self.action_about)
self.menu.addSeparator()
self.menu.addAction(self.action_mooncell)
self.menu.addAction(self.action_kazemai)
self.menubar.addAction(self.menu.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
    """Install all user-visible strings (Chinese UI labels).

    Called from setupUi and whenever the application locale changes.
    Generated by Qt Designer / pyuic5 -- do not edit by hand.
    """
    _translate = QtCore.QCoreApplication.translate
    MainWindow.setWindowTitle(_translate("MainWindow", "FGO周回组队器"))
    self.label_costume_state_4.setText(_translate("MainWindow", "等级: "))
    self.label_servant_state_2.setText(_translate("MainWindow", "技能: \n"
"宝具: \n"
"等级: \n"
"芙芙:"))
    self.label_costume_state_1.setText(_translate("MainWindow", "等级: "))
    self.box_skill_confirm.setText(_translate("MainWindow", "技能提示"))
    self.label.setText(_translate("MainWindow", "概率阈值:"))
    self.label_costume_state_5.setText(_translate("MainWindow", "等级: "))
    self.label_costume_state_6.setText(_translate("MainWindow", "等级: "))
    self.label_servant_state_1.setText(_translate("MainWindow", "技能: \n"
"宝具: \n"
"等级: \n"
"芙芙:"))
    self.label_master_state.setText(_translate("MainWindow", "等级:"))
    self.label_costume_state_2.setText(_translate("MainWindow", "等级: "))
    self.label_servant_state_3.setText(_translate("MainWindow", "技能: \n"
"宝具: \n"
"等级: \n"
"芙芙:"))
    self.label_servant_state_5.setText(_translate("MainWindow", "技能: \n"
"宝具: \n"
"等级: \n"
"芙芙:"))
    self.label_costume_state_3.setText(_translate("MainWindow", "等级: "))
    self.label_servant_state_4.setText(_translate("MainWindow", "技能: \n"
"宝具: \n"
"等级: \n"
"芙芙:"))
    self.label_servant_state_6.setText(_translate("MainWindow", "技能: \n"
"宝具: \n"
"等级: \n"
"芙芙:"))
    self.btn_set_progress.setText(_translate("MainWindow", "选择进度"))
    self.btn_choose_level.setText(_translate("MainWindow", "设置副本"))
    self.btn_confirm_team.setText(_translate("MainWindow", "确 认"))
    self.btn_change_team.setText(_translate("MainWindow", "修 改"))
    self.btn_round_reset.setText(_translate("MainWindow", "撤 销"))
    self.round1_label_random.setText(_translate("MainWindow", "随机数: 0.9"))
    self.round1_servant2_np.setText(_translate("MainWindow", "宝具: 0%"))
    self.round1_servant3_np.setText(_translate("MainWindow", "宝具: 0%"))
    self.btn_round1_next.setText(_translate("MainWindow", "下一回合"))
    self.round1_servant1_np.setText(_translate("MainWindow", "宝具: 0%"))
    self.round2_servant3_np.setText(_translate("MainWindow", "宝具: 0%"))
    self.round2_servant1_np.setText(_translate("MainWindow", "宝具: 0%"))
    self.btn_round2_next.setText(_translate("MainWindow", "下一回合"))
    self.round2_servant2_np.setText(_translate("MainWindow", "宝具: 0%"))
    self.round2_label_random.setText(_translate("MainWindow", "随机数: 0.9"))
    self.round3_servant3_np.setText(_translate("MainWindow", "宝具: 0%"))
    self.round3_servant1_np.setText(_translate("MainWindow", "宝具: 0%"))
    self.round3_servant2_np.setText(_translate("MainWindow", "宝具: 0%"))
    self.btn_output_strategy.setText(_translate("MainWindow", "输出操作"))
    self.round3_label_random.setText(_translate("MainWindow", "随机数: 0.9"))
    self.menu.setTitle(_translate("MainWindow", "选 项"))
    self.action_update.setText(_translate("MainWindow", "数据库更新"))
    self.action_mooncell.setText(_translate("MainWindow", "Mooncell"))
    self.action_support.setText(_translate("MainWindow", "软件更新"))
    self.action_kazemai.setText(_translate("MainWindow", "茹西教王的理想乡"))
    self.action_about.setText(_translate("MainWindow", "关于软件"))
| 1.8125 | 2 |
pyhutool/core/Io.py | kaysen820/PyHuTool | 0 | 17728 | class File:
@staticmethod
def tail(self, file_path, lines=10):
with open(file_path, 'rb') as f:
total_lines_wanted = lines
block_size = 1024
f.seek(0, 2)
block_end_byte = f.tell()
lines_to_go = total_lines_wanted
block_number = -1
blocks = []
while lines_to_go > 0 and block_end_byte > 0:
if block_end_byte - block_size > 0:
f.seek(block_number * block_size, 2)
block = f.read(block_size)
else:
f.seek(0, 0)
block = f.read(block_end_byte)
lines_found = block.count(b'\n')
lines_to_go -= lines_found
block_end_byte -= block_size
block_number -= 1
blocks.append(block)
all_read_text = b''.join(blocks)
lines_found = all_read_text.count(b'\n')
if lines_found > total_lines_wanted:
return all_read_text.split(b'\n')[-total_lines_wanted:][:-1]
else:
return all_read_text.split(b'\n')[-lines_found:] | 3.3125 | 3 |
utils/argparse.py | toytag/self-supervised-learning-for-semantic-segmentation | 0 | 17729 | <reponame>toytag/self-supervised-learning-for-semantic-segmentation
import argparse
class ArchParser(argparse.ArgumentParser):
    """Argument parser pre-populated with a ``-a/--arch`` model choice."""

    def __init__(self, model_names, *args, **kwargs):
        super().__init__(*args, **kwargs)
        help_text = 'model architecture: ' + ' | '.join(model_names)
        self.add_argument(
            '-a', '--arch',
            metavar='ARCH',
            choices=model_names,
            help=help_text,
        )
class BasicParser(argparse.ArgumentParser):
    """Argument parser carrying the common training options (data paths,
    optimizer hyper-parameters, checkpointing, and distributed settings)
    shared by the pretraining scripts."""

    def __init__(self, description='PyTorch Segmentation Pretraining', **kwargs):
        super().__init__(description=description, **kwargs)
        # Dataset / bookkeeping
        self.add_argument('data_root', metavar='DIR', help='path to dataset')
        self.add_argument('--work-dir', default='./', metavar='DIR',
                          help='path to work directory (default: ./)')
        self.add_argument('--workers', default=32, type=int, metavar='N',
                          help='number of data loading workers (default: 32)')
        # Training schedule
        self.add_argument('--epochs', default=200, type=int, metavar='N',
                          help='number of total epochs to run (default: 200)')
        self.add_argument('--start-epoch', default=0, type=int, metavar='N',
                          help='manual epoch number (useful on restarts)')
        self.add_argument('--crop-size', default=512, type=int,
                          help='augmentation crop size (default: 512)')
        self.add_argument('--batch-size', default=256, type=int, metavar='N',
                          help='mini-batch size (default: 256), this is the total '
                               'batch size of all GPUs on the current node when '
                               'using Data Parallel or Distributed Data Parallel')
        # Optimizer (SGD)
        self.add_argument('--base-lr', default=0.01, type=float, metavar='LR',
                          help='initial learning rate', dest='base_lr')
        self.add_argument('--momentum', default=0.9, type=float, metavar='M',
                          help='momentum of SGD solver')
        self.add_argument('--wd', '--weight-decay', default=1e-4, type=float, metavar='W',
                          help='weight decay (default: 1e-4)', dest='weight_decay')
        # Logging / checkpointing
        self.add_argument('--print-freq', default=10, type=int, metavar='N',
                          help='print frequency (default: 10 iters)')
        self.add_argument('--checkpoint-freq', default=10, type=int, metavar='N',
                          help='checkpoint frequency (default: 10 epochs)')
        self.add_argument('--resume', default='', type=str, metavar='PATH',
                          help='path to latest checkpoint (default: none)')
        self.add_argument('--pretrained', default='', type=str, metavar='PATH',
                          help='path to init checkpoint (default: none)')
        # Distributed training
        self.add_argument('--world-size', default=-1, type=int,
                          help='number of nodes for distributed training')
        self.add_argument('--rank', default=-1, type=int,
                          help='node rank for distributed training')
        self.add_argument('--dist-url', default='tcp://localhost:29500', type=str,
                          help='url used to set up distributed training')
        self.add_argument('--dist-backend', default='nccl',
                          type=str, help='distributed backend')
        self.add_argument('--seed', default=None, type=int,
                          help='seed for initializing training.')
        self.add_argument('--gpu', default=None,
                          type=int, help='GPU id to use.')
        self.add_argument('--multiprocessing-distributed', action='store_true',
                          help='Use multi-processing distributed training to launch '
                               'N processes per node, which has N GPUs. This is the '
                               'fastest way to use PyTorch for either single node or '
                               'multi node data parallel training')
        # Misc
        self.add_argument('--fp16', action='store_true',
                          help='mixed percision training')
        self.add_argument('--update-interval', default=1,
                          type=int, help='gradient update interval')
| 2.734375 | 3 |
finicky/schema.py | yaaminu/yaval | 14 | 17730 | <gh_stars>10-100
from finicky.validators import ValidationException
def validate(schema, data, hook=None):
    """Validate ``data`` against ``schema``.

    ``schema`` maps field names to validator callables.  A validator takes
    the raw value and returns the cleaned value, or raises
    ``finicky.validators.ValidationException`` on failure.  Validation runs
    for every field even after an error has been found, so all errors are
    collected in a single pass.

    ``hook``, if supplied, is called with the fully validated data once
    every field has passed; it may transform the data, or raise
    ``ValidationException`` itself, in which case its errors are stored
    under the ``"___hook"`` key.  It is useful for cross-field validation.

    :param schema: mapping of field name -> validator callable
    :param data: the input mapping to validate; must not be None
    :param hook: optional cross-field validation callable
    :return: tuple ``(errors, validated_data)``
    """
    errors = {}
    validated_data = {}
    for field, validator in schema.items():
        try:
            validated_data[field] = validator(data.get(field))
        except ValidationException as exc:
            errors[field] = exc.errors
    if hook is not None and not errors:
        try:
            validated_data = hook(validated_data)
        except ValidationException as exc:
            errors["___hook"] = exc.errors
    return errors, validated_data


__all__ = ("validate",)
tests/unique_test.py | yohplala/vaex | 0 | 17731 | from common import small_buffer
import pytest
import numpy as np
import pyarrow as pa
import vaex
def test_unique_arrow(df_factory):
    # Arrow-backed string column; small_buffer forces unique() to merge
    # partial results across tiny evaluation chunks.
    ds = df_factory(x=vaex.string_column(['a', 'b', 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'a']))
    with small_buffer(ds, 2):
        assert set(ds.unique(ds.x)) == {'a', 'b'}
        # With return_inverse=True, values[index] must reconstruct the column.
        values, index = ds.unique(ds.x, return_inverse=True)
        assert np.array(values)[index].tolist() == ds.x.tolist()


def test_unique(df_factory):
    # Plain (non-Arrow) string columns, again with a tiny chunk buffer.
    ds = df_factory(colors=['red', 'green', 'blue', 'green'])
    with small_buffer(ds, 2):
        assert set(ds.unique(ds.colors)) == {'red', 'green', 'blue'}
        values, index = ds.unique(ds.colors, return_inverse=True)
        assert np.array(values)[index].tolist() == ds.colors.tolist()

    ds = df_factory(x=['a', 'b', 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'a'])
    with small_buffer(ds, 2):
        assert set(ds.unique(ds.x)) == {'a', 'b'}
        values, index = ds.unique(ds.x, return_inverse=True)
        assert np.array(values)[index].tolist() == ds.x.tolist()
def test_unique_f4(df_factory):
    # float32 column containing NaN; skip the NaN entry in the comparison
    # since NaN != NaN.
    x = np.array([np.nan, 0, 1, np.nan, 2, np.nan], dtype='f4')
    df = df_factory(x=x)
    assert list(sorted(df.x.unique()))[1:] == [np.nan, 0, 1, 2][1:]


def test_unique_nan(df_factory):
    # Same as above but with a Python list input (float64).
    x = [np.nan, 0, 1, np.nan, 2, np.nan]
    df = df_factory(x=x)
    assert list(sorted(df.x.unique()))[1:] == [np.nan, 0, 1, 2][1:]
    with small_buffer(df, 2):
        values, indices = df.unique(df.x, return_inverse=True)
        values = np.array(values)
        values = values[indices]
        # Only compare the non-NaN positions (NaN never equals itself).
        mask = np.isnan(values)
        assert values[~mask].tolist() == df.x.to_numpy()[~mask].tolist()
        # assert indices.tolist() == [0, 1, 2, 0, 3, 0]
def test_unique_missing(df_factory):
    # Create test data mixing strings, numbers, None and NaN.
    x = np.array([None, 'A', 'B', -1, 0, 2, '', '', None, None, None, np.nan, np.nan, np.nan, np.nan])
    df = df_factory(x=x)
    # dropnan=True: NaN entries are excluded, None (missing) is kept.
    uniques = df.x.unique(dropnan=True)
    assert set(uniques) == set(['', 'A', 'B', -1, 0, 2, None])


def test_unique_missing_numeric(array_factory):
    # Numeric column with a missing value: both the value and None survive.
    df = vaex.from_arrays(x=array_factory([1, None]))
    values = df.x.unique()
    assert set(values) == {1, None}
    # assert list(sorted(df.x.unique()))[1:] == [np.nan, 0, 1, 2][1:]
def test_unique_string_missing(df_factory):
    """Unique over a string column containing missing (None) values."""
    x = ['John', None, 'Sally', None, '0.0']
    df = df_factory(x=x)
    result = df.x.unique()
    # Three distinct strings plus the missing value.
    assert len(result) == 4
    assert 'John' in result
    assert None in result
    # Bug fix: the original line was a bare `assert 'Sally'`, which is
    # always true; it was clearly meant as a membership check like the rest.
    assert 'Sally' in result
    assert '0.0' in result
def test_unique_list(df_types):
    # Unique over list-typed columns includes None for missing entries.
    df = df_types
    assert set(df.string_list.unique()) == {'aap', 'noot', 'mies', None}
    assert set(df.int_list.unique()) == {1, 2, 3, 4, 5, None}


@pytest.mark.parametrize("future", [False, True])
def test_unique_categorical(df_factory, future):
    # Ordinal-encoded (categorical) column: the "future" API reports the
    # original string labels, the legacy API the integer codes.
    df = df_factory(x=vaex.string_column(['a', 'c', 'b', 'a', 'a']))
    df = df.ordinal_encode('x')
    df = df._future() if future else df
    if future:
        assert df.x.dtype == str
        assert set(df.x.unique()) == {'a', 'b', 'c'}
        assert df.x.nunique() == 3
    else:
        assert df.x.dtype == int
        assert set(df.x.unique()) == {0, 1, 2}
        assert df.x.nunique() == 3
utils/utilsFreq.py | geobook2015/magPy | 1 | 17732 | # utility functions for frequency related stuff
import numpy as np
import numpy.fft as fft
import math
def getFrequencyArray(fs, samples):
    """Return `samples` evenly spaced frequencies covering 0 Hz up to and
    including the Nyquist frequency (fs / 2)."""
    return np.linspace(0, fs / 2, samples)
# use this function for all FFT calculations
# then if change FFT later (i.e. FFTW), just replace one function
def forwardFFT(data, **kwargs):
    """Real-input forward FFT along axis 0.

    Orthonormal scaling is used by default; pass a falsy ``norm`` keyword
    (e.g. ``norm=False``) to get numpy's unscaled forward transform.
    Centralised here so the FFT backend can be swapped in one place.
    """
    use_ortho = kwargs.get("norm", True)
    if not use_ortho:
        return fft.rfft(data, axis=0)
    return fft.rfft(data, norm='ortho', axis=0)
def inverseFFT(data, length, **kwargs):
    """Inverse of :func:`forwardFFT`.

    ``length`` is the original (time-domain) signal length.  Orthonormal
    scaling is used unless a falsy ``norm`` keyword is passed.
    """
    use_ortho = kwargs.get("norm", True)
    if not use_ortho:
        return fft.irfft(data, n=length)
    return fft.irfft(data, n=length, norm='ortho')
def padNextPower2(size):
    """Return the number of samples needed to pad `size` up to the next
    power of two (0 when `size` is already a power of two).

    Uses integer bit arithmetic instead of the original
    ``math.ceil(math.log(size, 2))``, which could be thrown off by
    floating-point rounding for large sizes.
    """
    if size <= 1:
        # 0 and 1 need no padding (2**0 == 1); the original raised on 0.
        return 0
    return (1 << (size - 1).bit_length()) - size
| 3.078125 | 3 |
tools/extract_keywords.py | bitdotioinc/pglast | 0 | 17733 | # -*- coding: utf-8 -*-
# :Project: pglast -- Extract keywords from PostgreSQL header
# :Created: dom 06 ago 2017 23:34:53 CEST
# :Author: <NAME> <<EMAIL>>
# :License: GNU General Public License version 3 or later
# :Copyright: © 2017, 2018 Lele Gaifax
#
from collections import defaultdict
from os.path import basename
from pprint import pformat
from re import match
import subprocess
HEADER = """\
# -*- coding: utf-8 -*-
# :Project: pglast -- DO NOT EDIT: automatically extracted from %s @ %s
# :Author: <NAME> <<EMAIL>>
# :License: GNU General Public License version 3 or later
# :Copyright: © 2017 <NAME>
#
"""
def get_libpg_query_version():
    # Ask git for the checked-out ref of the vendored libpg_query tree and
    # return its last path component, e.g. "tags/10-1.0.2-0-gabc" -> "10-1.0.2-0-gabc".
    result = subprocess.check_output(['git', 'describe', '--all', '--long'],
                                     cwd='libpg_query')
    return result.decode('utf-8').strip().split('/')[-1]
def extract_keywords(source):
    """Yield ``(keyword, category)`` pairs from ``PG_KEYWORD(...)`` lines.

    ``source`` is the text of a PostgreSQL keyword-list header; lines not
    starting with ``PG_KEYWORD`` are ignored.
    """
    pattern = r'PG_KEYWORD\("([^"]+)",[^,]+,\s*([\w_]+)\)'
    for raw_line in source.splitlines():
        if not raw_line.startswith('PG_KEYWORD'):
            continue
        found = match(pattern, raw_line.strip())
        if found is not None:
            yield found.group(1), found.group(2)
def workhorse(args):
    # Read the C header and bucket every keyword by its keyword category.
    with open(args.header, encoding='utf-8') as f:
        source = f.read()

    bytype = defaultdict(set)
    for keyword, type in extract_keywords(source):
        bytype[type].add(keyword)

    # Emit one Python set per category, e.g. RESERVED_KEYWORDS = {...}.
    with open(args.output, 'w', encoding='utf-8') as output:
        output.write(HEADER % (basename(args.header), get_libpg_query_version()))
        for type in sorted(bytype):
            output.write('\n')
            output.write(type + 'S')
            output.write(' = {')
            # pformat emits "{...}"; strip the opening brace and leading
            # whitespace so the text continues right after " = {".
            keywords = pformat(bytype[type], compact=True, indent=len(type)+5, width=95)
            output.write(keywords[1:].lstrip())
            output.write('\n')
def main():
    # Command-line entry point: C header in, generated Python module out.
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter

    parser = ArgumentParser(description="PG keyword extractor",
                            formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument('header',
                        help="source header to be processed")
    parser.add_argument('output',
                        help="Python source to be created")
    args = parser.parse_args()
    workhorse(args)


if __name__ == '__main__':
    main()
| 2.5625 | 3 |
tests/vi/test_indent_text_object.py | trishume/VintageousPlus | 6 | 17734 | <reponame>trishume/VintageousPlus
from collections import namedtuple
from sublime import Region as R
from VintageousPlus.tests import set_text
from VintageousPlus.tests import add_sel
from VintageousPlus.tests import ViewTest
from VintageousPlus.vi.text_objects import find_indent_text_object
test = namedtuple('simple_test', 'content start expected expected_inclusive msg')
# cursor is at "|"
TESTS_INDENT = (
test(start=R(37, 37), expected=R(29, 62), expected_inclusive=R(29, 62), msg='should find indent', content='''
# a comment
def a_ruby_block
some_c|all
another_one
yerp
end'''.lstrip()),
test(start=R(37, 37), expected=R(29, 41), expected_inclusive=R(29, 80), msg='should find indent when there\'s a blank line', content='''
# a comment
def a_ruby_block
some_c|all
another_one_with(blank_line)
yerp
end'''.lstrip()),
test(start=R(42, 42), expected=R(34, 57), expected_inclusive=R(34, 58), msg='should work with pyhton-ey functions', content='''
# a python thing
def a_python_fn:
some_c|all()
what()
a_python_fn'''.lstrip()),
test(start=R(57, 57), expected=R(57, 57), expected_inclusive=R(57, 57), msg='should ignore when triggered on a whitespace-only line', content='''
# a python thing
def a_python_fn:
some_call()
what()
a_python_fn'''.lstrip()),
)
class Test_indent(ViewTest):
    """Exercise find_indent_text_object against the TESTS_INDENT fixtures."""

    def clear_selected_regions(self):
        # Remove any selections left over from a previous fixture.
        self.view.sel().clear()

    def testAll(self):
        for (i, data) in enumerate(TESTS_INDENT):
            self.clear_selected_regions()
            self.write(data.content)
            # Each fixture is checked in both inclusive and exclusive mode.
            for inclusive in [True, False]:
                start, end = find_indent_text_object(self.view, data.start, inclusive)
                actual = R(start, end)
                msg = "failed at test index {0}: {1}".format(i, data.msg)
                expected = data.expected_inclusive if inclusive else data.expected
                self.assertEqual(expected, actual, msg)
example_write_camera_frames_to_hdf5.py | mihsamusev/pytrl_demo | 0 | 17735 | import cv2
from imutils.paths import list_images
import imutils
import re
import datetime
from datasets.hdf5datasetwriter import HDF5DatasetWriter
import progressbar
def get_frame_number(impath):
    """Extract the integer frame number from an 'image data N ...' file name."""
    frame_match = re.search(r"image data (\d+)", impath)
    return int(frame_match.group(1))
def get_timestamp(impath):
    """Parse the capture timestamp embedded in the image file name.

    Assumes the name looks like
    ``image data <N> <YYYY-Mon-DD HH MM SS ffffff>.<ext>``.
    """
    stem = impath.split(".")[0]
    timestamp_text = re.split(r"image data \d+ ", stem)[1]
    return datetime.datetime.strptime(timestamp_text, '%Y-%b-%d %H %M %S %f')
# Load the data, sort by frame number
basePath = "D:/create lidar trafik data/newer data/ImageData/"
impaths = list(list_images(basePath))
impaths = sorted(impaths, key=get_frame_number)

print("[INFO] building HDF5 dataset...")
outputPath = basePath + "frames.hdf5"
# One 360x640 RGB frame per image on disk.
writer = HDF5DatasetWriter((len(impaths), 360, 640, 3), outputPath)

# initialize the progress bar
widgets = ["Building Dataset: ", progressbar.Percentage(), " ",
           progressbar.Bar(), " ", progressbar.ETA()]
pbar = progressbar.ProgressBar(maxval=len(impaths),
                               widgets=widgets).start()

for i, impath in enumerate(impaths):
    date = get_timestamp(impath)
    # Seconds since the Unix epoch, stored alongside each frame.
    ts = (date - datetime.datetime(1970, 1, 1)) / datetime.timedelta(seconds=1)
    image = cv2.imread(impath)
    image = imutils.resize(image, width=640)
    writer.add([image], [ts])
    pbar.update(i)

# close the HDF5 writer
pbar.finish()
writer.close()
touch.py | mendelmaker/dipn | 8 | 17736 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
import time
import cv2
from real.camera import Camera
from robot import Robot
from subprocess import Popen, PIPE
def get_camera_to_robot_transformation(camera):
    """Compute the camera->robot homography from four fiducial tags.

    Captures one frame, runs the external ``detect-from-file`` tag detector
    on it, and fits a perspective transform between the detected tag pixel
    coordinates and their known positions on the robot work surface.
    """
    color_img, depth_img = camera.get_data()
    cv2.imwrite("real/temp.jpg", color_img)
    p = Popen(['./real/detect-from-file', "real/temp.jpg"], stdin=PIPE, stdout=PIPE, stderr=PIPE)
    output, err = p.communicate()
    # The detector prints one line per tag: "<tag_id> <x> <y>"; exactly
    # four tags are expected to be in view.
    tag_info = output.decode("utf-8")
    tag_info = tag_info.split("\n")[:4]
    for i, info in enumerate(tag_info):
        tag_info[i] = info.split(" ")
    print(tag_info)
    tag_info = np.array(tag_info, dtype=np.float32)
    assert(tag_info.shape == (4, 3))
    tag_loc_camera = tag_info
    # Known tag centres in robot coordinates (metres), keyed by tag id.
    tag_loc_robot = {
        22: (270.15 / 1000, -637.0 / 1000),
        7: (255.35 / 1000, -247.6 / 1000),
        4: (-272.7 / 1000, -660.9 / 1000),
        2: (-289.8 / 1000, -274.2 / 1000)
    }
    camera_to_robot = cv2.getPerspectiveTransform(
        np.float32([tag[1:] for tag in tag_loc_camera]),
        np.float32([tag_loc_robot[tag[0]] for tag in tag_loc_camera]))
    return camera_to_robot
# User options (change me)
# --------------- Setup options ---------------
tcp_host_ip = '172.16.31.10' # IP and port to robot arm as TCP client (UR5)
tcp_host_ip = "172.19.97.157"
tcp_port = 30002
rtc_host_ip = '172.16.31.10' # IP and port to robot arm as real-time client (UR5)
rtc_host_ip = "172.19.97.157"
rtc_port = 30003
# Cols: min max, Rows: x y z (define workspace limits in robot coordinates)
workspace_limits = np.asarray([[0.3, 0.748], [-0.224, 0.224], [-0.255, -0.1]])
workspace_limits = np.asarray([[-0.237, 0.211], [-0.683, -0.235], [0.18, 0.4]])
# workspace_limits = np.asarray([[-0.224, 0.224], [-0.674, -0.226], [0.18, 0.4]])
# Cols: min max, Rows: x y z (define workspace limits in robot coordinates)
tool_orientation = [2.22, -2.22, 0]
tool_orientation = [0, -3.14, 0]
# ---------------------------------------------
# Move robot to home pose
robot = Robot(False, None, None, workspace_limits,
tcp_host_ip, tcp_port, rtc_host_ip, rtc_port,
False, None, None)
robot.open_gripper()
transformation_matrix = get_camera_to_robot_transformation(robot.camera)
# Slow down robot
robot.joint_acc = 1.4
robot.joint_vel = 1.05
# Callback function for clicking on OpenCV window
click_point_pix = ()
camera_color_img, camera_depth_img = robot.get_camera_data()
def mouseclick_callback(event, x, y, flags, param):
    # On left click: map the clicked pixel to robot coordinates via the
    # precomputed homography and move the arm above that point.
    if event == cv2.EVENT_LBUTTONDOWN:
        global camera, robot, click_point_pix
        click_point_pix = (x, y)

        # Get click point in camera coordinates
        # click_z = camera_depth_img[y][x] * robot.cam_depth_scale
        # click_x = np.multiply(x-robot.cam_intrinsics[0][2],click_z/robot.cam_intrinsics[0][0])
        # click_y = np.multiply(y-robot.cam_intrinsics[1][2],click_z/robot.cam_intrinsics[1][1])
        # if click_z == 0:
        #     return
        # click_point = np.asarray([click_x,click_y,click_z])
        # click_point.shape = (3,1)

        # # Convert camera to robot coordinates
        # # camera2robot = np.linalg.inv(robot.cam_pose)
        # camera2robot = robot.cam_pose
        # target_position = np.dot(camera2robot[0:3,0:3],click_point) + camera2robot[0:3,3:]
        # target_position = target_position[0:3,0]
        # print(target_position)

        # Homogeneous pixel -> robot-plane coordinates (divide by w).
        camera_pt = np.array([x, y, 1])
        robot_pt = np.dot(transformation_matrix, camera_pt)
        robot_pt = np.array([robot_pt[0], robot_pt[1]]) / robot_pt[2]
        print([robot_pt[0], robot_pt[1], -0.1])
        print(robot.parse_tcp_state_data(robot.get_state(), "cartesian_info"))
        # Move 0.3 m above the work surface at the clicked location.
        robot.move_to([robot_pt[0], robot_pt[1], 0.3], tool_orientation)
# Show color and depth frames
cv2.namedWindow('color')
cv2.setMouseCallback('color', mouseclick_callback)
cv2.namedWindow('depth')
while True:
camera_color_img, camera_depth_img = robot.get_camera_data()
bgr_data = cv2.cvtColor(camera_color_img, cv2.COLOR_RGB2BGR)
if len(click_point_pix) != 0:
bgr_data = cv2.circle(bgr_data, click_point_pix, 7, (0, 0, 255), 2)
cv2.imshow('color', bgr_data)
camera_depth_img[camera_depth_img < 0.19] = 0
cv2.imshow('depth', camera_depth_img)
if cv2.waitKey(1) == ord('c'):
break
cv2.destroyAllWindows()
| 2.59375 | 3 |
py/WB-Klein/5/5.4_cc.py | kassbohm/wb-snippets | 0 | 17737 | # Header starts here.
from sympy.physics.units import *
from sympy import *
# Rounding:
import decimal
from decimal import Decimal as DX
from copy import deepcopy
def iso_round(obj, pv, rounding=decimal.ROUND_HALF_EVEN):
    """Round `obj` to the place value `pv` acc. to DIN EN ISO 80000-1:2013-08.

    place value = Rundestellenwert

    ``obj`` may be a single number or a mutable, indexable sequence of
    numbers (e.g. a sympy Matrix); sequences are rounded element-wise on a
    deep copy.

    :param obj: scalar or sequence of values convertible to float
    :param pv: place value; one of 1, 0.1, ..., 1e-10
    :param rounding: a ``decimal`` rounding mode (banker's rounding by
        default, as the ISO standard requires)
    :return: the rounded copy (a Decimal for scalar input)
    """
    # Fixes vs. the original: a stray unused `import sympy` sat before the
    # docstring (which also stopped the string from being a docstring), and
    # a bare `except:` masked real errors in the sequence branch.
    allowed_place_values = {
        1,            # round to integer
        0.1,          # 1st digit after decimal
        0.01,         # 2nd
        0.001,        # 3rd
        0.0001,       # 4th
        0.00001,      # 5th
        0.000001,     # 6th
        0.0000001,    # 7th
        0.00000001,   # 8th
        0.000000001,  # 9th
        0.0000000001, # 10th
    }
    assert pv in allowed_place_values
    quantum = DX(str(pv))
    objc = deepcopy(obj)
    try:
        # Scalar path: anything directly convertible to float.
        objc = DX(str(float(objc))).quantize(quantum, rounding=rounding)
    except (TypeError, ValueError):
        # Sequence path: round each element in place on the copy.
        for i in range(len(objc)):
            objc[i] = DX(str(float(objc[i]))).quantize(quantum, rounding=rounding)
    return objc
# LateX:
kwargs = {}
kwargs["mat_str"] = "bmatrix"
kwargs["mat_delim"] = ""
# kwargs["symbol_names"] = {FB: "F^{\mathsf B}", }
# Units:
(k, M, G ) = ( 10**3, 10**6, 10**9 )
(mm, cm) = ( m/1000, m/100 )
Newton = kg*m/s**2
Pa = Newton/m**2
MPa = M*Pa
GPa = G*Pa
kN = k*Newton
deg = pi/180
half = S(1)/2
# Header ends here.
#
EA, l, F1, F2 = var("EA, l, F1, F2")
sub_list = [
( EA, 2 *Pa*m**2 ),
( l, 1 *m ),
( F1, 1 *Newton /2 ), # due to symmetry
( F2, 2 *Newton /2 ), # due to symmetry
]
def k(phi):
    """Element stiffness matrix of a 2D truss bar, without the EA/l factor.

    phi is the angle between:
    1. vector along global x axis
    2. vector along 1-2-axis of truss
    phi is counted positively about z.
    """
    # pprint("phi / deg:")
    # pprint(N(deg(phi),3))
    (c, s) = ( cos(phi), sin(phi) )
    (cc, ss, sc) = ( c*c, s*s, s*c)
    # 4x4 bar stiffness in the DOF ordering [u1x, u1y, u2x, u2y].
    return Matrix(
        [
        [ cc, sc, -cc, -sc],
        [ sc, ss, -sc, -ss],
        [-cc, -sc, cc, sc],
        [-sc, -ss, sc, ss],
        ])
(p1, p2, p3) = (315*pi/180, 0 *pi/180, 45 *pi/180)
# k2 uses only 1/2 A due to symmetry:
(k1, k2, k3) = (EA/l*k(p1), EA/2/l*k(p2), EA/l*k(p3))
pprint("\nk1 / (EA / l): ")
pprint(k1 / (EA/l) )
pprint("\nk2 / (EA / l): ")
pprint(k2 / (EA/l) )
pprint("\nk3 / (EA / l): ")
pprint(k3 / (EA/l) )
K = EA/l*Matrix([
[ 1 , -S(1)/2 ],
[ -S(1)/2, 1 ]
])
u2x, u3x = var("u2x, u3x")
u = Matrix([u2x , u3x ])
f = Matrix([F1 , F2 ])
u2x, u3x = var("u2x, u3x")
eq = Eq(K*u , f)
sol = solve(eq, [u2x, u3x])
pprint("\nSolution:")
pprint(sol)
u2x, u3x = sol[u2x], sol[u3x]
pprint("\nu2x / m:")
tmp = u2x.subs(sub_list)
tmp /= m
pprint(tmp)
pprint("\nu3x / m:")
tmp = u3x.subs(sub_list)
tmp /= m
pprint(tmp)
pprint("\nF1x / N:")
tmp = - EA/l * u2x/2
tmp = tmp.subs(sub_list)
tmp /= Newton
pprint(tmp)
# k1 / (EA / l):
# ⎡1/2 -1/2 -1/2 1/2 ⎤
# ⎢ ⎥
# ⎢-1/2 1/2 1/2 -1/2⎥
# ⎢ ⎥
# ⎢-1/2 1/2 1/2 -1/2⎥
# ⎢ ⎥
# ⎣1/2 -1/2 -1/2 1/2 ⎦
#
# k2 / (EA / l):
# ⎡1/2 0 -1/2 0⎤
# ⎢ ⎥
# ⎢ 0 0 0 0⎥
# ⎢ ⎥
# ⎢-1/2 0 1/2 0⎥
# ⎢ ⎥
# ⎣ 0 0 0 0⎦
#
# k3 / (EA / l):
# ⎡1/2 1/2 -1/2 -1/2⎤
# ⎢ ⎥
# ⎢1/2 1/2 -1/2 -1/2⎥
# ⎢ ⎥
# ⎢-1/2 -1/2 1/2 1/2 ⎥
# ⎢ ⎥
# ⎣-1/2 -1/2 1/2 1/2 ⎦
#
# Solution:
# ⎧ 2⋅l⋅(2⋅F₁ + F₂) 2⋅l⋅(F₁ + 2⋅F₂)⎫
# ⎨u2x: ───────────────, u3x: ───────────────⎬
# ⎩ 3⋅EA 3⋅EA ⎭
#
# u2x / m:
# 2/3
#
# u3x / m:
# 5/6
#
# F1x / N:
# -2/3
| 2.8125 | 3 |
ucc_csv_create.py | MasonDMitchell/HackNC-2019 | 0 | 17738 | #!/usr/bin/python3
import csv
# Source dictionaries: one "code description" pair per line.
ucc_dictionary_file_list = [
    './downloads/diary08/diary08/uccd08.txt',
    './downloads/diary09/diary09/uccd09.txt',
    './downloads/diary11/diary11/uccd11.txt',
    './downloads/diary10/diary10/uccd10.txt',
]

# Merge every yearly dictionary into one {code: description} mapping;
# later files overwrite earlier entries for the same code.
cleaned_ucc_dictionary = {}
for path in ucc_dictionary_file_list:
    with open(path) as handle:
        entries = handle.read().splitlines()
    for entry in entries:
        parts = entry.split(" ", 1)
        cleaned_ucc_dictionary[int(parts[0])] = parts[1]

# Write the merged mapping out as a two-column CSV.
with open('cleaned_ucc_dictionary.csv', 'w', newline='') as csvfile:
    writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
    writer.writerows(cleaned_ucc_dictionary.items())
# print(len(cleaned_ucc_dictionary.keys()))
# print(line_list) | 2.734375 | 3 |
eventsourcing/system/ray.py | gerbyzation/eventsourcing | 0 | 17739 | <filename>eventsourcing/system/ray.py
import datetime
import os
import traceback
from inspect import ismethod
from queue import Empty, Queue
from threading import Event, Lock, Thread
from time import sleep
from typing import Dict, Optional, Tuple, Type
import ray
from eventsourcing.application.process import ProcessApplication
from eventsourcing.application.simple import (
ApplicationWithConcreteInfrastructure,
Prompt,
PromptToPull,
is_prompt_to_pull,
)
from eventsourcing.domain.model.decorators import retry
from eventsourcing.domain.model.events import subscribe, unsubscribe
from eventsourcing.exceptions import (
EventSourcingError,
ExceptionWrapper,
OperationalError,
ProgrammingError,
RecordConflictError,
)
from eventsourcing.infrastructure.base import (
DEFAULT_PIPELINE_ID,
RecordManagerWithNotifications,
)
from eventsourcing.system.definition import (
AbstractSystemRunner,
System,
TProcessApplication,
)
from eventsourcing.system.rayhelpers import RayDbJob, RayPrompt
from eventsourcing.system.raysettings import ray_init_kwargs
from eventsourcing.system.runner import DEFAULT_POLL_INTERVAL
# Initialise Ray once at import time with the project-wide settings.
ray.init(**ray_init_kwargs)

# Bound on the internal DB-job and upstream-event queues (backpressure).
MAX_QUEUE_SIZE = 1
# Maximum number of notifications pulled from an upstream per request.
PAGE_SIZE = 20
# Cooperative yield between queue operations (0 disables the pause).
MICROSLEEP = 0.000
# Feature flags: push notification ray-object IDs / whole notification
# objects along with prompts, instead of having downstreams pull them.
PROMPT_WITH_NOTIFICATION_IDS = False
PROMPT_WITH_NOTIFICATION_OBJS = False
# When True, optimistically request a full page past the prompted head.
GREEDY_PULL_NOTIFICATIONS = True
class RayRunner(AbstractSystemRunner):
    """
    Uses actor model framework to run a system of process applications.

    One RayProcess actor is started per (process application, pipeline)
    pair; the actors are then wired to their upstream and downstream
    peers before the system starts processing.
    """

    def __init__(
        self,
        system: System,
        pipeline_ids=(DEFAULT_PIPELINE_ID,),
        poll_interval: Optional[int] = None,
        setup_tables: bool = False,
        sleep_for_setup_tables: int = 0,
        db_uri: Optional[str] = None,
        **kwargs
    ):
        super(RayRunner, self).__init__(system=system, **kwargs)
        self.pipeline_ids = list(pipeline_ids)
        self.poll_interval = poll_interval
        self.setup_tables = setup_tables or system.setup_tables
        self.sleep_for_setup_tables = sleep_for_setup_tables
        self.db_uri = db_uri
        # Maps (process name, pipeline id) -> RayProcess actor handle.
        self.ray_processes: Dict[Tuple[str, int], RayProcess] = {}

    def start(self):
        """
        Starts all the actors to run a system of process applications.
        """
        # Check we have the infrastructure classes we need.
        for process_class in self.system.process_classes.values():
            if not isinstance(process_class, ApplicationWithConcreteInfrastructure):
                if not self.infrastructure_class:
                    raise ProgrammingError("infrastructure_class is not set")
                elif not issubclass(
                    self.infrastructure_class, ApplicationWithConcreteInfrastructure
                ):
                    raise ProgrammingError(
                        "infrastructure_class is not a subclass of {}".format(
                            ApplicationWithConcreteInfrastructure
                        )
                    )

        # Get the DB_URI.
        # Todo: Support different URI for different application classes.
        env_vars = {}
        db_uri = self.db_uri or os.environ.get("DB_URI")
        if db_uri is not None:
            env_vars["DB_URI"] = db_uri

        # Start one actor per (process, pipeline) combination.
        for pipeline_id in self.pipeline_ids:
            for process_name, process_class in self.system.process_classes.items():
                ray_process_id = RayProcess.remote(
                    application_process_class=process_class,
                    infrastructure_class=self.infrastructure_class,
                    env_vars=env_vars,
                    poll_interval=self.poll_interval,
                    pipeline_id=pipeline_id,
                    setup_tables=self.setup_tables,
                )
                self.ray_processes[(process_name, pipeline_id)] = ray_process_id

        # Wire each actor to its peers, then block until all actors have
        # finished initialising (ray.get on the collected init futures).
        init_ids = []
        for key, ray_process in self.ray_processes.items():
            process_name, pipeline_id = key
            upstream_names = self.system.upstream_names[process_name]
            downstream_names = self.system.downstream_names[process_name]
            downstream_processes = {
                name: self.ray_processes[(name, pipeline_id)]
                for name in downstream_names
            }
            upstream_processes = {}
            for upstream_name in upstream_names:
                upstream_process = self.ray_processes[(upstream_name, pipeline_id)]
                upstream_processes[upstream_name] = upstream_process
            init_ids.append(
                ray_process.init.remote(upstream_processes, downstream_processes)
            )
        ray.get(init_ids)

    def get_ray_process(self, process_name, pipeline_id=DEFAULT_PIPELINE_ID):
        """Return the actor handle for the named process and pipeline."""
        assert isinstance(process_name, str)
        return self.ray_processes[(process_name, pipeline_id)]

    def close(self):
        """Close the runner and ask every actor to stop (fire-and-forget)."""
        super(RayRunner, self).close()
        for process in self.ray_processes.values():
            process.stop.remote()

    def get(
        self, process_class: Type[TProcessApplication], pipeline_id=DEFAULT_PIPELINE_ID
    ) -> TProcessApplication:
        """Return a local proxy that forwards method calls to the actor."""
        assert issubclass(process_class, ProcessApplication)
        process_name = process_class.create_name()
        ray_process = self.get_ray_process(process_name, pipeline_id)
        return ProxyApplication(ray_process)
@ray.remote
class RayProcess:
    """
    Ray actor hosting one process application of the system.

    Internally runs four daemon threads:
      - db_jobs: executes all database work serially from a queue;
      - _process_prompts: pulls notifications from upstreams when prompted;
      - _process_events: processes pulled domain events through the app;
      - _push_prompts: pushes prompts to downstream actors.
    """

    def __init__(
        self,
        application_process_class: Type[ProcessApplication],
        infrastructure_class: Type[ApplicationWithConcreteInfrastructure],
        env_vars: dict = None,
        pipeline_id: int = DEFAULT_PIPELINE_ID,
        poll_interval: int = None,
        setup_tables: bool = False,
    ):
        # Process application args.
        self.application_process_class = application_process_class
        self.infrastructure_class = infrastructure_class
        self.daemon = True
        self.pipeline_id = pipeline_id
        self.poll_interval = poll_interval or DEFAULT_POLL_INTERVAL
        self.setup_tables = setup_tables
        if env_vars is not None:
            os.environ.update(env_vars)
        # Setup threads, queues, and threading events.
        self.readers_lock = Lock()
        self._has_been_prompted = Event()
        self.heads_lock = Lock()
        # Latest prompted notification ID per upstream process name.
        self.heads = {}
        self.positions_lock = Lock()
        # Last processed notification ID per upstream process name.
        self.positions = {}
        self.positions_initialised = Event()
        self.db_jobs_queue = Queue(maxsize=MAX_QUEUE_SIZE)
        self.upstream_event_queue = Queue(maxsize=MAX_QUEUE_SIZE)
        self.downstream_prompt_queue = Queue()  # no maxsize, call() can put prompt
        self.has_been_stopped = Event()
        self.db_jobs_thread = Thread(target=self.db_jobs)
        self.db_jobs_thread.setDaemon(True)
        self.db_jobs_thread.start()
        self.process_prompts_thread = Thread(target=self._process_prompts)
        self.process_prompts_thread.setDaemon(True)
        self.process_prompts_thread.start()
        self.process_events_thread = Thread(target=self._process_events)
        self.process_events_thread.setDaemon(True)
        self.process_events_thread.start()
        self.push_prompts_thread = Thread(target=self._push_prompts)
        self.push_prompts_thread.setDaemon(True)
        self.push_prompts_thread.start()
        # (upstream name, notification id) -> ray object ID of notification.
        self._notification_rayids = {}
        # (upstream name, notification id) -> notification object.
        self._prompted_notifications = {}

    def db_jobs(self):
        """Thread target: serially execute queued RayDbJob items until stopped."""
        while not self.has_been_stopped.is_set():
            try:
                item = self.db_jobs_queue.get(timeout=1)
                self.db_jobs_queue.task_done()
            except Empty:
                if self.has_been_stopped.is_set():
                    break
            else:
                # None is the sentinel posted by stop().
                if item is None or self.has_been_stopped.is_set():
                    break
                db_job: RayDbJob = item
                try:
                    db_job.execute()
                except Exception as e:
                    # Errors captured on the job itself are re-raised to the
                    # caller in do_db_job(); only log unexpected ones here.
                    if db_job.error is None:
                        print(traceback.format_exc())
                        self._print_timecheck(
                            "Continuing after error running DB job:", e
                        )
                        sleep(1)

    @retry((OperationalError, RecordConflictError), max_attempts=100, wait=0.01)
    def do_db_job(self, method, args, kwargs):
        """Run `method(*args, **kwargs)` on the DB jobs thread and wait for it.

        Retries on transient DB errors; re-raises any error captured by the job.
        """
        db_job = RayDbJob(method, args=args, kwargs=kwargs)
        self.db_jobs_queue.put(db_job)
        db_job.wait()
        if db_job.error:
            raise db_job.error
        return db_job.result

    def init(self, upstream_processes: dict, downstream_processes: dict) -> None:
        """
        Initialise with actor handles for upstream and downstream processes.
        Need to initialise after construction so that all handles exist.
        """
        self.upstream_processes = upstream_processes
        self.downstream_processes = downstream_processes

        # Subscribe to broadcast prompts published by the process application.
        subscribe(handler=self._enqueue_prompt_to_pull, predicate=is_prompt_to_pull)

        # Construct process application object (mixing in infrastructure if
        # the class does not already carry it).
        process_class = self.application_process_class
        if not isinstance(process_class, ApplicationWithConcreteInfrastructure):
            if self.infrastructure_class:
                process_class = process_class.mixin(self.infrastructure_class)
            else:
                raise ProgrammingError("infrastructure_class is not set")

        class MethodWrapper(object):
            # Converts domain errors into ExceptionWrapper return values so
            # they can cross the Ray actor boundary (see ProxyMethod).
            def __init__(self, method):
                self.method = method

            def __call__(self, *args, **kwargs):
                try:
                    return self.method(*args, **kwargs)
                except EventSourcingError as e:
                    return ExceptionWrapper(e)

        class ProcessApplicationWrapper(object):
            # Wraps the application so every method access is error-wrapped.
            def __init__(self, process_application):
                self.process_application = process_application

            def __getattr__(self, item):
                attribute = getattr(self.process_application, item)
                if ismethod(attribute):
                    return MethodWrapper(attribute)
                else:
                    return attribute

        def construct_process():
            # Runs on the DB jobs thread (may create tables).
            return process_class(
                pipeline_id=self.pipeline_id, setup_table=self.setup_tables
            )

        process_application = self.do_db_job(construct_process, (), {})
        assert isinstance(process_application, ProcessApplication), process_application
        self.process_wrapper = ProcessApplicationWrapper(process_application)
        self.process_application = process_application

        for upstream_name, ray_notification_log in self.upstream_processes.items():
            # Make the process follow the upstream notification log.
            self.process_application.follow(upstream_name, ray_notification_log)

        self._reset_positions()
        self.positions_initialised.set()

    def _reset_positions(self):
        """Load recorded upstream positions from the database (on DB thread)."""
        self.do_db_job(self.__reset_positions, (), {})

    def __reset_positions(self):
        with self.positions_lock:
            for upstream_name in self.upstream_processes:
                recorded_position = self.process_application.get_recorded_position(
                    upstream_name
                )
                self.positions[upstream_name] = recorded_position

    def add_downstream_process(self, downstream_name, ray_process_id):
        """Register an additional downstream actor to receive prompts."""
        self.downstream_processes[downstream_name] = ray_process_id

    def call(self, method_name, *args, **kwargs):
        """
        Method for calling methods on process application object.
        """
        assert self.positions_initialised.is_set(), "Please call .init() first"
        if self.process_wrapper:
            method = getattr(self.process_wrapper, method_name)
            return self.do_db_job(method, args, kwargs)
        else:
            raise Exception(
                "Can't call method '%s' before process exists" % method_name
            )

    def prompt(self, prompt: RayPrompt) -> None:
        """Receive a prompt from an upstream actor and wake the prompt thread."""
        assert isinstance(prompt, RayPrompt), "Not a RayPrompt: %s" % prompt
        # Stash any ray object IDs carried by the prompt for later retrieval.
        for notification_id, rayid in prompt.notification_ids:
            self._notification_rayids[(prompt.process_name, notification_id)] = rayid
        latest_head = prompt.head_notification_id
        upstream_name = prompt.process_name
        if PROMPT_WITH_NOTIFICATION_OBJS:
            # Stash whole notification objects carried by the prompt.
            for notification in prompt.notifications:
                self._prompted_notifications[
                    (upstream_name, notification["id"])
                ] = notification
        if latest_head is not None:
            with self.heads_lock:
                # Update head from prompt; only wake the prompt-processing
                # thread when there is something new.
                if upstream_name in self.heads:
                    if latest_head > self.heads[upstream_name]:
                        self.heads[upstream_name] = latest_head
                        self._has_been_prompted.set()
                else:
                    self.heads[upstream_name] = latest_head
                    self._has_been_prompted.set()
        else:
            # Headless prompt: always wake, the thread will pull to find out.
            self._has_been_prompted.set()

    def _process_prompts(self) -> None:
        """Thread target: loop pulling upstream notifications until stopped."""
        # Loop until stop event is set.
        self.positions_initialised.wait()
        while not self.has_been_stopped.is_set():
            try:
                self.__process_prompts()
            except Exception as e:
                if not self.has_been_stopped.is_set():
                    print(traceback.format_exc())
                    print("Continuing after error in 'process prompts' thread:", e)
                    print()
                    sleep(1)

    def __process_prompts(self):
        # Wait until prompted.
        self._has_been_prompted.wait()
        if self.has_been_stopped.is_set():
            return
        # Snapshot the prompted heads under the lock, then clear the event.
        current_heads = {}
        with self.heads_lock:
            self._has_been_prompted.clear()
            for upstream_name in self.upstream_processes.keys():
                current_head = self.heads.get(upstream_name)
                current_heads[upstream_name] = current_head
        for upstream_name in self.upstream_processes.keys():
            with self.positions_lock:
                current_position = self.positions.get(upstream_name)
            first_id = current_position + 1  # request the next one
            current_head = current_heads[upstream_name]
            if current_head is None:
                # Headless prompt: pull open-ended.
                last_id = None
            elif current_position < current_head:
                if GREEDY_PULL_NOTIFICATIONS:
                    last_id = first_id + PAGE_SIZE - 1
                else:
                    last_id = min(current_head, first_id + PAGE_SIZE - 1)
            else:
                # Already up to date with this upstream.
                continue
            upstream_process = self.upstream_processes[upstream_name]
            notifications = []
            if PROMPT_WITH_NOTIFICATION_IDS or PROMPT_WITH_NOTIFICATION_OBJS:
                # Consume as many notifications as arrived with prompts,
                # advancing first_id past each one obtained locally.
                if last_id is not None:
                    for notification_id in range(first_id, last_id + 1):
                        if PROMPT_WITH_NOTIFICATION_IDS:
                            try:
                                rayid = self._notification_rayids.pop(
                                    (upstream_name, notification_id)
                                )
                            except KeyError:
                                break
                            else:
                                notification = ray.get(rayid)
                                notifications.append(notification)
                        elif PROMPT_WITH_NOTIFICATION_OBJS:
                            try:
                                notification = self._prompted_notifications.pop(
                                    (upstream_name, notification_id)
                                )
                            except KeyError:
                                break
                            else:
                                notifications.append(notification)
                        first_id += 1
            # Pull the ones we don't have.
            if last_id is None or first_id <= last_id:
                rayid = upstream_process.get_notifications.remote(first_id, last_id)
                _notifications = ray.get(rayid)
                notifications += _notifications
            if len(notifications):
                if len(notifications) == PAGE_SIZE:
                    # A full page may mean there is more waiting: re-prompt.
                    self._has_been_prompted.set()
                position = notifications[-1]["id"]
                with self.positions_lock:
                    current_position = self.positions[upstream_name]
                    if current_position is None or position > current_position:
                        self.positions[upstream_name] = position
                queue_item = []
                for notification in notifications:
                    # Check causal dependencies.
                    self.process_application.check_causal_dependencies(
                        upstream_name, notification.get("causal_dependencies")
                    )
                    # Get domain event from notification.
                    event = self.process_application.event_from_notification(
                        notification
                    )
                    # Put domain event on the queue, for event processing.
                    queue_item.append((event, notification["id"], upstream_name))
                self.upstream_event_queue.put(queue_item)
        sleep(MICROSLEEP)

    def get_notifications(self, first_notification_id, last_notification_id):
        """
        Returns a list of notifications, with IDs from first_notification_id
        to last_notification_id, inclusive. IDs are 1-based sequence.

        This is called by the "process prompts" thread of a downstream process.
        """
        return self.do_db_job(
            self._get_notifications, (first_notification_id, last_notification_id), {}
        )

    def _get_notifications(self, first_notification_id, last_notification_id):
        # Runs on the DB jobs thread; converts the 1-based inclusive range
        # into the record manager's 0-based half-open range.
        record_manager = self.process_application.event_store.record_manager
        assert isinstance(record_manager, RecordManagerWithNotifications)
        start = first_notification_id - 1
        stop = last_notification_id
        return list(record_manager.get_notifications(start, stop))

    def _process_events(self):
        """Thread target: loop processing queued upstream events until stopped."""
        while not self.has_been_stopped.is_set():
            try:
                self.__process_events()
            except Exception as e:
                print(traceback.format_exc())
                print("Continuing after error in 'process events' thread:", e)
                sleep(1)

    def __process_events(self):
        try:
            queue_item = self.upstream_event_queue.get()
            self.upstream_event_queue.task_done()
        except Empty:
            if self.has_been_stopped.is_set():
                return
        else:
            # None is the sentinel posted by stop().
            if queue_item is None or self.has_been_stopped.is_set():
                return
        for (domain_event, notification_id, upstream_name) in queue_item:
            new_events, new_records = (), ()
            # Retry processing until it succeeds or the actor is stopped.
            while not self.has_been_stopped.is_set():
                try:
                    new_events, new_records = self.do_db_job(
                        method=self.process_application.process_upstream_event,
                        args=(domain_event, notification_id, upstream_name),
                        kwargs={},
                    )
                    break
                except Exception as e:
                    print(traceback.format_exc())
                    self._print_timecheck(
                        "Retrying to reprocess event after error:", e
                    )
                    sleep(1)
                    # Todo: Forever? What if this is the wrong event?
            if self.has_been_stopped.is_set():
                return
            notifications = ()
            notification_ids = ()
            notifiable_events = [e for e in new_events if e.__notifiable__]
            if len(notifiable_events):
                if PROMPT_WITH_NOTIFICATION_IDS or PROMPT_WITH_NOTIFICATION_OBJS:
                    manager = self.process_application.event_store.record_manager
                    assert isinstance(manager, RecordManagerWithNotifications)
                    notification_id_name = manager.notification_id_name
                    notifications = []
                    for record in new_records:
                        if isinstance(
                            getattr(record, notification_id_name, None), int
                        ):
                            notifications.append(
                                manager.create_notification_from_record(record)
                            )
                    # NOTE(review): when both flags are on and no records are
                    # notifiable, head_notification_id is left unset here --
                    # harmless with the default (False) flags; confirm if the
                    # flags are ever enabled.
                    if len(notifications):
                        head_notification_id = notifications[-1]["id"]
                        if PROMPT_WITH_NOTIFICATION_IDS:
                            notification_ids = self._put_notifications_in_ray_object_store(
                                notifications
                            )
                            # Clear the notifications, avoid sending with IDs.
                            notifications = ()
                else:
                    head_notification_id = self._get_max_notification_id()
            else:
                head_notification_id = self._get_max_notification_id()
            prompt = RayPrompt(
                self.process_application.name,
                self.process_application.pipeline_id,
                head_notification_id,
                notification_ids,
                notifications,
            )
            self.downstream_prompt_queue.put(prompt)
            sleep(MICROSLEEP)

    def _put_notifications_in_ray_object_store(self, notifications):
        """Put each notification in the Ray object store; return (id, rayid) pairs."""
        notification_ids = [(n["id"], ray.put(n)) for n in notifications]
        return notification_ids

    def _enqueue_prompt_to_pull(self, prompt):
        # Handler for prompts published locally by the process application.
        self.downstream_prompt_queue.put(prompt)
        sleep(MICROSLEEP)

    def _push_prompts(self) -> None:
        """Thread target: loop pushing prompts to downstreams until stopped."""
        while not self.has_been_stopped.is_set():
            try:
                self.__push_prompts()
            except Exception as e:
                print(traceback.format_exc())
                print("Continuing after error in 'push prompts' thread:", e)
                sleep(1)

    def __push_prompts(self):
        try:
            item = self.downstream_prompt_queue.get()
            self.downstream_prompt_queue.task_done()
            # Todo: Instead, drain the queue and consolidate prompts.
        except Empty:
            self._print_timecheck(
                "timed out getting item from downstream prompt " "queue"
            )
            if self.has_been_stopped.is_set():
                return
        else:
            # None is the sentinel posted by stop().
            if item is None or self.has_been_stopped.is_set():
                return
            elif isinstance(item, PromptToPull):
                # Convert a locally published prompt into a RayPrompt.
                if item.head_notification_id:
                    head_notification_id = item.head_notification_id
                else:
                    head_notification_id = self._get_max_notification_id()
                prompt = RayPrompt(
                    self.process_application.name,
                    self.process_application.pipeline_id,
                    head_notification_id,
                )
            else:
                prompt = item
            prompt_response_ids = []
            for downstream_name, ray_process in self.downstream_processes.items():
                prompt_response_ids.append(ray_process.prompt.remote(prompt))
                if self.has_been_stopped.is_set():
                    return
            # Block until every downstream has acknowledged the prompt.
            ray.get(prompt_response_ids)

    def _get_max_notification_id(self):
        """
        Returns the highest notification ID of this process application.
        """
        record_manager = self.process_application.event_store.record_manager
        assert isinstance(record_manager, RecordManagerWithNotifications)
        max_notification_id = self.do_db_job(
            record_manager.get_max_notification_id, (), {}
        )
        return max_notification_id

    def stop(self):
        """
        Stops the process.

        Posts a None sentinel to each queue, sets all events so blocked
        threads wake up, joins the threads, closes the application and
        finally exits the Ray actor.
        """
        self.has_been_stopped.set()
        self.db_jobs_queue.put(None)
        self.upstream_event_queue.put(None)
        self.downstream_prompt_queue.put(None)
        self._has_been_prompted.set()
        self.positions_initialised.set()
        self.db_jobs_thread.join(timeout=1)
        assert not self.db_jobs_thread.is_alive(), (
            "DB jobs thread still alive"
        )
        self.process_events_thread.join(timeout=1)
        assert not self.process_events_thread.is_alive(), (
            "Process events thread still alive"
        )
        self.process_prompts_thread.join(timeout=1)
        assert not self.process_prompts_thread.is_alive(), (
            "Process prompts thread still alive"
        )
        self.push_prompts_thread.join(timeout=1)
        assert not self.push_prompts_thread.is_alive(), (
            "Push prompts thread still alive"
        )
        self.process_application.close()
        unsubscribe(handler=self._enqueue_prompt_to_pull, predicate=is_prompt_to_pull)
        ray.actor.exit_actor()

    def _print_timecheck(self, activity, *args):
        """Debug helper: print a timestamped trace line for this actor."""
        process_name = self.application_process_class.__name__.lower()
        print(
            "Timecheck",
            datetime.datetime.now(),
            self.pipeline_id,
            process_name,
            activity,
            *args
        )
class ProxyApplication:
    """Local stand-in for a remote process application.

    Attribute access produces a ProxyMethod that forwards the call to
    the underlying RayProcess actor.
    """

    def __init__(self, ray_process: RayProcess):
        self.ray_process: RayProcess = ray_process

    def __getattr__(self, name) -> "ProxyMethod":
        # Any attribute that is not found locally is assumed to be a
        # method of the remote process application.
        return ProxyMethod(self.ray_process, name)
class ProxyMethod:
    """Callable that forwards one named method to a RayProcess actor."""

    def __init__(self, ray_process: RayProcess, attribute_name: str):
        self.ray_process: RayProcess = ray_process
        self.attribute_name = attribute_name

    def __call__(self, *args, **kwargs):
        # Invoke the remote call() entry point and wait for the result.
        future = self.ray_process.call.remote(self.attribute_name, *args, **kwargs)
        outcome = ray.get(future)
        # Domain errors travel back wrapped; unwrap and re-raise locally.
        if isinstance(outcome, ExceptionWrapper):
            raise outcome.e
        return outcome
| 2.1875 | 2 |
authentik/stages/password/migrations/0007_app_password.py | BeryJu/passbook | 15 | 17740 | <reponame>BeryJu/passbook<gh_stars>10-100
# Generated by Django 3.2.6 on 2021-08-23 14:34
import django.contrib.postgres.fields
from django.apps.registry import Apps
from django.db import migrations, models
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from authentik.stages.password import BACKEND_APP_PASSWORD, BACKEND_INBUILT
def update_default_backends(apps: Apps, schema_editor: BaseDatabaseSchemaEditor):
    """Opt the stock password stage into the app-password backend.

    Appends BACKEND_APP_PASSWORD to the backends of the stage named
    "default-authentication-password", if such a stage exists.
    """
    PasswordStage = apps.get_model("authentik_stages_password", "passwordstage")
    db_alias = schema_editor.connection.alias
    stage = (
        PasswordStage.objects.using(db_alias)
        .filter(name="default-authentication-password")
        .first()
    )
    if stage is None:
        return
    stage.backends.append(BACKEND_APP_PASSWORD)
    stage.save()
class Migration(migrations.Migration):
    """Widen the password stage's backend choices and migrate existing data."""

    dependencies = [
        ("authentik_stages_password", "0006_passwordchange_rename"),
    ]

    operations = [
        # Re-declare the choices of the backends ArrayField so that the
        # new app-password backend becomes a selectable option.
        migrations.AlterField(
            model_name="passwordstage",
            name="backends",
            field=django.contrib.postgres.fields.ArrayField(
                base_field=models.TextField(
                    choices=[
                        ("authentik.core.auth.InbuiltBackend", "User database + standard password"),
                        ("authentik.core.auth.TokenBackend", "User database + app passwords"),
                        (
                            "authentik.sources.ldap.auth.LDAPBackend",
                            "User database + LDAP password",
                        ),
                    ]
                ),
                help_text="Selection of backends to test the password against.",
                size=None,
            ),
        ),
        # Data migration: enable the app-password backend on the default stage.
        migrations.RunPython(update_default_backends),
    ]
| 1.90625 | 2 |
article/tests/test_models.py | asb29/Redundant | 0 | 17741 | from django.test import TestCase
from django.contrib.auth.models import User
from article.models import Article, Category
class ArticleModelTestCase(TestCase):
    """Unit tests for the Article model."""

    def setUp(self):
        """Create the category and author shared by every test."""
        self.category = Category.objects.create(name=u'Sports')
        self.user = User.objects.create(username=u'test', password=u'<PASSWORD>')

    def test_save(self):
        """A created article retains the field values it was given."""
        article = Article.objects.create(
            title=u'test',
            content=u'test',
            author=self.user,
            category=self.category,
        )
        self.assertEqual(article.title, u'test')
        self.assertEqual(article.content, u'test')
        self.assertEqual(article.author, self.user)
        self.assertEqual(article.category, self.category)

    def test_unique_slug(self):
        """Two articles with identical titles get distinct slugs."""
        first = Article.objects.create(
            title=u'test',
            content=u'test',
            author=self.user,
            category=self.category,
        )
        second = Article.objects.create(
            title=u'test',
            content=u'test',
            author=self.user,
            category=self.category,
        )
        self.assertTrue(first.slug != second.slug)
| 2.546875 | 3 |
tests/test_import.py | GoodManWEN/typehints_checker | 0 | 17742 | import os , sys
# Make the package under test (checked out in the CWD) importable below.
sys.path.append(os.getcwd())
import pytest
from typehints_checker import *
@pytest.mark.asyncio
async def test_import():
... | 1.796875 | 2 |
models/FlagAttachment.py | jeffg2k/RootTheBox | 1 | 17743 | <reponame>jeffg2k/RootTheBox
# -*- coding: utf-8 -*-
"""
Created on Nov 24, 2014
@author: moloch
Copyright 2014 Root the Box
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from uuid import uuid4
from sqlalchemy import Column, ForeignKey
from sqlalchemy.types import Unicode, String, Integer
from models.BaseModels import DatabaseObject
from libs.StringCoding import encode, decode
from builtins import str
from tornado.options import options
class FlagAttachment(DatabaseObject):
    """
    These are files that the administrator wants to
    distribute alongside a flag.

    Only metadata lives in the database; the file body is stored
    base64-encoded on disk under ``options.flag_attachment_dir`` using
    the row's UUID as the file name.
    """

    # Random identifier; doubles as the on-disk file name.
    uuid = Column(String(36), unique=True, nullable=False, default=lambda: str(uuid4()))
    # Owning flag.
    flag_id = Column(Integer, ForeignKey("flag.id"), nullable=False)
    # Sanitised original file name (display only), max 64 chars.
    _file_name = Column(Unicode(64), nullable=False)

    @property
    def file_name(self):
        """Sanitised name of the attachment."""
        return self._file_name

    @file_name.setter
    def file_name(self, value):
        # Strip newlines and any directory components; cap at the 64
        # characters the column can hold.
        fname = value.replace("\n", "").replace("\r", "")
        self._file_name = str(os.path.basename(fname))[:64]

    @property
    def data(self):
        """Raw file contents, base64-decoded from the file on disk."""
        with open(options.flag_attachment_dir + "/" + self.uuid, "rb") as fp:
            return decode(fp.read(), "base64")

    @data.setter
    def data(self, value):
        # Assign the UUID up front so the row and the file share a name.
        if self.uuid is None:
            self.uuid = str(uuid4())
        # NOTE(review): byte_size is not a mapped Column in this class --
        # confirm it is defined on DatabaseObject or a mixin.
        self.byte_size = len(value)
        with open(options.flag_attachment_dir + "/" + self.uuid, "wb") as fp:
            # NOTE(review): if libs.StringCoding.encode returns bytes, the
            # str(...) here would write the "b'...'" repr; relies on encode
            # returning str -- confirm against libs.StringCoding.
            fp.write(str(encode(value, "base64")).encode())

    def delete_data(self):
        """ Remove the file from the file system, if it exists """
        fpath = options.flag_attachment_dir + "/" + self.uuid
        if os.path.exists(fpath) and os.path.isfile(fpath):
            os.unlink(fpath)
| 1.867188 | 2 |
connman_dispatcher/detect.py | a-sk/connman-dispatcher | 4 | 17744 | import glib
import dbus
from dbus.mainloop.glib import DBusGMainLoop
from pyee import EventEmitter
import logbook
# Module-level logger for the dispatcher.
logger = logbook.Logger('connman-dispatcher')

# Only the event-emitting detector is part of the public API.
__all__ = ['detector']
def property_changed(_, message):
    """D-Bus message filter: translate connman state changes into events.

    Emits 'up' on the detector when the state becomes 'online' (and we
    were offline), and 'down' when the state becomes 'idle'.
    """
    if message.get_member() != "PropertyChanged":
        return
    _, state = message.get_args_list()
    if state == 'online' and detector.state == 'offline':
        logger.info('network state change: online' )
        detector.emit('up')
        detector.state = 'online'
    elif state == 'idle':
        logger.info('network state change: offline' )
        detector.emit('down')
        detector.state = 'offline'
# Public event emitter: fires 'up'/'down' as connectivity changes.
detector = EventEmitter()

# Wire the D-Bus system bus so property_changed sees connman signals.
DBusGMainLoop(set_as_default=True)
bus = dbus.SystemBus()
bus.add_match_string_non_blocking("interface='net.connman.Manager'")
bus.add_message_filter(property_changed)

# Handle to connman's Manager interface, used for state queries.
manager = dbus.Interface(bus.get_object('net.connman', "/"), 'net.connman.Manager')
def is_online():
    """Return True when connman reports the global state as 'online'."""
    return manager.GetProperties()['State'] == 'online'
def run():
    """Start the dispatcher: emit the initial state, then enter the main loop.

    Queries connman once for the current state so listeners get an
    initial 'up' event when already online, then blocks in the glib
    main loop dispatching subsequent state-change signals.
    """
    detector.state = 'offline'
    # Bug fix: previously `if is_online:` tested the function object
    # itself (always truthy), so 'up' was emitted even while offline.
    # The function must be called to query connman's actual state.
    if is_online():
        detector.emit('up')
        detector.state = 'online'
    mainloop = glib.MainLoop()
    mainloop.run()

# Expose run() through the shared detector object.
detector.run = run
| 2.203125 | 2 |
integration/test/test_profile_overflow.py | avilcheslopez/geopm | 0 | 17745 | #!/usr/bin/env python3
#
# Copyright (c) 2015 - 2022, Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
"""
Runs an application with a large number of short regions and checks
that the controller successfully runs.
"""
import sys
import unittest
import os
import subprocess
import glob
import geopmpy.io
import geopmpy.agent
import geopmdpy.error
import geopmdpy.topo
from integration.test import geopm_test_launcher
from integration.test import check_trace
class AppConf(object):
    """Class that is used by the test launcher in place of a
    geopmpy.io.BenchConf when running the profile_overflow benchmark.

    The benchmark needs no configuration file and takes no arguments,
    so write() is a no-op and get_exec_args() returns an empty list.
    """

    def write(self):
        """Called by the test launcher prior to executing the test application
        to write any files required by the application.
        """
        pass

    def get_exec_path(self):
        """Path to benchmark filled in by template automatically.
        """
        benchmark_dir = os.path.dirname(os.path.realpath(__file__))
        return os.path.join(benchmark_dir, '.libs', 'test_profile_overflow')

    def get_exec_args(self):
        """Returns a list of strings representing the command line arguments
        to pass to the test-application for the next run. This is
        especially useful for tests that execute the test-application
        multiple times.
        """
        return []
class TestIntegration_profile_overflow(unittest.TestCase):
    """Integration test for the test_profile_overflow benchmark.

    Launches the benchmark once in setUpClass() and then validates the
    generated report and trace files.
    """

    @classmethod
    def setUpClass(cls):
        """Create launcher, execute benchmark and set up class variables.
        """
        sys.stdout.write('(' + os.path.basename(__file__).split('.')[0] +
                         '.' + cls.__name__ + ') ...')
        test_name = 'test_profile_overflow'
        # Output artifact paths shared by the test methods below.
        cls._report_path = '{}.report'.format(test_name)
        cls._trace_path = '{}.trace'.format(test_name)
        cls._log_path = '{}.log'.format(test_name)
        cls._agent_conf_path = test_name + '-agent-config.json'
        # Set the job size parameters such that we have a 3 level tree
        os.environ["GEOPM_MAX_FAN_OUT"] = "2"
        num_node = 4
        # Leave two cores free per node for OS / GEOPM overhead.
        num_rank = geopmdpy.topo.num_domain(geopmdpy.topo.DOMAIN_CORE) - 2
        time_limit = 600
        # Configure the test application
        app_conf = AppConf()
        # Configure the agent
        agent_conf = geopmpy.agent.AgentConf(cls._agent_conf_path)
        # Create the test launcher with the above configuration
        launcher = geopm_test_launcher.TestLauncher(app_conf,
                                                    agent_conf,
                                                    cls._report_path,
                                                    cls._trace_path,
                                                    time_limit=time_limit)
        launcher.set_num_node(num_node)
        launcher.set_num_rank(num_rank)
        # Run the test application
        try:
            launcher.run(test_name)
        except subprocess.CalledProcessError:
            sys.stderr.write('{} failed; check log for details.\n'.format(test_name))
            raise

    @classmethod
    def tearDownClass(cls):
        # Undo the fan-out override installed in setUpClass().
        os.environ.pop("GEOPM_MAX_FAN_OUT")

    def test_load_report(self):
        '''
        Test that the report can be loaded.
        '''
        report = geopmpy.io.RawReport(self._report_path)
        hosts = report.host_names()
        for hh in hosts:
            runtime = report.raw_totals(hh)['runtime (s)']
            self.assertNotEqual(0, runtime)

    def test_short_region_count(self):
        '''
        Test that the count for MPI_Barrier is as expected.
        '''
        report = geopmpy.io.RawReport(self._report_path)
        hosts = report.host_names()
        for hh in hosts:
            region_data = report.raw_region(hh, 'MPI_Barrier')
            count = region_data['count']
            self.assertEqual(count, 10000000)

    def test_sample_rate(self):
        '''
        Test that the sample rate is regular.
        '''
        traces = glob.glob(self._trace_path + "*")
        if len(traces) == 0:
            # Fixed: previously referenced the undefined attribute
            # self._trace_path_prefix, which raised AttributeError instead
            # of the intended RuntimeError.
            raise RuntimeError("No traces found with prefix: {}".format(self._trace_path))
        for tt in traces:
            check_trace.check_sample_rate(tt, 0.005)
if __name__ == '__main__':
    # Allow running this integration test directly as a script.
    unittest.main()
| 2.484375 | 2 |
Objects/optAlignRNA.py | MooersLab/jupyterlabpymolpysnipsplus | 0 | 17746 | # Description: OptiAlign.py by <NAME> modified for aligning multiple RNA structures.
# Source: Generated while helping Miranda Adams at U of Saint Louis.
"""
cmd.do('python')
cmd.do(' ##############################################################################')
cmd.do('#')
cmd.do('# @SUMMARY: -- QKabsch.py. A python implementation of the optimal superposition')
cmd.do('# of two sets of vectors as proposed by Kabsch 1976 & 1978.')
cmd.do('#')
cmd.do('# @AUTHOR: <NAME>')
cmd.do('# @COPYRIGHT: <NAME> (C), 2005-2007')
cmd.do('# @LICENSE: Released under GPL:')
cmd.do('# This program is free software; you can redistribute it and/or modify')
cmd.do('# it under the terms of the GNU General Public License as published by')
cmd.do('# the Free Software Foundation; either version 2 of the License, or')
cmd.do('# (at your option) any later version.')
cmd.do('# This program is distributed in the hope that it will be useful, but WITHOUT')
cmd.do('# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS')
cmd.do('# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.')
cmd.do('#')
cmd.do('# You should have received a copy of the GNU General Public License along with')
cmd.do('# this program; if not, write to the Free Software Foundation, Inc., 51 Franklin')
cmd.do('# Street, Fifth Floor, Boston, MA 02110-1301, USA ')
cmd.do('#')
cmd.do('# DATE : 2007-01-01')
cmd.do('# REV : 2')
cmd.do('# REQUIREMENTS: numpy')
cmd.do('#')
cmd.do('#')
cmd.do('# Modified optAlign.py to use C1' carbon atoms of RNA for alignment.')
cmd.do('# Jan. 29, 2020 ')
cmd.do('# <NAME>, PhD')
cmd.do('# Univ. of Oklahoma Health Sciences Center')
cmd.do('#')
cmd.do('#############################################################################')
cmd.do('from array import *')
cmd.do(' ')
cmd.do('# system stuff')
cmd.do('import os')
cmd.do('import copy')
cmd.do(' ')
cmd.do('# pretty printing')
cmd.do('import pprint')
cmd.do(' ')
cmd.do('# for importing as a plugin into PyMol')
cmd.do('from pymol import cmd')
cmd.do('from pymol import stored')
cmd.do('from pymol import selector')
cmd.do(' ')
cmd.do('# using numpy for linear algebra')
cmd.do('import numpy')
cmd.do(' ')
cmd.do('def optAlignRNA( sel1, sel2 ):')
cmd.do(' """')
cmd.do(' optAlignRNA performs the Kabsch alignment algorithm upon the C1' carbons of two selections.')
cmd.do(' Example: optAlignRNA 1JU7 and i. 1-16 and n. C1', 1CLL and i. 4-146 and n. C1'')
cmd.do(' ')
cmd.do(' Two RMSDs are returned. One comes from the Kabsch algorithm and the other from')
cmd.do(' PyMOL based upon your selections.')
cmd.do(' ')
cmd.do(' This function can be run in a for loop to fit multiple structures with a common prefix name:')
cmd.do(' ')
cmd.do(' for x in cmd.get_names(): optAlignRNA(x, "1JU7_0001")')
cmd.do(' ')
cmd.do(' or get the rmsds for all combinations, do the following:')
cmd.do(' ')
cmd.do(' [[optAlignRNA(x, y) for x in cmd.get_names()] for y in cmd.get_names()]')
cmd.do('')
cmd.do(' """')
cmd.do(' cmd.reset()')
cmd.do(' ')
cmd.do(' # make the lists for holding coordinates')
cmd.do(' # partial lists')
cmd.do(' stored.sel1 = []')
cmd.do(' stored.sel2 = []')
cmd.do(' # full lists')
cmd.do(' stored.mol1 = []')
cmd.do(' stored.mol2 = []')
cmd.do(' ')
cmd.do(' # -- CUT HERE')
cmd.do(' sel1 += " and N. C1'"')
cmd.do(' sel2 += " and N. C1'"')
cmd.do(' # -- CUT HERE')
cmd.do(' ')
cmd.do(' # Get the selected coordinates. We')
cmd.do(' # align these coords.')
cmd.do(' cmd.iterate_state(1, selector.process(sel1), "stored.sel1.append([x,y,z])")')
cmd.do(' cmd.iterate_state(1, selector.process(sel2), "stored.sel2.append([x,y,z])")')
cmd.do(' ')
cmd.do(' # get molecule name')
cmd.do(' mol1 = cmd.identify(sel1,1)[0][0]')
cmd.do(' mol2 = cmd.identify(sel2,1)[0][0]')
cmd.do(' ')
cmd.do(' # Get all molecule coords. We do this because')
cmd.do(' # we have to rotate the whole molcule, not just')
cmd.do(' # the aligned selection')
cmd.do(' cmd.iterate_state(1, mol1, "stored.mol1.append([x,y,z])")')
cmd.do(' cmd.iterate_state(1, mol2, "stored.mol2.append([x,y,z])")')
cmd.do(' ')
cmd.do(' # check for consistency')
cmd.do(' assert len(stored.sel1) == len(stored.sel2)')
cmd.do(' L = len(stored.sel1)')
cmd.do(' assert L > 0')
cmd.do(' ')
cmd.do(' # must alway center the two proteins to avoid')
cmd.do(' # affine transformations. Center the two proteins')
cmd.do(' # to their selections.')
cmd.do(' COM1 = numpy.sum(stored.sel1,axis=0) / float(L)')
cmd.do(' COM2 = numpy.sum(stored.sel2,axis=0) / float(L)')
cmd.do(' stored.sel1 -= COM1')
cmd.do(' stored.sel2 -= COM2')
cmd.do(' ')
cmd.do(' # Initial residual, see Kabsch.')
cmd.do(' E0 = numpy.sum( numpy.sum(stored.sel1 * stored.sel1,axis=0),axis=0) + numpy.sum( numpy.sum(stored.sel2 * stored.sel2,axis=0),axis=0)')
cmd.do(' ')
cmd.do(' #')
cmd.do(' # This beautiful step provides the answer. V and Wt are the orthonormal')
cmd.do(' # bases that when multiplied by each other give us the rotation matrix, U.')
cmd.do(' # S, (Sigma, from SVD) provides us with the error! Isn't SVD great!')
cmd.do(' V, S, Wt = numpy.linalg.svd( numpy.dot( numpy.transpose(stored.sel2), stored.sel1))')
cmd.do(' ')
cmd.do(' # we already have our solution, in the results from SVD.')
cmd.do(' # we just need to check for reflections and then produce')
cmd.do(' # the rotation. V and Wt are orthonormal, so their det's')
cmd.do(' # are +/-1.')
cmd.do(' reflect = float(str(float(numpy.linalg.det(V) * numpy.linalg.det(Wt))))')
cmd.do(' ')
cmd.do(' if reflect == -1.0:')
cmd.do(' S[-1] = -S[-1]')
cmd.do(' V[:,-1] = -V[:,-1]')
cmd.do(' ')
cmd.do(' RMSD = E0 - (2.0 * sum(S))')
cmd.do(' RMSD = numpy.sqrt(abs(RMSD / L))')
cmd.do(' ')
cmd.do(' #U is simply V*Wt')
cmd.do(' U = numpy.dot(V, Wt)')
cmd.do(' ')
cmd.do(' # rotate and translate the molecule')
cmd.do(' stored.sel2 = numpy.dot((stored.mol2 - COM2), U)')
cmd.do(' stored.sel2 = stored.sel2.tolist()')
cmd.do(' # center the molecule')
cmd.do(' stored.sel1 = stored.mol1 - COM1')
cmd.do(' stored.sel1 = stored.sel1.tolist()')
cmd.do(' ')
cmd.do(' # let PyMol know about the changes to the coordinates')
cmd.do(' cmd.alter_state(1,mol1,"(x,y,z)=stored.sel1.pop(0)")')
cmd.do(' cmd.alter_state(1,mol2,"(x,y,z)=stored.sel2.pop(0)")')
cmd.do(' ')
cmd.do(' #print("Moved: %s Reference: %s RMSD = %f" % mol1, mol2, RMSD)')
cmd.do(' print("% s, % s,% 5.3f" % (mol1, mol2, RMSD))')
cmd.do(' ')
cmd.do(' # make the alignment OBVIOUS')
cmd.do(' cmd.hide("everything")')
cmd.do(' cmd.show("ribbon", sel1 + " or " + sel2)')
cmd.do(' cmd.color("gray70", mol1 )')
cmd.do(' cmd.color("magenta", mol2 )')
cmd.do(' cmd.color("red", "visible")')
cmd.do(' cmd.show("ribbon", "not visible")')
cmd.do(' cmd.center("visible")')
cmd.do(' cmd.orient()')
cmd.do(' cmd.zoom("visible")')
cmd.do(' ')
cmd.do('cmd.extend("optAlignRNA", optAlignRNA)')
cmd.do('python end')
"""
cmd.do('python')
cmd.do(' ##############################################################################')
cmd.do('#')
cmd.do('# @SUMMARY: -- QKabsch.py. A python implementation of the optimal superposition')
cmd.do('# of two sets of vectors as proposed by Kabsch 1976 & 1978.')
cmd.do('#')
cmd.do('# @AUTHOR: <NAME>')
cmd.do('# @COPYRIGHT: <NAME> (C), 2005-2007')
cmd.do('# @LICENSE: Released under GPL:')
cmd.do('# This program is free software; you can redistribute it and/or modify')
cmd.do('# it under the terms of the GNU General Public License as published by')
cmd.do('# the Free Software Foundation; either version 2 of the License, or')
cmd.do('# (at your option) any later version.')
cmd.do('# This program is distributed in the hope that it will be useful, but WITHOUT')
cmd.do('# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS')
cmd.do('# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.')
cmd.do('#')
cmd.do('# You should have received a copy of the GNU General Public License along with')
cmd.do('# this program; if not, write to the Free Software Foundation, Inc., 51 Franklin')
cmd.do('# Street, Fifth Floor, Boston, MA 02110-1301, USA ')
cmd.do('#')
cmd.do('# DATE : 2007-01-01')
cmd.do('# REV : 2')
cmd.do('# REQUIREMENTS: numpy')
cmd.do('#')
cmd.do('#')
cmd.do('# Modified optAlign.py to use C1' carbon atoms of RNA for alignment.')
cmd.do('# Jan. 29, 2020 ')
cmd.do('# <NAME>, PhD')
cmd.do('# Univ. of Oklahoma Health Sciences Center')
cmd.do('#')
cmd.do('#############################################################################')
cmd.do('from array import *')
cmd.do(' ')
cmd.do('# system stuff')
cmd.do('import os')
cmd.do('import copy')
cmd.do(' ')
cmd.do('# pretty printing')
cmd.do('import pprint')
cmd.do(' ')
cmd.do('# for importing as a plugin into PyMol')
cmd.do('from pymol import cmd')
cmd.do('from pymol import stored')
cmd.do('from pymol import selector')
cmd.do(' ')
cmd.do('# using numpy for linear algebra')
cmd.do('import numpy')
cmd.do(' ')
cmd.do('def optAlignRNA( sel1, sel2 ):')
cmd.do(' """')
cmd.do(' optAlignRNA performs the Kabsch alignment algorithm upon the C1' carbons of two selections.')
cmd.do(' Example: optAlignRNA 1JU7 and i. 1-16 and n. C1', 1CLL and i. 4-146 and n. C1'')
cmd.do(' ')
cmd.do(' Two RMSDs are returned. One comes from the Kabsch algorithm and the other from')
cmd.do(' PyMOL based upon your selections.')
cmd.do(' ')
cmd.do(' This function can be run in a for loop to fit multiple structures with a common prefix name:')
cmd.do(' ')
cmd.do(' for x in cmd.get_names(): optAlignRNA(x, "1JU7_0001")')
cmd.do(' ')
cmd.do(' or get the rmsds for all combinations, do the following:')
cmd.do(' ')
cmd.do(' [[optAlignRNA(x, y) for x in cmd.get_names()] for y in cmd.get_names()]')
cmd.do('')
cmd.do(' """')
cmd.do(' cmd.reset()')
cmd.do(' ')
cmd.do(' # make the lists for holding coordinates')
cmd.do(' # partial lists')
cmd.do(' stored.sel1 = []')
cmd.do(' stored.sel2 = []')
cmd.do(' # full lists')
cmd.do(' stored.mol1 = []')
cmd.do(' stored.mol2 = []')
cmd.do(' ')
cmd.do(' # -- CUT HERE')
cmd.do(' sel1 += " and N. C1'"')
cmd.do(' sel2 += " and N. C1'"')
cmd.do(' # -- CUT HERE')
cmd.do(' ')
cmd.do(' # Get the selected coordinates. We')
cmd.do(' # align these coords.')
cmd.do(' cmd.iterate_state(1, selector.process(sel1), "stored.sel1.append([x,y,z])")')
cmd.do(' cmd.iterate_state(1, selector.process(sel2), "stored.sel2.append([x,y,z])")')
cmd.do(' ')
cmd.do(' # get molecule name')
cmd.do(' mol1 = cmd.identify(sel1,1)[0][0]')
cmd.do(' mol2 = cmd.identify(sel2,1)[0][0]')
cmd.do(' ')
cmd.do(' # Get all molecule coords. We do this because')
cmd.do(' # we have to rotate the whole molcule, not just')
cmd.do(' # the aligned selection')
cmd.do(' cmd.iterate_state(1, mol1, "stored.mol1.append([x,y,z])")')
cmd.do(' cmd.iterate_state(1, mol2, "stored.mol2.append([x,y,z])")')
cmd.do(' ')
cmd.do(' # check for consistency')
cmd.do(' assert len(stored.sel1) == len(stored.sel2)')
cmd.do(' L = len(stored.sel1)')
cmd.do(' assert L > 0')
cmd.do(' ')
cmd.do(' # must alway center the two proteins to avoid')
cmd.do(' # affine transformations. Center the two proteins')
cmd.do(' # to their selections.')
cmd.do(' COM1 = numpy.sum(stored.sel1,axis=0) / float(L)')
cmd.do(' COM2 = numpy.sum(stored.sel2,axis=0) / float(L)')
cmd.do(' stored.sel1 -= COM1')
cmd.do(' stored.sel2 -= COM2')
cmd.do(' ')
cmd.do(' # Initial residual, see Kabsch.')
cmd.do(' E0 = numpy.sum( numpy.sum(stored.sel1 * stored.sel1,axis=0),axis=0) + numpy.sum( numpy.sum(stored.sel2 * stored.sel2,axis=0),axis=0)')
cmd.do(' ')
cmd.do(' #')
cmd.do(' # This beautiful step provides the answer. V and Wt are the orthonormal')
cmd.do(' # bases that when multiplied by each other give us the rotation matrix, U.')
cmd.do(' # S, (Sigma, from SVD) provides us with the error! Isn't SVD great!')
cmd.do(' V, S, Wt = numpy.linalg.svd( numpy.dot( numpy.transpose(stored.sel2), stored.sel1))')
cmd.do(' ')
cmd.do(' # we already have our solution, in the results from SVD.')
cmd.do(' # we just need to check for reflections and then produce')
cmd.do(' # the rotation. V and Wt are orthonormal, so their det's')
cmd.do(' # are +/-1.')
cmd.do(' reflect = float(str(float(numpy.linalg.det(V) * numpy.linalg.det(Wt))))')
cmd.do(' ')
cmd.do(' if reflect == -1.0:')
cmd.do(' S[-1] = -S[-1]')
cmd.do(' V[:,-1] = -V[:,-1]')
cmd.do(' ')
cmd.do(' RMSD = E0 - (2.0 * sum(S))')
cmd.do(' RMSD = numpy.sqrt(abs(RMSD / L))')
cmd.do(' ')
cmd.do(' #U is simply V*Wt')
cmd.do(' U = numpy.dot(V, Wt)')
cmd.do(' ')
cmd.do(' # rotate and translate the molecule')
cmd.do(' stored.sel2 = numpy.dot((stored.mol2 - COM2), U)')
cmd.do(' stored.sel2 = stored.sel2.tolist()')
cmd.do(' # center the molecule')
cmd.do(' stored.sel1 = stored.mol1 - COM1')
cmd.do(' stored.sel1 = stored.sel1.tolist()')
cmd.do(' ')
cmd.do(' # let PyMol know about the changes to the coordinates')
cmd.do(' cmd.alter_state(1,mol1,"(x,y,z)=stored.sel1.pop(0)")')
cmd.do(' cmd.alter_state(1,mol2,"(x,y,z)=stored.sel2.pop(0)")')
cmd.do(' ')
cmd.do(' #print("Moved: %s Reference: %s RMSD = %f" % mol1, mol2, RMSD)')
cmd.do(' print("% s, % s,% 5.3f" % (mol1, mol2, RMSD))')
cmd.do(' ')
cmd.do(' # make the alignment OBVIOUS')
cmd.do(' cmd.hide("everything")')
cmd.do(' cmd.show("ribbon", sel1 + " or " + sel2)')
cmd.do(' cmd.color("gray70", mol1 )')
cmd.do(' cmd.color("magenta", mol2 )')
cmd.do(' cmd.color("red", "visible")')
cmd.do(' cmd.show("ribbon", "not visible")')
cmd.do(' cmd.center("visible")')
cmd.do(' cmd.orient()')
cmd.do(' cmd.zoom("visible")')
cmd.do(' ')
cmd.do('cmd.extend("optAlignRNA", optAlignRNA)')
cmd.do('python end')
| 2.3125 | 2 |
2020/day11.py | asmeurer/advent-of-code | 0 | 17747 | <reponame>asmeurer/advent-of-code<filename>2020/day11.py
test_input = """
L.LL.LL.LL
LLLLLLL.LL
L.L.L..L..
LLLL.LL.LL
L.LL.LL.LL
L.LLLLL.LL
..L.L.....
LLLLLLLLLL
L.LLLLLL.L
L.LLLLL.LL
"""
test_input2 = """
.......#.
...#.....
.#.......
.........
..#L....#
....#....
.........
#........
...#.....
"""
test_input3 = """
.............
.L.L.#.#.#.#.
.............
"""
test_input4 = """
.##.##.
#.#.#.#
##...##
...L...
##...##
#.#.#.#
.##.##.
"""
input = """
LL.LL.LLLLLL.LLLLLLLLLLLLLLLLLL.LLLLL..LLLLLLL.LLLLLLLLLLLL.LLLL.LLLLL.LL.LLLLLL.LLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLL.LL.LLLLLLLLLLLLLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLLLLLLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLL.LLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLL.LLLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLLLLLL
LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLLLLLLLL
.LL...LL.L.L....LL..LL..L.L.L..L.....L...LL.....LLL..L..L..L.....L.L..LLLL...LL.LL.L.......
LLLLLLLLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLL..LLLLLLLLLLLLLLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLLLLLLL.LLLLLLLLLLLLLL.LLLLL.LLLLLLLLLLL.LLLLLLLL.LLLLLLLLLLL.LLLLL
LLLLLLLLLLLL.LLLLLLLLLLLLL.LLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLLLLLLLLLL.LL.LLLLLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLLLLLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLL.LLLLLLLLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLLLLLLLL
LLLLLLLLLLLLLLLLLLLLL.LLLL.LLLLLLLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLLLLLLLLL.LLLL.LLLLL
LL.L......L...LL....L...L.LL.L.....L.LL.L....L...LLL....LL.....LL.L.LLL...LL.L...LLL.L.L...
LLLLLLLLLLLL.LLLLLLLL.L.LL.L.LLLLLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLL.LLLLL
LLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLLLLLLLLL.LLLLLLLLLL
LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLL.LLLLLL.LLLLLLLLLL.LL.LLLLLLLLLLL.LLLLLLLLLLLLLLL.LLLL.LLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLL.LLLL.LLLLLL.L.LLLLL.LLLLLLLLLLLL.LLLL.LLLLLLL..LLLLLL.LLLL.LLLLL
LLLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLL.LLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLL.L.LL.LLLLL
.LLLL.LLLLLL.LLLLLLLL.LLLLLLLLLLLLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLL.LLLLLL.LLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLL.LLLL.LLLLL
...L..L......L..L.L.......LL...L.LL.L...LL...L..LL....L....L.L..L...L...L.L.....LL.....L..L
LLLLL.LLLLLL.LLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLL.LLLLLLLLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLLLLL.LL
LLLLL.LLLLLLLL.LL.LLLLLLLL.LLLL.LLLLLL.LLLLLLLLLLL.L.LLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLL.LLL.LLL.LLLLL.LLLLLL.LLLL.LLLLLLLLLLLLLLL.LLLL.LLLLL
LLLLLLLLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLLLLLLLLLL.L.LLL.LLLLLL.LLLL.LLLLLLLL.LLLLLL.LLLLLLLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLL.LLLLLLLLLL
LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLL.LLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLLLLLLLL
LLLLLLLLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLL.LLLLLLLLLLL.LLLLL.LLLLLLLLLLLLLLL.LLLLLLLLLL
.......LL.L.L...LL..L....LL....L.L.L....L......L..LL...LL.LLL..L....L......L.LLL.L.....LLLL
LLLLL.LLLLLLLLLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLLLLLLLLLL.LLLL.LLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLL.LLLL.LLLLLLLLLL.LLLLLLLLL.LLLL.L.LLLL.LLLLLLLL.LLLLLL.L.LLLLLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLL.LLLLLLLLL.
LLLLL.LLLLLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLL.LLL.LLLLLLLL.LLLL.LLLLLLLL.LLLLLL.LLL..LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLLLLLLLLLL.LLLLLLLLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLLLLLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLLLLLLL.LLLLLLLLLLLLLL.LLLLL.LLLLLLLLLLL.LLLLLLLL.LLLLLLLLLLL.LLLLL
LLLLLLLLLLLL.LLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLLLLLLLLLLLLL.LLLLLLL
LLLLL.LLLLLL.LL.LLLLLLLLLL.LLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLLLLLLLLLL.LLLLL.LLLLLLLLLLL.LLL.LLLL.LLLLLLLLLLLLLLLLL
.L........L..L.L.LLLLL.......LL.......L..L.L..LL.L......L.......LLL..LLL.LL...L.L...L.LL.L.
LLLLL.LLLLLL.LLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLL.LLLLLLLLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLLLLLLLLLLLLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLL.LLLL.LLLLL
LLLLL..LLLLL.LLLLLLLL.LLLL.LLL..LLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLL.LLLLLLLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLLLLLLLL
LLLLL.LLLLL..LLLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLL.LLLLL.LLLLLLLLLLL.LLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLL.LLLLLL.LLLLLLL.LLLLLLLLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLL.LLLLL
..L..LL.......L.LLLL.L.....L...L.LL...LLLLL.L.....L..L...LL.LL..L..LLLLLL..........LL.....L
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLLLLLLLLLLLLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLL.LLLLLLLLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLL.LLLLL
LLLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLL.LLLLL.LLLLLLLLLLL.LLLLLLLLLLLLLLL.LLLLLLLLLL
LLLLLLLLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLLLLLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLL.LLLLL
LLLLL..LLLLL.LLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLLLLLLLLL.LLLLLLLLLLLLL.LLLL.LLLLLL..LLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLLL.LL.LLLLLLLL.LLLLL
LLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLL.LLLL..LLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLL.LLLLL
L...LL....L..L..LL.........L.L...LL..LL.L....L...........LL.L.......L.L.L.......L..L..LL..L
LLLLL.LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLL.LLLLL.LL.LLL.LLLL.LLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLL.LLLL.LLLLLL.L.LLLLLLLLLLL.LL.LLL.LLLLLLLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLLLLLLLLLLLLLLLLLL.LLLL.LLLL.L.LLLL.LLLLLLLLLLLL..L.LLLL.L.LL.LLLLLLLL.LLLLLLLLLLLLLLLL.
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLLLLLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLLLLLLLL
LLLLL.LLLLLL.LLLLLLLLL.LLL.LLLL.LLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLLLLLLLLL.LLLLLLLL.LLLLLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLLL.LLLLLL.LLLLLLLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLL.LLLLL.LLLLLLLLLLL.LLLLLLLLLLLLLLL.LLLLLLLLLL
LLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLLLLLLLL
.....L.LLL...LL..LL.....L....LL.......L...LL..L..L...L...L.LL.LL.LL...LL..LLL.L..LLL..LLLL.
LLLLLLLLLLLLLLLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLLLLLLLLL.LLLLLLLL.LLLLLLLLL.LLLLLLLLLLLL.L.LLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLL.LLLLLLLLL.LLLLLLLLL.LLLLLLLLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLLLLLLLL
LLLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLLL.LLLLLLLLLLL.LLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLL..LLL.LLLLLLLLLLLLLL.LLLL..LLLLLLLLLLL.LLLLLLLL.LLLLLL.LLLLLLLLLL
LLLLLLLLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLL.LLLLLLLL.LL.LLLLLLLLLLLLL.LL.LLL.LLLLLLLLLLLLL.LLLLLL.LLLL.LLLLL
..L..LL.........L....L.L.L.L...L....L...........LL....L..L...L.LL..L..LL.L..LL..L..L.L..L.L
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLLLLLLLLLLLL.LLLL.LLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLL.LLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLLLLLLLL
LLLLL.LLLLLLLLLLLLLLLLLLLL.LLLL.LLLLLL.LLLLLLL.LLLLLLLLLLLL.LLLL.LLLLLLLL.LLLLLL.LLLLLLLLLL
LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLLLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLLLLLLLLLL.LLLL.LLLLL
....L............L....LL......L.LLL.LLL....LL.....L..L.LL.L........L..L......L.LLL..LL..LL.
LL.LLLLLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLLLLLL
LLLLL.LLLLLL.LLLLLLLL.L.LLLLLLL.LLLLLLLLLLLLLL.LLLLLLLLLLLLLLLLL.LLLLLLLL..LLLLL.LLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLL.LLLLLL.LLLLLLL.LLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLLLLLL
LLLLLLLLLLLL.LLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL..LLLLLLLLLLLLLLLLLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLLLLLLLLLLL.LL.LLLLLLLLLLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLLLLLLLLL.LLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLL.L.LLLL.LLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLLLLLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLLLLLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLLL.LLLLLL.LLLLLLLLLL
.L......LLL...L.L.LL.L.....LL.L..L.L.LLLLL....LL..L...L..L.....L.L...L...L.L.LL.LL.L.......
LLLLLLLLLLLL.LLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLLLLLLLLL.LLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLL.LLLLL.LLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLL.LLLLLLLLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLL.LLLLL
LLLLLLLLLLLLLLLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLLLLLLLLLL.LLLLLLLLLL
LLLLL.LLLLLLLLLLLLLL..LLLLLLLLL.LLLLLL.LLLLLLL.LLLLL.LLLLL..LLLL.LLLLLLLLLLLLLLLLLLLLLLLLLL
LLLLLLLLLLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLL.LLLL.LLLLL
"""
import numpy as np
val = {'L': -1, '#': 1, '.': 0}
rval = {v: k for k, v in val.items()}


def strtoarray(text):
    """Parse a seat-layout string into a 2-D array (-1 empty, 1 occupied, 0 floor)."""
    rows = text.strip().splitlines()
    return np.array([[val[ch] for ch in row] for row in rows])


def arraytostr(a):
    """Render a seat array back into its textual grid form."""
    if a.ndim == 1:
        a = a.reshape((1, a.size))
    lines = []
    for row in a:
        lines.append(''.join(rval[cell] for cell in row))
    return '\n'.join(lines)
def adjacent(a, i, j):
    """Return the values of the up-to-8 immediate neighbors of cell (i, j)."""
    rows, cols = a.shape
    neighbors = [
        a[r, c]
        for r in (i - 1, i, i + 1)
        for c in (j - 1, j, j + 1)
        if (r, c) != (i, j) and 0 <= r < rows and 0 <= c < cols
    ]
    return np.array(neighbors)
def adjacent2(a, i, j):
    """Return the first seat value (-1 or 1) visible along each of the 8 rays."""
    rows, cols = a.shape
    seen = []
    directions = [(di, dj) for di in (-1, 0, 1) for dj in (-1, 0, 1)
                  if (di, dj) != (0, 0)]
    for di, dj in directions:
        r, c = i + di, j + dj
        # Walk outward until a seat is found or the grid edge is reached.
        while 0 <= r < rows and 0 <= c < cols:
            cell = a[r, c]
            if cell in (-1, 1):
                seen.append(cell)
                break
            r += di
            c += dj
    return np.array(seen)
def apply_rules(a):
    """Apply one generation of the part-1 seating rules.

    Empty seats with no occupied neighbors fill; occupied seats with 4 or
    more occupied neighbors empty.  Returns (new_array, changed_flag);
    the input array is not modified.
    """
    out = a.copy()
    rows, cols = a.shape
    changed = False
    for i in range(rows):
        for j in range(cols):
            cell = a[i, j]
            if cell == 0:
                continue  # floor never changes
            occupied = np.sum(adjacent(a, i, j) == 1)
            if cell == -1 and occupied == 0:
                out[i, j] = 1
                changed = True
            elif cell == 1 and occupied >= 4:
                out[i, j] = -1
                changed = True
    return out, changed
def generations(a):
    """Run apply_rules repeatedly until no seat changes, printing each step.

    Returns the final, stable seat array.
    """
    gen = 0
    changed = True
    while changed:
        print("Generation", gen)
        print(arraytostr(a))
        print()
        a, changed = apply_rules(a)
        gen += 1
    return a
def apply_rules2(a):
    """Apply one generation of the part-2 (line-of-sight) seating rules.

    Uses the first visible seat in each direction and a tolerance of 5
    occupied seats.  Returns (new_array, changed_flag); the input array
    is not modified.
    """
    out = a.copy()
    rows, cols = a.shape
    changed = False
    for i in range(rows):
        for j in range(cols):
            cell = a[i, j]
            if cell == 0:
                continue  # floor never changes
            occupied = np.sum(adjacent2(a, i, j) == 1)
            if cell == -1 and occupied == 0:
                out[i, j] = 1
                changed = True
            elif cell == 1 and occupied >= 5:
                out[i, j] = -1
                changed = True
    return out, changed
def generations2(a):
    """Run apply_rules2 repeatedly until no seat changes, printing each step.

    Returns the final, stable seat array.
    """
    gen = 0
    changed = True
    while changed:
        print("Generation", gen)
        print(arraytostr(a))
        print()
        a, changed = apply_rules2(a)
        gen += 1
    return a
print("Day 11")
print("Part 1")
print("Test input")
testa = strtoarray(test_input)
print(test_input)
print(testa)
print(arraytostr(testa))
print("Adjacent to 0, 0", arraytostr(adjacent(testa, 0, 0)))
print("Adjacent to 2, 2", arraytostr(adjacent(testa, 2, 2)))
test_finala = generations(testa)
print(np.sum(test_finala == 1))
print("Puzzle input")
a = strtoarray(input)
finala = generations(a)
print(np.sum(finala == 1))
print("Part 2")
print("Test input")
testa2 = strtoarray(test_input2)
assert testa2[4, 3] == -1
print(adjacent2(testa2, 4, 3))
testa3 = strtoarray(test_input3)
assert testa3[1, 3] == -1
print(adjacent2(testa3, 1, 3))
testa4 = strtoarray(test_input4)
assert testa4[3, 3] == -1
print(adjacent2(testa4, 3, 3))
test_finala = generations2(testa)
print(np.sum(test_finala==1))
print("Puzzle input")
finala = generations2(a)
print(np.sum(finala == 1))
| 1.296875 | 1 |
final/runner_2.py | Pluriscient/sma2c-ipd | 0 | 17748 | <gh_stars>0
from SMA2CAgent import SMA2CAgent
from A2CAgent import A2CAgent
from RandomAgent import RandomAgent
# from .SMA2CAgent import SMA2CAgent
import gym
import numpy as np
from IPD_fixed import IPDEnv
import axelrod
import time
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--rounds", help='number of rounds to play per episode', type=int, default=20)
parser.add_argument("--episodes", help='number of episodes to play', type=int, default=1000)
parser.add_argument("--seed", help='random seed, -1 if random', type=int, default=-1)
parser.add_argument("--output", help="output folder", default=f'output-{time.time():.0f}')
parser.add_argument("--pure-a2c", help="Don't use an encoder", action='store_true')
parser.add_argument("--alpha", help='LR of encoder', type=float)
parser.add_argument("--beta", help = 'LR of A2C agent', type=float)
parser.add_argument("--lstm-dims", help='LSTM dimensions', type=int)
parser.add_argument("--encoder-fc", help='dimensions of encoder dense layers',type=int, action='append')
parser.add_argument("--a2c-fc", help='dimensions of a2c hidden layers', type=int, action='append')
parser.add_argument("--latent-dims", help='dimensions of code', type=int)
parser.add_argument("opponents", help='opponents that the bot should face', nargs="*")
parser.add_argument("--random", help="Don't use an agent, just random", action='store_true')
# parser.add_argument("")
args = parser.parse_args()
opponents = []
strats = dict([(s.name.lower(), s) for s in axelrod.all_strategies])
for opp in args.opponents:
if opp not in strats:
print(f'{opp} not found in strats')
s = strats[opp]
opponents.append(s)
env = IPDEnv({'rounds': args.rounds, 'opponents' : opponents})
seed = args.seed if args.seed != -1 else None
env.seed(seed=seed)
# remove empty values
config = {k: v for k, v in vars(args).items() if v is not None}
if config['pure_a2c']:
print("____USING PURE A2C_____")
agent= A2CAgent(env, config)
elif config['random']:
print("__RANDOM AGENT___")
agent = RandomAgent(env, config)
else:
print("____USING SMA2C______")
agent = SMA2CAgent(env, config)
# obs = env.reset()
# action = agent.act(obs, 0, 0, 1)
# print(f'resulting action: {action}')
# encodings_before = np.array(agent.encode_run(axelrod.Cooperator()))
# print(f'encodings before: {encodings_before}')
agent.run(episodes=args.episodes)
# encodings_after_c = np.array(agent.encode_run(axelrod.Cooperator()))
# encodings_after_d = np.array(agent.encode_run(axelrod.Defector()))
# print(f'encodings after: {encodings_after_c}')
# print(encodings_after_d)
agent.save()
| 2.234375 | 2 |
295-find-median-from-data-stream/295-find-median-from-data-stream.py | Dawit-Getachew/A2SV_Practice | 0 | 17749 | <gh_stars>0
import heapq as h
class MedianFinder:
    """Running-median container backed by two heaps.

    ``leftHalf`` is a max-heap (values stored negated) holding the smaller
    half of the numbers; ``rightHalf`` is a min-heap holding the larger
    half.  ``leftHalf`` always has the same length as, or one more element
    than, ``rightHalf``, so the median is either the top of ``leftHalf`` or
    the mean of the two heap tops.
    """

    def __init__(self):
        self.rightHalf = []  # min-heap: larger half of the numbers
        self.leftHalf = []   # max-heap (negated values): smaller half

    def addNum(self, num: int) -> None:
        """Insert *num* in O(log n), rebalancing to keep the size invariant.

        Fixed: the original pushed then popped in two separate calls (and
        bound the useless None return of heappush); heappushpop does the
        combined operation in a single, cheaper heap traversal.
        """
        if len(self.leftHalf) > len(self.rightHalf):
            # Route the overall maximum of (leftHalf + num) to the right heap.
            h.heappush(self.rightHalf, -h.heappushpop(self.leftHalf, -num))
        else:
            # Route the overall minimum of (rightHalf + num) to the left heap.
            h.heappush(self.leftHalf, -h.heappushpop(self.rightHalf, num))

    def findMedian(self) -> float:
        """Return the median of all numbers added so far."""
        if len(self.leftHalf) == len(self.rightHalf):
            return (-self.leftHalf[0] + self.rightHalf[0]) / 2
        return -self.leftHalf[0]
# Your MedianFinder object will be instantiated and called as such:
# obj = MedianFinder()
# obj.addNum(num)
# param_2 = obj.findMedian() | 3.140625 | 3 |
mottak-arkiv-service/tests/routers/mappers/test_metadatafil.py | omBratteng/mottak | 4 | 17750 | <reponame>omBratteng/mottak
import pytest
from app.domain.models.Metadatafil import Metadatafil, MetadataType
from app.exceptions import InvalidContentType
from app.routers.mappers.metadafil import _get_file_content, metadatafil_mapper, _content_type2metadata_type
def test__content_type2metadata_type__success():
    """
    GIVEN the string 'text/xml' as content_type
    WHEN calling the method _content_type2metadata_type
    THEN check that return value is MetadataType.XML_METS
    """
    result = _content_type2metadata_type('text/xml')
    assert result == MetadataType.XML_METS
def test__content_type2metadata_type__failure():
    """
    GIVEN the string 'text' as content_type
    WHEN calling the method _content_type2metadata_type
    THEN check that an InvalidContentType exception is raised
    """
    with pytest.raises(InvalidContentType):
        _content_type2metadata_type('text')
def test__get_file_content(testfile, testfile_content):
    """
    GIVEN a file with testdata where the content is an METS/XML file
    WHEN calling the method _get_file_content
    THEN check that the returned string is correct
    """
    assert _get_file_content(testfile) == testfile_content
def test_metadatafil_mapper(testfile, testfile_content):
    """
    GIVEN a file with testdata where the content is an METS/XML file
    WHEN calling the method metadatafil_mapper
    THEN check that the returned Metadatafil object is correct
    """
    expected = Metadatafil(
        filnavn="df53d1d8-39bf-4fea-a741-58d472664ce2.xml",
        type_=MetadataType.XML_METS,
        innhold=testfile_content)
    assert vars(metadatafil_mapper(testfile)) == vars(expected)
| 2.515625 | 3 |
src/niweb/apps/noclook/templatetags/rack_tags.py | emjemj/ni | 0 | 17751 | from django import template
register = template.Library()
RACK_SIZE_PX = 20
MARGIN_HEIGHT = 2
def _rack_unit_to_height(units):
# for every unit over 1 add a 2 px margin
margin = (units - 1) * MARGIN_HEIGHT
return units * RACK_SIZE_PX + margin
def _equipment_spacer(units):
    """Build a placeholder entry that fills `units` empty rack units."""
    pixel_height = _rack_unit_to_height(units)
    return {
        'units': units,
        'spacer': True,
        'height': "{}px".format(pixel_height),
    }
def _rack_sort(item):
# Sort by rack position, sencoded by unit size
pos = int(item.get('node').data.get('rack_position', -1))
size = int(item.get('node').data.get('rack_units', 0)) * -1
return (pos, size)
def _to_int(value, default):
    """Coerce *value* to int, returning *default* for missing or unparsable
    values (None, '', non-numeric strings).  0 is preserved as 0."""
    try:
        return int(value)
    except (TypeError, ValueError):
        return default


def _equipment(item):
    """Normalise a node into the view dict consumed by the rack template.

    Returns the unit count, the (1-based) rack position, the inclusive end
    position, the rendered pixel height, an empty ``sub_equipment`` list for
    overlapping items, the front/back flag and the raw node data.

    Previously ``rack_units`` and the ``position_end`` lookup crashed with a
    ValueError on empty-string values, while ``position`` already tolerated
    them via an ``or 0`` guard; the guarding is now consistent.
    """
    data = item.get('node').data
    units = _to_int(data.get('rack_units', 1), 1)
    return {
        'units': units,
        'position': _to_int(data.get('rack_position', 0), 0),
        # Inclusive end slot.  NOTE: the historical default of 1 (not 0) for
        # a missing rack_position is preserved here.
        'position_end': units + _to_int(data.get('rack_position', 1), 1) - 1,
        'height': "{}px".format(_rack_unit_to_height(units)),
        'sub_equipment': [],
        'is_back': data.get('rack_back'),
        'data': data,
    }
def place_equipment(view_data, current_idx, last_eq, result):
    """Append *view_data* to *result* at its rack position.

    A spacer is inserted for any gap before the item; an item overlapping
    the previously placed one is nested under it instead of stacked.
    Returns the updated fill index and the item now at the top of the rack.
    """
    gap = view_data['position'] - current_idx
    if gap < 0:
        # Overlaps the previous item: nest it rather than stacking it.
        last_eq['sub_equipment'].append(view_data)
        return current_idx, last_eq
    if gap > 0:
        result.append(_equipment_spacer(gap))
    result.append(view_data)
    return view_data['position'] + view_data['units'], view_data
@register.inclusion_tag('noclook/tags/rack.html')
def noclook_rack(rack, equipment):
    """Render a rack with its equipment laid out front and back.

    Items with a positive rack_position are placed (with spacers for gaps)
    on the appropriate side; everything else is listed as unracked.
    """
    if equipment:
        equipment.sort(key=_rack_sort)
    front, back, unracked = [], [], []
    front_idx = back_idx = 1
    front_top = back_top = None
    for item in equipment:
        view_data = _equipment(item)
        if view_data['position'] <= 0:
            unracked.append(item)
            continue
        if view_data.get('is_back'):
            back_idx, back_top = place_equipment(view_data, back_idx, back_top, back)
        else:
            front_idx, front_top = place_equipment(view_data, front_idx, front_top, front)
    return {
        'rack_size': _rack_unit_to_height(rack.data.get('rack_units', 42)),
        'racked_equipment': front,
        'racked_equipment_back': back,
        'unracked_equipment': unracked,
    }
@register.filter
def rack_sort(equipment):
    """Template filter: order equipment top-of-rack first (reverse sort)."""
    if equipment:
        equipment.sort(key=_rack_sort, reverse=True)
    return equipment
| 2.578125 | 3 |
Cryptography/Exp-1-Shamirs-Secret-Sharing/main.py | LuminolT/Cryptographic | 0 | 17752 | import numpy as np
import matplotlib.pyplot as plt
from shamir import *
from binascii import hexlify
# img = plt.imread('cat.png')
# plt.imshow(img)
# plt.show()

# Demonstrate Shamir secret sharing: split a secret into 5 shares with a
# threshold of 3, then reconstruct it from 3 of the shares.
s = 'TEST_STRING'.encode()
print("Original secret:", hexlify(s))

# NOTE(review): the secret actually split is b'12345', not the
# "Original secret" printed above -- confirm which one is intended.
l = Shamir.split(3, 5, '12345'.encode())
for idx, item in l:
    print("Share {}: {}".format(str(idx), hexlify(item)))

# Any 3 of the 5 shares suffice to recover the secret.
shares = l[1:4]
secret = Shamir.combine(shares)
print(f'Secret is : {secret.decode()}')
P20-Stack Abstract Data Type/Stack - Reverse Stack.py | necrospiritus/Python-Working-Examples | 0 | 17753 | """Reverse stack is using a list where the top is at the beginning instead of at the end."""
class Reverse_Stack:
    """A stack whose top element lives at the *front* of the backing list.

    Functionally equivalent to a list-based stack, but push/pop/peek all
    operate on index 0 instead of the end of the list.
    """

    def __init__(self):
        self.items = []

    def is_empty(self):
        """Return True when the stack holds no items."""
        return not self.items

    def push(self, item):
        """Place *item* on top of the stack (front of the list)."""
        self.items.insert(0, item)

    def pop(self):
        """Remove and return the top item."""
        return self.items.pop(0)

    def peek(self):
        """Return the top item without removing it."""
        return self.items[0]

    def size(self):
        """Return the number of stacked items."""
        return len(self.items)
# Quick manual demonstration of the Reverse_Stack behaviour.
s = Reverse_Stack()
print(s.is_empty())  # True: nothing pushed yet
s.push(4)
s.push("Dog")
print(s.peek())  # "Dog" -- most recent push sits at index 0
s.push("Cat")
print(s.size())  # 3
print(s.is_empty())  # False
s.pop()  # removes "Cat"
print(s.peek())  # "Dog" again
print(s.size())  # 2
| 4.28125 | 4 |
gerapy/cmd/server.py | awesome-archive/Gerapy | 1 | 17754 | from gerapy.server.manage import manage
import sys
def server():
    """Entry point that delegates to Django's management command runner."""
    # Call django cmd
    manage()
| 1.296875 | 1 |
client/walt/client/term.py | dia38/walt-python-packages | 4 | 17755 | #!/usr/bin/env python
import sys, tty, termios, array, fcntl, curses
class TTYSettings(object):
    """Snapshot and manipulate the terminal attached to stdout.

    On construction the current termios attributes, window size and the
    terminal's colour capability are recorded so the terminal can later be
    switched to raw mode and restored.
    """

    def __init__(self):
        self.tty_fd = sys.stdout.fileno()
        # Remember the current attributes so restore() can undo raw mode.
        self.saved = termios.tcgetattr(self.tty_fd)
        self.win_size = self.get_win_size()
        self.rows, self.cols = self.win_size[0], self.win_size[1]
        curses.setupterm()
        self.num_colors = curses.tigetnum("colors")

    def set_raw_no_echo(self):
        """Switch the terminal to raw mode with local echo disabled."""
        tty.setraw(self.tty_fd, termios.TCSADRAIN)
        attrs = termios.tcgetattr(self.tty_fd)
        attrs[3] &= ~termios.ECHO  # index 3 holds the local-mode flags
        termios.tcsetattr(self.tty_fd, termios.TCSADRAIN, attrs)

    def restore(self):
        """Reinstate the attributes captured at construction time."""
        termios.tcsetattr(self.tty_fd, termios.TCSADRAIN, self.saved)

    def get_win_size(self):
        """Query the kernel for the terminal size via TIOCGWINSZ."""
        dims = array.array('h', [0, 0, 0, 0])
        fcntl.ioctl(self.tty_fd, termios.TIOCGWINSZ, dims, True)
        return dims
| 2.296875 | 2 |
improver_tests/calibration/ensemble_calibration/test_CalibratedForecastDistributionParameters.py | cpelley/improver | 0 | 17756 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Unit tests for the
`ensemble_calibration.CalibratedForecastDistributionParameters`
class.
"""
import unittest
import numpy as np
from iris.cube import CubeList
from iris.tests import IrisTest
from numpy.testing import assert_array_almost_equal
from improver.calibration.ensemble_calibration import (
CalibratedForecastDistributionParameters as Plugin,
)
from improver.calibration.ensemble_calibration import (
EstimateCoefficientsForEnsembleCalibration,
)
from improver.metadata.constants.attributes import MANDATORY_ATTRIBUTE_DEFAULTS
from improver.synthetic_data.set_up_test_cubes import set_up_variable_cube
from improver.utilities.warnings_handler import ManageWarnings
from .helper_functions import EnsembleCalibrationAssertions, SetupCubes
from .test_EstimateCoefficientsForEnsembleCalibration import SetupExpectedCoefficients
class SetupCoefficientsCubes(SetupCubes, SetupExpectedCoefficients):
    """Set up coefficients cubes for testing."""

    @ManageWarnings(
        ignored_messages=[
            "Collapsing a non-contiguous coordinate.",
            "invalid escape sequence",
        ],
        warning_types=[UserWarning, DeprecationWarning],
    )
    def setUp(self):
        """Set up coefficients cubes for when either the ensemble mean or the
        ensemble realizations have been used as the predictor. The coefficients
        have been constructed from the same underlying set of ensemble
        realizations, so application of these coefficients would be expected
        to give similar results. The values for the coefficients used to
        construct the coefficients cubes are taken from the
        SetupExpectedCoefficients class. These coefficients are the
        expected outputs from the tests to estimate the coefficients."""
        super().setUp()
        # Set up a coefficients cube when using the ensemble mean as the
        # predictor.
        estimator = EstimateCoefficientsForEnsembleCalibration(
            "norm", desired_units="Celsius"
        )
        self.coeffs_from_mean = estimator.create_coefficients_cubelist(
            self.expected_mean_pred_norm,
            self.historic_temperature_forecast_cube,
            CubeList([self.historic_temperature_forecast_cube]),
        )
        # Set up a timeshifted coefficients cube using the ensemble mean as a
        # predictor.  Both time coordinates are shifted forward by one hour
        # (3600 s) to create a deliberate mismatch for the time-match tests.
        forecast_timeshift_cube = self.historic_temperature_forecast_cube.copy()
        for coord_name in ["time", "forecast_period"]:
            forecast_timeshift_cube.coord(coord_name).points = [
                _ + 3600 for _ in forecast_timeshift_cube.coord(coord_name).points
            ]
        self.coeffs_from_mean_timeshift = estimator.create_coefficients_cubelist(
            self.expected_mean_pred_norm,
            forecast_timeshift_cube,
            CubeList([forecast_timeshift_cube]),
        )
        # Set up a coefficients cube when using the ensemble mean as the
        # predictor and separate coefficients at each point.
        estimator = EstimateCoefficientsForEnsembleCalibration(
            "norm", point_by_point=True, desired_units="Celsius"
        )
        # Replicate the four mean-predictor coefficients across the 3x3 grid
        # so every grid point carries an identical coefficient set.
        point_by_point_predictor = np.stack(
            [self.expected_mean_pred_norm] * 9
        ).T.reshape(4, 3, 3)
        self.coeffs_from_mean_point_by_point = estimator.create_coefficients_cubelist(
            point_by_point_predictor,
            self.historic_temperature_forecast_cube,
            CubeList([self.historic_temperature_forecast_cube]),
        )
        # Set up a coefficients cube when using the ensemble realization as the
        # predictor.
        estimator = EstimateCoefficientsForEnsembleCalibration(
            "norm", desired_units="Celsius", predictor="realizations"
        )
        self.coeffs_from_realizations = estimator.create_coefficients_cubelist(
            self.expected_realizations_norm,
            self.historic_temperature_forecast_cube,
            CubeList([self.historic_temperature_forecast_cube]),
        )
        # Set up a coefficients cube when using the ensemble realization as the
        # predictor and separate coefficients at each point.
        expected_realizations_each_site = [
            array if array.ndim == 1 else np.squeeze(array)
            for array in list(self.expected_realizations_each_site.values())
        ]
        estimator = EstimateCoefficientsForEnsembleCalibration(
            "norm", predictor="realizations", point_by_point=True
        )
        self.coeffs_from_realizations_sites = estimator.create_coefficients_cubelist(
            expected_realizations_each_site,
            self.historic_forecast_spot_cube,
            CubeList([self.historic_temperature_forecast_cube]),
        )
        # Set up a coefficients cube when using an additional predictor
        # (a static surface-altitude field without time coordinates).
        self.altitude = set_up_variable_cube(
            np.ones((3, 3), dtype=np.float32), name="surface_altitude", units="m"
        )
        for coord in ["time", "forecast_reference_time", "forecast_period"]:
            self.altitude.remove_coord(coord)
        estimator = EstimateCoefficientsForEnsembleCalibration(
            "norm", desired_units="Celsius"
        )
        self.coeffs_from_mean_alt = estimator.create_coefficients_cubelist(
            self.expected_mean_pred_norm_alt,
            self.historic_temperature_forecast_cube,
            CubeList([self.historic_temperature_forecast_cube, self.altitude]),
        )
        # Some expected data that are used in various tests.
        self.expected_loc_param_mean = np.array(
            [
                [273.7014, 274.6534, 275.4469],
                [276.9385, 277.7636, 278.5570],
                [279.6996, 280.1122, 281.2547],
            ],
            dtype=np.float32,
        )
        self.expected_scale_param_mean = np.array(
            [
                [0.2316, 0.2342, 0.0168],
                [0.0271, 0.0237, 0.0168],
                [0.0634, 0.1151, 0.0116],
            ],
            dtype=np.float32,
        )
        self.expected_loc_param_realizations = np.array(
            [
                [274.388, 275.3053, 275.4492],
                [277.1295, 277.3866, 278.4672],
                [280.2007, 280.3929, 281.2602],
            ],
            dtype=np.float32,
        )
        self.expected_loc_param_realizations_sites = np.array(
            [277.7531, 277.4529, 277.553, 277.2528], dtype=np.float32,
        )
        self.expected_scale_param_realizations_sites = np.array(
            [0, 0, 0, 0], dtype=np.float32
        )
        self.expected_loc_param_mean_alt = np.array(
            [
                [275.18134, 276.18134, 277.01465],
                [278.58133, 279.44797, 280.2813],
                [281.48132, 281.91464, 283.11465],
            ],
            dtype=np.float32,
        )
        self.expected_scale_param_mean_alt = np.array(
            [
                [0.4347, 0.4396, 0.0308],
                [0.0503, 0.0438, 0.0308],
                [0.1184, 0.2157, 0.0211],
            ],
            dtype=np.float32,
        )
        # Create output cubes with the expected data.
        self.expected_loc_param_mean_cube = set_up_variable_cube(
            self.expected_loc_param_mean,
            name="location_parameter",
            units="K",
            attributes=MANDATORY_ATTRIBUTE_DEFAULTS,
        )
        self.expected_scale_param_mean_cube = set_up_variable_cube(
            self.expected_scale_param_mean,
            name="scale_parameter",
            units="Kelvin^2",
            attributes=MANDATORY_ATTRIBUTE_DEFAULTS,
        )
class Test__init__(IrisTest):
    """Test the __init__ method."""

    def test_basic(self):
        """Default construction uses the ensemble mean as predictor."""
        self.assertEqual(Plugin().predictor, "mean")

    def test_with_predictor(self):
        """An explicit predictor choice is stored unchanged."""
        self.assertEqual(
            Plugin(predictor="realizations").predictor, "realizations"
        )
class Test__repr__(IrisTest):
    """Test the __repr__ method."""

    def test_basic(self):
        """The default repr reports the mean predictor."""
        expected = "<CalibratedForecastDistributionParameters: predictor: mean>"
        self.assertEqual(str(Plugin()), expected)

    def test_with_predictor(self):
        """The repr reports a non-default predictor."""
        expected = (
            "<CalibratedForecastDistributionParameters: predictor: realizations>"
        )
        self.assertEqual(str(Plugin(predictor="realizations")), expected)
class Test__spatial_domain_match(SetupCoefficientsCubes):
    """Test the _spatial_domain_match method."""

    def setUp(self):
        """Create the plugin on top of the shared coefficient fixtures."""
        super().setUp()
        self.plugin = Plugin()

    def test_matching(self):
        """Test case in which spatial domains match."""
        self.plugin.current_forecast = self.current_temperature_forecast_cube
        self.plugin.coefficients_cubelist = self.coeffs_from_mean
        # Success is the absence of an exception; no assertion is needed.
        self.plugin._spatial_domain_match()

    def test_unmatching_x_axis_points(self):
        """Test when the points of the x dimension do not match."""
        # Shift the x bounds so the forecast grid no longer lines up with the
        # grid the coefficients were computed on.
        self.current_temperature_forecast_cube.coord(axis="x").bounds = (
            self.current_temperature_forecast_cube.coord(axis="x").bounds + 2.0
        )
        self.plugin.current_forecast = self.current_temperature_forecast_cube
        self.plugin.coefficients_cubelist = self.coeffs_from_mean
        msg = "The points or bounds of the x axis given by the current forecast"
        with self.assertRaisesRegex(ValueError, msg):
            self.plugin._spatial_domain_match()

    def test_unmatching_x_axis_bounds(self):
        """Test when the bounds of the x dimension do not match."""
        # Same points, deliberately different bounds.
        self.current_temperature_forecast_cube.coord(axis="x").bounds = [
            [-35, -5],
            [-5, 5],
            [5, 35],
        ]
        self.plugin.current_forecast = self.current_temperature_forecast_cube
        self.plugin.coefficients_cubelist = self.coeffs_from_mean
        msg = "The points or bounds of the x axis given by the current forecast"
        with self.assertRaisesRegex(ValueError, msg):
            self.plugin._spatial_domain_match()

    def test_unmatching_y_axis(self):
        """Test case in which the y-dimensions of the domains do not match."""
        self.current_temperature_forecast_cube.coord(axis="y").bounds = (
            self.current_temperature_forecast_cube.coord(axis="y").bounds + 2.0
        )
        self.plugin.current_forecast = self.current_temperature_forecast_cube
        self.plugin.coefficients_cubelist = self.coeffs_from_mean
        msg = "The points or bounds of the y axis given by the current forecast"
        with self.assertRaisesRegex(ValueError, msg):
            self.plugin._spatial_domain_match()

    def test_skipping_spot_forecast(self):
        """Test passing a spot forecast. In this case, the spatial domain
        is not checked."""
        self.plugin.current_forecast = self.current_forecast_spot_cube
        self.plugin._spatial_domain_match()
class Test__calculate_location_parameter_from_mean(
    SetupCoefficientsCubes, EnsembleCalibrationAssertions
):
    """Test the _calculate_location_parameter_from_mean method."""

    def setUp(self):
        """Set-up coefficients and plugin for testing."""
        super().setUp()
        self.plugin = Plugin()
        self.plugin.current_forecast = self.current_temperature_forecast_cube
        self.plugin.coefficients_cubelist = self.coeffs_from_mean

    @ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_basic(self):
        """Test that the expected values for the location parameter are
        calculated when using the ensemble mean. These expected values are
        compared to the results when using the ensemble realizations to ensure
        that the results are similar."""
        location_parameter = self.plugin._calculate_location_parameter_from_mean()
        self.assertCalibratedVariablesAlmostEqual(
            location_parameter, self.expected_loc_param_mean
        )
        # Only compare loosely (0 d.p.) with the realizations-based result:
        # the two predictors give similar but not identical values.
        assert_array_almost_equal(
            location_parameter, self.expected_loc_param_realizations, decimal=0,
        )

    @ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_missing_additional_predictor(self):
        """Test that an error is raised if an additional predictor is expected
        based on the contents of the coefficients cube."""
        # These coefficients were trained with a surface-altitude predictor
        # that is not supplied here.
        self.plugin.coefficients_cubelist = self.coeffs_from_mean_alt
        msg = "The number of forecast predictors must equal the number"
        with self.assertRaisesRegex(ValueError, msg):
            self.plugin._calculate_location_parameter_from_mean()
class Test__calculate_location_parameter_from_realizations(
    SetupCoefficientsCubes, EnsembleCalibrationAssertions
):
    """Test the _calculate_location_parameter_from_realizations method."""

    @ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
    def setUp(self):
        """Set-up coefficients and plugin for testing."""
        super().setUp()
        self.plugin = Plugin()
        self.plugin.current_forecast = self.current_temperature_forecast_cube

    @ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_basic(self):
        """Test that the expected values for the location parameter are
        calculated when using the ensemble realizations. These expected values
        are compared to the results when using the ensemble mean to ensure
        that the results are similar."""
        self.plugin.coefficients_cubelist = self.coeffs_from_realizations
        location_parameter = (
            self.plugin._calculate_location_parameter_from_realizations()
        )
        self.assertCalibratedVariablesAlmostEqual(
            location_parameter, self.expected_loc_param_realizations
        )
        # Loose (0 d.p.) cross-check against the mean-predictor result.
        assert_array_almost_equal(
            location_parameter, self.expected_loc_param_mean, decimal=0
        )
class Test__calculate_scale_parameter(
    SetupCoefficientsCubes, EnsembleCalibrationAssertions
):
    """Test the _calculate_scale_parameter method."""

    def setUp(self):
        """Set-up the plugin for testing."""
        super().setUp()
        self.plugin = Plugin()
        self.plugin.current_forecast = self.current_temperature_forecast_cube

    @ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_basic(self):
        """Test the scale parameter is calculated correctly."""
        self.plugin.coefficients_cubelist = self.coeffs_from_mean
        scale_parameter = self.plugin._calculate_scale_parameter()
        self.assertCalibratedVariablesAlmostEqual(
            scale_parameter, self.expected_scale_param_mean
        )
class Test__create_output_cubes(SetupCoefficientsCubes, EnsembleCalibrationAssertions):
    """Test the _create_output_cubes method."""

    def setUp(self):
        """Set-up the plugin for testing."""
        super().setUp()
        self.plugin = Plugin()
        self.plugin.current_forecast = self.current_temperature_forecast_cube

    @ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_basic(self):
        """Test that the cubes created containing the location and scale
        parameter are formatted as expected."""
        (
            location_parameter_cube,
            scale_parameter_cube,
        ) = self.plugin._create_output_cubes(
            self.expected_loc_param_mean, self.expected_scale_param_mean
        )
        # Cube equality covers data, metadata (name/units) and coordinates.
        self.assertEqual(location_parameter_cube, self.expected_loc_param_mean_cube)
        self.assertEqual(scale_parameter_cube, self.expected_scale_param_mean_cube)
class Test_process(SetupCoefficientsCubes, EnsembleCalibrationAssertions):
    """Test the process plugin."""

    def setUp(self):
        """Set-up the plugin for testing."""
        super().setUp()
        self.plugin = Plugin()

    @ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_diagnostic_match(self):
        """Test that an error is raised if the diagnostic_standard_name does
        not match when comparing a forecast cube and coefficients cubelist."""
        # Wind speed forecast vs temperature-trained coefficients.
        msg = "The forecast diagnostic"
        with self.assertRaisesRegex(ValueError, msg):
            self.plugin.process(
                self.current_wind_speed_forecast_cube, self.coeffs_from_mean
            )

    @ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_time_match(self):
        """Test that an error is raised if the time coordinates do
        not match when comparing a forecast cube and coefficients cubelist."""
        msg = "rounded forecast_period hours"
        with self.assertRaisesRegex(ValueError, msg):
            self.plugin.process(
                self.current_temperature_forecast_cube, self.coeffs_from_mean_timeshift
            )

    @ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_time_match_tolerate(self):
        """Test that no error is raised when using a coefficients file with
        a mismatching forecast_period coordinate, if the
        tolerate_time_mismatch option is enabled."""
        calibrated_forecast_predictor, calibrated_forecast_var = self.plugin.process(
            self.current_temperature_forecast_cube,
            self.coeffs_from_mean_timeshift,
            tolerate_time_mismatch=True,
        )
        self.assertCalibratedVariablesAlmostEqual(
            calibrated_forecast_predictor.data, self.expected_loc_param_mean
        )
        self.assertCalibratedVariablesAlmostEqual(
            calibrated_forecast_var.data, self.expected_scale_param_mean
        )
        self.assertEqual(calibrated_forecast_predictor.dtype, np.float32)

    @ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_variable_setting(self):
        """Test that the cubes passed into the plugin are allocated to
        plugin variables appropriately."""
        # Outputs are discarded; only the plugin's stored inputs are checked.
        _, _ = self.plugin.process(
            self.current_temperature_forecast_cube, self.coeffs_from_mean
        )
        self.assertEqual(
            self.current_temperature_forecast_cube, self.plugin.current_forecast
        )
        self.assertEqual(self.coeffs_from_mean, self.plugin.coefficients_cubelist)

    @ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_end_to_end(self):
        """An example end-to-end calculation. This repeats the test elements
        above but all grouped together."""
        calibrated_forecast_predictor, calibrated_forecast_var = self.plugin.process(
            self.current_temperature_forecast_cube, self.coeffs_from_mean
        )
        self.assertCalibratedVariablesAlmostEqual(
            calibrated_forecast_predictor.data, self.expected_loc_param_mean
        )
        self.assertCalibratedVariablesAlmostEqual(
            calibrated_forecast_var.data, self.expected_scale_param_mean
        )
        self.assertEqual(calibrated_forecast_predictor.dtype, np.float32)

    @ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_end_to_end_point_by_point(self):
        """An example end-to-end calculation when a separate set of
        coefficients are computed for each grid point. This repeats the test
        elements above but all grouped together."""
        # Identical coefficients were replicated at each point, so results
        # match the single-coefficient-set case.
        calibrated_forecast_predictor, calibrated_forecast_var = self.plugin.process(
            self.current_temperature_forecast_cube, self.coeffs_from_mean_point_by_point
        )
        self.assertCalibratedVariablesAlmostEqual(
            calibrated_forecast_predictor.data, self.expected_loc_param_mean
        )
        self.assertCalibratedVariablesAlmostEqual(
            calibrated_forecast_var.data, self.expected_scale_param_mean
        )
        self.assertEqual(calibrated_forecast_predictor.dtype, np.float32)

    @ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_end_to_end_point_by_point_sites_realizations(self):
        """An example end-to-end calculation when a separate set of
        coefficients are computed for each site using the realizations as the
        predictor. This repeats the test elements above but all grouped together."""
        plugin = Plugin(predictor="realizations")
        calibrated_forecast_predictor, calibrated_forecast_var = plugin.process(
            self.current_forecast_spot_cube, self.coeffs_from_realizations_sites
        )
        self.assertCalibratedVariablesAlmostEqual(
            calibrated_forecast_predictor.data,
            self.expected_loc_param_realizations_sites,
        )
        self.assertCalibratedVariablesAlmostEqual(
            calibrated_forecast_var.data, self.expected_scale_param_realizations_sites
        )
        self.assertEqual(calibrated_forecast_predictor.dtype, np.float32)

    @ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_end_to_end_with_additional_predictor(self):
        """Test that the expected calibrated forecast is generated, if an
        additional predictor is provided."""
        calibrated_forecast_predictor, calibrated_forecast_var = self.plugin.process(
            self.current_temperature_forecast_cube,
            self.coeffs_from_mean_alt,
            additional_fields=CubeList([self.altitude]),
        )
        self.assertCalibratedVariablesAlmostEqual(
            calibrated_forecast_predictor.data, self.expected_loc_param_mean_alt
        )
        self.assertCalibratedVariablesAlmostEqual(
            calibrated_forecast_var.data, self.expected_scale_param_mean_alt
        )
        self.assertEqual(calibrated_forecast_predictor.dtype, np.float32)

    @ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_end_to_end_with_mask(self):
        """An example end-to-end calculation, but making sure that the
        areas that are masked within the landsea mask, are masked at the
        end."""
        # Construct a mask and encapsulate as a cube.
        mask = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        mask_cube = self.current_temperature_forecast_cube[0].copy(data=mask)
        # Convention for IMPROVER is that land points are ones and sea points
        # are zeros in land-sea masks. In this case we want to mask sea points.
        expected_mask = np.array(
            [[False, True, True], [True, False, True], [True, True, False]]
        )
        calibrated_forecast_predictor, calibrated_forecast_var = self.plugin.process(
            self.current_temperature_forecast_cube,
            self.coeffs_from_mean,
            landsea_mask=mask_cube,
        )
        self.assertCalibratedVariablesAlmostEqual(
            calibrated_forecast_predictor.data.data, self.expected_loc_param_mean
        )
        self.assertArrayEqual(calibrated_forecast_predictor.data.mask, expected_mask)
        self.assertCalibratedVariablesAlmostEqual(
            calibrated_forecast_var.data.data, self.expected_scale_param_mean
        )
        self.assertArrayEqual(calibrated_forecast_var.data.mask, expected_mask)
# Allow this test module to be executed directly as a script.
if __name__ == "__main__":
    unittest.main()
| 1.296875 | 1 |
rendaz/tests/test_daztools.py | veselosky/rendaz | 0 | 17757 | "Test handling/parsing of various DAZ Studio files"
from pathlib import Path
from tempfile import NamedTemporaryFile
from django.apps import apps
from rendaz.daztools import (
DSONFile,
ProductMeta,
manifest_files,
supplement_product_name,
)
TEST_DIR = Path(__file__).parent
def test_read_dson_compressed():
"Test reading compressed DSON files"
fname = TEST_DIR / "Sphere-compressed.duf"
duf = DSONFile(path=str(fname))
assert duf.path.name == "Sphere-compressed.duf"
assert duf.is_compressed
assert "asset_info" in duf.dson
def test_read_dson_uncompressed():
"Test reading uncompressed DSON files"
fname = TEST_DIR / "Sphere-uncompressed.duf"
duf = DSONFile(path=str(fname))
assert duf.path.name == "Sphere-uncompressed.duf"
assert duf.is_compressed is False
assert "asset_info" in duf.dson
def test_save_dson_compressed():
"Test write round trip, read uncompressed, write compressed, read back"
fname = TEST_DIR / "Sphere-uncompressed.duf"
duf = DSONFile(path=str(fname))
out = NamedTemporaryFile(mode="wt", delete=False)
tmpname = out.name
out.close()
try:
duf.save(tmpname, compress=True)
new = DSONFile(tmpname)
assert new.is_compressed
assert "asset_info" in new.dson
finally:
Path(tmpname).unlink()
def test_save_dson_uncompressed():
"Test write round trip, read compressed, write uncompressed, read back"
fname = TEST_DIR / "Sphere-compressed.duf"
duf = DSONFile(path=str(fname))
out = NamedTemporaryFile(mode="wt", delete=False)
tmpname = out.name
out.close()
try:
duf.save(tmpname, compress=False)
new = DSONFile(tmpname)
assert new.is_compressed is False
assert "asset_info" in new.dson
finally:
Path(tmpname).unlink()
def test_productmetafile_defaults():
production = apps.get_app_config("production")
it = ProductMeta(product_id="THETHING", stem_product_name="THETHING")
assert it.product_id == "THETHING"
assert isinstance(it.cms_files, set)
assert isinstance(it.dim_manifest_files, set)
assert isinstance(it.included_files, set)
def test_manifest_files():
expected = [
"Content/People/Genesis 8 Female/Characters/Aakash.duf",
"Content/People/Genesis 8 Female/Characters/Aakash.duf.png",
"Content/Runtime/Support/DAZ_3D_60599_Aakash_HD_for_Kala_8.dsa",
"Content/Runtime/Support/DAZ_3D_60599_Aakash_HD_for_Kala_8.dsx",
"Content/Runtime/Support/DAZ_3D_60599_Aakash_HD_for_Kala_8.jpg",
]
fname = TEST_DIR / "Manifest.dsx"
actual = list(manifest_files(fname))
assert actual == expected
def test_supplement_product_name():
expected = "Aakash HD for Kala 8"
fname = TEST_DIR / "Supplement.dsx"
actual = supplement_product_name(fname)
assert actual == expected
| 2.265625 | 2 |
src/core/models/graph2seq.py | talha1503/RL-based-Graph2Seq-for-NQG | 100 | 17758 | <gh_stars>10-100
import random
import string
from typing import Union, List
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..layers.common import EncoderRNN, DecoderRNN, dropout
from ..layers.attention import *
from ..layers.graphs import GraphNN
from ..utils.generic_utils import to_cuda, create_mask
from ..utils.constants import VERY_SMALL_NUMBER
class Graph2SeqOutput(object):
    """Bundle of everything a Graph2Seq forward pass produces."""

    def __init__(self, encoder_outputs, encoder_state, decoded_tokens,
                 loss=0, loss_value=0, enc_attn_weights=None, ptr_probs=None):
        # Encoder side.
        self.encoder_outputs = encoder_outputs
        self.encoder_state = encoder_state
        # Decoded token indices, shaped (out seq len, batch size).
        self.decoded_tokens = decoded_tokens
        # Training signals: `loss` is the scalar optimisation target,
        # `loss_value` its float value excluding coverage loss.
        self.loss = loss
        self.loss_value = loss_value
        # Optional diagnostics: attention weights shaped
        # (out seq len, batch size, src seq len) and pointer probabilities
        # shaped (out seq len, batch size).
        self.enc_attn_weights = enc_attn_weights
        self.ptr_probs = ptr_probs
class Graph2Seq(nn.Module):
def __init__(self, config, word_embedding, word_vocab):
"""
:param word_vocab: mainly for info about special tokens and word_vocab size
:param config: model hyper-parameters
:param max_dec_steps: max num of decoding steps (only effective at test time, as during
training the num of steps is determined by the `target_tensor`); it is
safe to change `self.max_dec_steps` as the network architecture is
independent of src/tgt seq lengths
Create the graph2seq model; its encoder and decoder will be created automatically.
"""
super(Graph2Seq, self).__init__()
self.name = 'Graph2Seq'
self.device = config['device']
self.word_dropout = config['word_dropout']
self.edge_dropout = config['edge_dropout']
self.bert_dropout = config['bert_dropout']
self.word_vocab = word_vocab
self.vocab_size = len(word_vocab)
self.f_case = config['f_case']
self.f_pos = config['f_pos']
self.f_ner = config['f_ner']
self.f_freq = config['f_freq']
self.f_dep = config['f_dep']
self.f_ans = config['f_ans']
self.dan_type = config.get('dan_type', 'all')
self.max_dec_steps = config['max_dec_steps']
self.rnn_type = config['rnn_type']
self.enc_attn = config['enc_attn']
self.enc_attn_cover = config['enc_attn_cover']
self.dec_attn = config['dec_attn']
self.pointer = config['pointer']
self.pointer_loss_ratio = config['pointer_loss_ratio']
self.cover_loss = config['cover_loss']
self.cover_func = config['cover_func']
self.message_function = config['message_function']
self.use_bert = config['use_bert']
self.use_bert_weight = config['use_bert_weight']
self.use_bert_gamma = config['use_bert_gamma']
self.finetune_bert = config.get('finetune_bert', None)
bert_dim = (config['bert_dim'] if self.use_bert else 0)
enc_hidden_size = config['rnn_size']
if config['dec_hidden_size']:
dec_hidden_size = config['dec_hidden_size']
if self.rnn_type == 'lstm':
self.enc_dec_adapter = nn.ModuleList([nn.Linear(enc_hidden_size, dec_hidden_size) for _ in range(2)])
else:
self.enc_dec_adapter = nn.Linear(enc_hidden_size, dec_hidden_size)
else:
dec_hidden_size = enc_hidden_size
self.enc_dec_adapter = None
enc_input_dim = config['word_embed_dim']
self.word_embed = word_embedding
if config['fix_word_embed']:
print('[ Fix word embeddings ]')
for param in self.word_embed.parameters():
param.requires_grad = False
self.edge_embed = nn.Embedding(config['num_edge_types'], config['edge_embed_dim'], padding_idx=0)
if self.f_case:
self.case_embed = nn.Embedding(3, config['case_embed_dim'], padding_idx=0)
enc_input_dim += config['case_embed_dim']
if self.f_pos:
self.pos_embed = nn.Embedding(config['num_features_f_pos'], config['pos_embed_dim'], padding_idx=0)
enc_input_dim += config['pos_embed_dim']
if self.f_ner:
self.ner_embed = nn.Embedding(config['num_features_f_ner'], config['ner_embed_dim'], padding_idx=0)
enc_input_dim += config['ner_embed_dim']
if self.f_freq:
self.freq_embed = nn.Embedding(4, config['freq_embed_dim'], padding_idx=0)
enc_input_dim += config['freq_embed_dim']
if self.f_dep:
self.edge_embed = nn.Embedding(config['num_edge_types'], config['edge_embed_dim'], padding_idx=0)
enc_input_dim += config['edge_embed_dim']
if self.f_ans and self.dan_type in ('all', 'word'):
enc_input_dim += config['word_embed_dim']
if self.use_bert:
enc_input_dim += config['bert_dim']
if self.use_bert and self.use_bert_weight:
num_bert_layers = config['bert_layer_indexes'][1] - config['bert_layer_indexes'][0]
self.logits_bert_layers = nn.Parameter(nn.init.xavier_uniform_(torch.Tensor(1, num_bert_layers)))
if self.use_bert_gamma:
self.gamma_bert_layers = nn.Parameter(nn.init.constant_(torch.Tensor(1, 1), 1.))
config['gl_input_size'] = enc_input_dim
self.ctx_rnn_encoder = EncoderRNN(enc_input_dim, enc_hidden_size, bidirectional=config['enc_bidi'], num_layers=config['num_enc_rnn_layers'], rnn_type=self.rnn_type,
rnn_dropout=config['enc_rnn_dropout'], device=self.device)
# Deep answer alignment
if self.f_ans:
if self.dan_type in ('all', 'word'):
self.ctx2ans_attn_l1 = Context2AnswerAttention(config['word_embed_dim'], config['hidden_size'])
if self.dan_type in ('all', 'hidden'):
self.ans_rnn_encoder = EncoderRNN(config['word_embed_dim'] + bert_dim, enc_hidden_size, bidirectional=config['enc_bidi'], num_layers=config['num_enc_rnn_layers'], rnn_type=self.rnn_type,
rnn_dropout=config['enc_rnn_dropout'], device=self.device)
self.ctx2ans_attn_l2 = Context2AnswerAttention(config['word_embed_dim'] + config['hidden_size'] + bert_dim, config['hidden_size'])
self.ctx_rnn_encoder_l2 = EncoderRNN(2 * enc_hidden_size, enc_hidden_size, bidirectional=config['enc_bidi'], num_layers=config['num_enc_rnn_layers'], rnn_type=self.rnn_type,
rnn_dropout=config['enc_rnn_dropout'], device=self.device)
print('[ Using Deep Answer Alignment Network: {} ]'.format(self.dan_type))
self.graph_encoder = GraphNN(config)
self.decoder = DecoderRNN(self.vocab_size, config['word_embed_dim'], dec_hidden_size, rnn_type=self.rnn_type,
enc_attn=config['enc_attn'], dec_attn=config['dec_attn'],
pointer=config['pointer'], out_embed_size=config['out_embed_size'],
tied_embedding=self.word_embed if config['tie_embed'] else None,
in_drop=config['dec_in_dropout'], rnn_drop=config['dec_rnn_dropout'],
out_drop=config['dec_out_dropout'], enc_hidden_size=enc_hidden_size, device=self.device)
def filter_oov(self, tensor, ext_vocab_size):
"""Replace any OOV index in `tensor` with UNK"""
if ext_vocab_size and ext_vocab_size > self.vocab_size:
result = tensor.clone()
result[tensor >= self.vocab_size] = self.word_vocab.UNK
return result
return tensor
def get_coverage_vector(self, enc_attn_weights):
"""Combine the past attention weights into one vector"""
if self.cover_func == 'max':
coverage_vector, _ = torch.max(torch.cat(enc_attn_weights), dim=0)
elif self.cover_func == 'sum':
coverage_vector = torch.sum(torch.cat(enc_attn_weights), dim=0)
else:
raise ValueError('Unrecognized cover_func: ' + self.cover_func)
return coverage_vector
    def forward(self, ex, target_tensor=None, criterion=None, criterion_reduction=True, criterion_nll_only=False, \
            rl_loss=False, *, forcing_ratio=0, partial_forcing=True, \
            ext_vocab_size=None, sample=False, saved_out: Graph2SeqOutput=None, \
            visualize: bool=None, include_cover_loss: bool=False) -> Graph2SeqOutput:
        """
        Run the graph2seq model for training or testing.

        :param ex: batch dict; keys read here include 'context', 'context_lens',
            'target_lens', 'answers', 'answer_lens', 'context_graphs',
            'context_bert', 'answer_bert', 'target_copied' and the optional
            linguistic-feature keys ('context_case', 'context_pos', ...)
        :param target_tensor: tensor of word indices, (batch size, tgt seq len)
        :param criterion: the loss function; if set, loss will be returned
        :param criterion_reduction: if True, losses are reduced over the batch
        :param criterion_nll_only: if True, skip coverage and pointer losses
        :param rl_loss: if True, use the chosen decoder input as the gold
            standard (sampling-based / RL-style training)
        :param forcing_ratio: teacher-forcing probability (requires
            `target_tensor`, training only)
        :param partial_forcing: decide teacher forcing per step rather than per
            sequence (training only)
        :param ext_vocab_size: extended vocab size for the copy mechanism
        :param sample: if True, the returned `decoded_tokens` will be based on
            random sampling instead of greedily selecting the token of the
            highest probability at each step
        :param saved_out: the output of this function in a previous run; if set,
            the encoding step will be skipped and we reuse the encoder states
            saved in this object
        :param visualize: whether to return data for attention and pointer
            visualization; if None, return if no `criterion` is provided
        :param include_cover_loss: whether to include coverage loss in the
            returned `loss_value`
        """
        input_tensor = ex['context']
        input_lengths = ex['context_lens']
        batch_size, input_length = input_tensor.shape
        input_mask = create_mask(input_lengths, input_length, self.device)

        log_prob = not (sample or self.decoder.pointer)  # don't apply log too soon in these cases
        if visualize is None:
            visualize = criterion is None
        if visualize and not (self.enc_attn or self.pointer):
            visualize = False  # nothing to visualize

        if target_tensor is None:
            target_length = self.max_dec_steps
            target_mask = None
        else:
            # (batch, tgt len) -> (tgt len, batch) so we can index by step below.
            target_tensor = target_tensor.transpose(1, 0)
            target_length = target_tensor.size(0)
            target_mask = create_mask(ex['target_lens'], target_length, self.device)

        if forcing_ratio == 1:
            # if fully teacher-forced, it may be possible to eliminate the for-loop over decoder steps
            # for generality, this optimization is not investigated
            use_teacher_forcing = True
        elif forcing_ratio > 0:
            if partial_forcing:
                use_teacher_forcing = None  # decide later individually in each step
            else:
                use_teacher_forcing = random.random() < forcing_ratio
        else:
            use_teacher_forcing = False

        if saved_out:  # reuse encoder states of a previous run
            encoder_outputs = saved_out.encoder_outputs
            encoder_state = saved_out.encoder_state
            assert input_length == encoder_outputs.size(0)
            assert batch_size == encoder_outputs.size(1)
        else:  # run the encoder
            # encoder_embedded: (batch size, input len, embed size)
            encoder_embedded = self.word_embed(self.filter_oov(input_tensor, ext_vocab_size))
            encoder_embedded = dropout(encoder_embedded, self.word_dropout, shared_axes=[-2], training=self.training)
            # Concatenate word embeddings with any enabled linguistic features.
            enc_input_cat = [encoder_embedded]
            if self.f_case:
                case_features = self.case_embed(ex['context_case'])
                enc_input_cat.append(case_features)
            if self.f_pos:
                pos_features = self.pos_embed(ex['context_pos'])
                enc_input_cat.append(pos_features)
            if self.f_ner:
                ner_features = self.ner_embed(ex['context_ner'])
                enc_input_cat.append(ner_features)
            if self.f_freq:
                freq_features = self.freq_embed(ex['context_freq'])
                enc_input_cat.append(freq_features)
            if self.f_dep:
                dep_features = self.edge_embed(ex['context_dep'])
                enc_input_cat.append(dep_features)
            if self.f_ans:
                # Deep answer alignment, word level (dan_type 'all'/'word').
                answer_tensor = ex['answers']
                answer_lengths = ex['answer_lens']
                ans_mask = create_mask(answer_lengths, answer_tensor.size(1), self.device)
                ans_embedded = self.word_embed(self.filter_oov(answer_tensor, ext_vocab_size))
                ans_embedded = dropout(ans_embedded, self.word_dropout, shared_axes=[-2], training=self.training)
                enc_answer_cat = [ans_embedded]
                if self.dan_type in ('all', 'word'):
                    # Align answer info to passage at the word level
                    ctx_aware_ans_emb = self.ctx2ans_attn_l1(encoder_embedded, ans_embedded, ans_embedded, ans_mask)
                    enc_input_cat.append(ctx_aware_ans_emb)
            if self.use_bert:
                context_bert = ex['context_bert']
                if not self.finetune_bert:
                    assert context_bert.requires_grad == False
                if self.use_bert_weight:
                    # Learned softmax mixture over BERT layers, optionally
                    # scaled by a learned gamma.
                    weights_bert_layers = torch.softmax(self.logits_bert_layers, dim=-1)
                    if self.use_bert_gamma:
                        weights_bert_layers = weights_bert_layers * self.gamma_bert_layers
                    context_bert = torch.mm(weights_bert_layers, context_bert.view(context_bert.size(0), -1)).view(context_bert.shape[1:])
                context_bert = dropout(context_bert, self.bert_dropout, shared_axes=[-2], training=self.training)
                enc_input_cat.append(context_bert)
                if self.f_ans and self.dan_type in ('all', 'hidden'):
                    answer_bert = ex['answer_bert']
                    if not self.finetune_bert:
                        assert answer_bert.requires_grad == False
                    # NOTE(review): `weights_bert_layers` is only bound when
                    # self.use_bert_weight is True; this line would raise
                    # NameError for use_bert without use_bert_weight — confirm
                    # the supported config combinations.
                    answer_bert = torch.mm(weights_bert_layers, answer_bert.view(answer_bert.size(0), -1)).view(answer_bert.shape[1:])
                    answer_bert = dropout(answer_bert, self.bert_dropout, shared_axes=[-2], training=self.training)
                    enc_answer_cat.append(answer_bert)
            raw_input_vec = torch.cat(enc_input_cat, -1)
            encoder_outputs = self.ctx_rnn_encoder(raw_input_vec, input_lengths)[0].transpose(0, 1)
            if self.f_ans and self.dan_type in ('all', 'hidden'):
                # Align answer info to passage at the hidden state level
                enc_answer_cat = torch.cat(enc_answer_cat, -1)
                ans_encoder_outputs = self.ans_rnn_encoder(enc_answer_cat, answer_lengths)[0].transpose(0, 1)
                enc_cat_l2 = torch.cat([encoder_embedded, encoder_outputs], -1)
                ans_cat_l2 = torch.cat([ans_embedded, ans_encoder_outputs], -1)
                if self.use_bert:
                    enc_cat_l2 = torch.cat([enc_cat_l2, context_bert], -1)
                    ans_cat_l2 = torch.cat([ans_cat_l2, answer_bert], -1)
                ctx_aware_ans_emb = self.ctx2ans_attn_l2(enc_cat_l2, \
                        ans_cat_l2, ans_encoder_outputs, ans_mask)
                encoder_outputs = self.ctx_rnn_encoder_l2(torch.cat([encoder_outputs, ctx_aware_ans_emb], -1), \
                        input_lengths)[0].transpose(0, 1)

            # Graph encoder over the RNN-contextualized node vectors.
            input_graphs = ex['context_graphs']
            if self.message_function == 'edge_mm':
                edge_vec = input_graphs['edge_features']
            else:
                edge_vec = self.edge_embed(input_graphs['edge_features'])
            node_embedding, graph_embedding = self.graph_encoder(encoder_outputs, \
                    edge_vec, (input_graphs['node2edge'], input_graphs['edge2node']), \
                    node_mask=input_mask, raw_node_vec=raw_input_vec)
            encoder_outputs = node_embedding
            encoder_state = (graph_embedding, graph_embedding) if self.rnn_type == 'lstm' else graph_embedding

        # initialize return values
        r = Graph2SeqOutput(encoder_outputs, encoder_state,
                torch.zeros(target_length, batch_size, dtype=torch.long))
        if visualize:
            r.enc_attn_weights = torch.zeros(target_length, batch_size, input_length)
            if self.pointer:
                r.ptr_probs = torch.zeros(target_length, batch_size)

        # Bridge encoder state into the decoder's expected shape, if configured.
        if self.enc_dec_adapter is None:
            decoder_state = encoder_state
        else:
            if self.rnn_type == 'lstm':
                decoder_state = tuple([self.enc_dec_adapter[i](x) for i, x in enumerate(encoder_state)])
            else:
                decoder_state = self.enc_dec_adapter(encoder_state)
        decoder_hiddens = []
        enc_attn_weights = []
        enc_context = None
        dec_prob_ptr_tensor = []
        decoder_input = to_cuda(torch.tensor([self.word_vocab.SOS] * batch_size), self.device)

        for di in range(target_length):
            decoder_embedded = self.word_embed(self.filter_oov(decoder_input, ext_vocab_size))
            decoder_embedded = dropout(decoder_embedded, self.word_dropout, shared_axes=[-2], training=self.training)
            if enc_attn_weights:
                coverage_vector = self.get_coverage_vector(enc_attn_weights)
            else:
                coverage_vector = None
            decoder_output, decoder_state, dec_enc_attn, dec_prob_ptr, enc_context = \
                self.decoder(decoder_embedded, decoder_state, encoder_outputs,
                        torch.cat(decoder_hiddens) if decoder_hiddens else None, coverage_vector,
                        input_mask=input_mask,
                        encoder_word_idx=input_tensor, ext_vocab_size=ext_vocab_size,
                        log_prob=log_prob,
                        prev_enc_context=enc_context)
            dec_prob_ptr_tensor.append(dec_prob_ptr)
            if self.dec_attn:
                decoder_hiddens.append(decoder_state[0] if self.rnn_type == 'lstm' else decoder_state)

            # save the decoded tokens
            if not sample:
                _, top_idx = decoder_output.data.topk(1)  # top_idx shape: (batch size, k=1)
            else:
                prob_distribution = torch.exp(decoder_output) if log_prob else decoder_output
                top_idx = torch.multinomial(prob_distribution, 1)
            top_idx = top_idx.squeeze(1).detach()  # detach from history as input
            r.decoded_tokens[di] = top_idx

            # decide the next input
            if use_teacher_forcing or (use_teacher_forcing is None and random.random() < forcing_ratio):
                decoder_input = target_tensor[di]  # teacher forcing
            else:
                decoder_input = top_idx

            # compute loss
            if criterion:
                if target_tensor is None:
                    gold_standard = top_idx  # for sampling
                else:
                    gold_standard = target_tensor[di] if not rl_loss else decoder_input
                if not log_prob:
                    decoder_output = torch.log(decoder_output + VERY_SMALL_NUMBER)  # necessary for NLLLoss
                if criterion_reduction:
                    nll_loss = criterion(decoder_output, gold_standard)
                    r.loss += nll_loss
                    r.loss_value += nll_loss.item()
                else:
                    # Unreduced: keep a per-example loss vector.
                    nll_loss = F.nll_loss(decoder_output, gold_standard, ignore_index=self.word_vocab.PAD, reduction='none')
                    r.loss += nll_loss
                    r.loss_value += nll_loss

            # update attention history and compute coverage loss
            if self.enc_attn_cover or (criterion and self.cover_loss > 0):
                if not criterion_nll_only and coverage_vector is not None and criterion and self.cover_loss > 0:
                    if criterion_reduction:
                        coverage_loss = torch.sum(torch.min(coverage_vector, dec_enc_attn)) / batch_size * self.cover_loss
                        r.loss += coverage_loss
                        if include_cover_loss: r.loss_value += coverage_loss.item()
                    else:
                        coverage_loss = torch.sum(torch.min(coverage_vector, dec_enc_attn), dim=-1) * self.cover_loss
                        r.loss += coverage_loss
                        if include_cover_loss: r.loss_value += coverage_loss
                enc_attn_weights.append(dec_enc_attn.unsqueeze(0))

            # save data for visualization
            if visualize:
                r.enc_attn_weights[di] = dec_enc_attn.data
                if self.pointer:
                    r.ptr_probs[di] = dec_prob_ptr.squeeze(1).data

        # compute pointer network loss
        if not criterion_nll_only and criterion and self.pointer_loss_ratio > 0 and target_tensor is not None:
            dec_prob_ptr_tensor = torch.cat(dec_prob_ptr_tensor, -1)
            pointer_loss = F.binary_cross_entropy(dec_prob_ptr_tensor, ex['target_copied'], reduction='none')
            if criterion_reduction:
                pointer_loss = torch.sum(pointer_loss * target_mask) / batch_size * self.pointer_loss_ratio
                r.loss += pointer_loss
                r.loss_value += pointer_loss.item()
            else:
                pointer_loss = torch.sum(pointer_loss * target_mask, dim=-1) * self.pointer_loss_ratio
                r.loss += pointer_loss
                r.loss_value += pointer_loss
        return r
| 2.265625 | 2 |
towhee/engine/pipeline.py | jeffoverflow/towhee | 0 | 17759 | <reponame>jeffoverflow/towhee
# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from towhee.engine.graph_context import GraphContext
from towhee.dag.graph_repr import GraphRepr
from towhee.dataframe.dataframe import DFIterator
class Pipeline:
    """
    The runtime pipeline context.

    Drives GraphContext instances on a local engine, feeding input
    DataFrames through the DAG described by `graph_repr`.
    """

    # BUG FIX: the annotations below previously used the bare names `Engine`
    # and `function`, neither of which is defined/imported in this module, so
    # merely defining this class raised NameError. They are kept as string
    # (forward-reference) annotations instead.
    def __init__(self, engine: "Engine", graph_repr: "GraphRepr", parallelism: int = 1) -> None:
        """
        Args:
            engine: the local engine to drive the Pipeline
            graph_repr: the graph representation
            parallelism: how many rows of inputs to be processed concurrently
        """
        self._engine = engine
        self._graph_repr = graph_repr
        self._parallelism = parallelism

    def build(self):
        """
        Create GraphContexts and set up input iterators.
        """
        raise NotImplementedError

    def run(self, inputs: list) -> "DFIterator":
        """
        The Pipeline's main loop.

        Args:
            inputs: the input data, organized as a list of DataFrame, feeding
                to the Pipeline.
        """
        # while we still have pipeline inputs:
        #   input = inputs.next()
        #   for g in graph contexts:
        #       if g.is_idle:
        #           g.start_op.inputs = input
        #           break
        #   if all graphs contexts are busy:
        #       wait for notification from _notify_run_loop
        raise NotImplementedError

    def on_start(self, handler: "Callable"):
        """
        Set a custom handler that is called before the execution of the graph.
        """
        self._on_start_handler = handler
        raise NotImplementedError

    def on_finish(self, handler: "Callable"):
        """
        Set a custom handler that is called after the execution of the graph.
        """
        self._on_finish_handler = handler
        raise NotImplementedError

    def _organize_outputs(self, graph_ctx: "GraphContext"):
        """
        on_finish handler passed to GraphContext. The handler organizes the
        GraphContext's output into the Pipeline's outputs.
        """
        raise NotImplementedError

    def _notify_run_loop(self, graph_ctx: "GraphContext"):
        """
        on_finish handler passed to GraphContext. The handler notifies the run
        loop that a GraphContext is in idle state.
        """
        raise NotImplementedError
| 2.1875 | 2 |
portfolio/models.py | MrInternauta/Python-Django-Portafolio-web-administrable | 0 | 17760 | from django.db import models
# Create your models here.
class Project(models.Model):
    """A portfolio project entry, managed through the Django admin."""
    # The verbose_name strings are Spanish admin-UI labels; they are
    # user-facing and must not be translated here.
    title = models.CharField(max_length = 200, verbose_name = "Titulo")
    description = models.TextField(verbose_name="Descripcion")
    image = models.ImageField(verbose_name="Imagen", upload_to = "projects")
    # Optional external link to the live project.
    link = models.URLField(null=True, blank=True, verbose_name="Direecion web")
    created = models.DateTimeField(auto_now_add=True, verbose_name="Fecha de creacion")
    updated = models.DateTimeField(auto_now=True, verbose_name="Fecha de actualizacion")

    class Meta:
        verbose_name = 'proyecto'
        verbose_name_plural = 'proyectos'
        ordering = ['-created']  # newest projects first

    def __str__(self):
        # Display name in the admin list view.
        return self.title
src/notifications/tests.py | kullo/webconfig | 0 | 17761 | <reponame>kullo/webconfig
# Copyright 2015–2020 Kullo GmbH
#
# This source code is licensed under the 3-clause BSD license. See LICENSE.txt
# in the root directory of this source tree for details.
from django.test import TestCase
# Create your tests here.
| 0.902344 | 1 |
examples/cam.py | jtme/button-shim | 0 | 17762 | #!/usr/bin/env python
import signal
import buttonshim
print("""
Button SHIM: rainbow.py
Command on button press.
Press Ctrl+C to exit.
""")
import commands
@buttonshim.on_press(buttonshim.BUTTON_A)
def button_a(button, pressed):
    """Take a 320x240 snapshot with raspistill when button A is pressed.

    BUG FIX: the original body used Python 2 syntax (`print s`) and called
    `self.output(...)` although `self` does not exist in a plain function —
    both crashed under Python 3. The capture command now runs through
    `subprocess.getstatusoutput`.
    """
    import subprocess
    buttonshim.set_pixel(0x94, 0x00, 0xd3)
    # getstatusoutput returns (exit_status, combined stdout/stderr).
    exit_status, output = subprocess.getstatusoutput(
        "raspistill -w 320 -h 240 -o IMG/snap.jpg")
    print(exit_status, output)
    if exit_status != 0:
        print("error occured ({}): {}".format(exit_status, output))
@buttonshim.on_press(buttonshim.BUTTON_B)
def button_b(button, pressed):
    # Button B: turn the SHIM LED blue.
    buttonshim.set_pixel(0x00, 0x00, 0xff)


@buttonshim.on_press(buttonshim.BUTTON_C)
def button_c(button, pressed):
    # Button C: turn the SHIM LED green.
    buttonshim.set_pixel(0x00, 0xff, 0x00)


@buttonshim.on_press(buttonshim.BUTTON_D)
def button_d(button, pressed):
    # Button D: turn the SHIM LED yellow.
    buttonshim.set_pixel(0xff, 0xff, 0x00)


@buttonshim.on_press(buttonshim.BUTTON_E)
def button_e(button, pressed):
    # Button E: turn the SHIM LED red.
    buttonshim.set_pixel(0xff, 0x00, 0x00)


# Block until a signal arrives so the process stays alive and the button
# handlers registered above can fire.
signal.pause()
| 2.8125 | 3 |
built-in/TensorFlow/Official/cv/image_classification/ResnetVariant_for_TensorFlow/automl/vega/algorithms/nas/sm_nas/mmdet_meta_cfgs/bbox_head/__init__.py | Huawei-Ascend/modelzoo | 12 | 17763 | <reponame>Huawei-Ascend/modelzoo
from .cascade_head import CascadeFCBBoxHead
from .convfc_bbox_head import SharedFCBBoxHead
__all__ = [
'CascadeFCBBoxHead',
'SharedFCBBoxHead']
| 1.21875 | 1 |
monzo/model/monzoaccount.py | elementechemlyn/pythonzo | 0 | 17764 | import datetime
from .monzobalance import MonzoBalance
from .monzopagination import MonzoPaging
from .monzotransaction import MonzoTransaction
class MonzoAccount(object):
    """A Monzo bank account backed by a low-level Monzo API client.

    Wraps the raw account JSON returned by the API and exposes helpers for
    reading the balance and listing transactions over common date ranges.
    """

    # Timestamp format used by the API, e.g. "2020-01-02T03:04:05.678Z".
    _CREATED_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"

    def __init__(self, api, json_dict=None):
        """
        Args:
            api: low-level Monzo API client used for subsequent requests.
            json_dict: optional account JSON from the API to populate fields.
        """
        self.api = api
        self.account_id = None
        self.created = None
        self.description = None
        self.account_type = None
        self.balance = None
        if json_dict:
            self.account_id = json_dict.get("id", None)
            self.description = json_dict.get("description", None)
            self.account_type = json_dict.get("type", None)
            self.created = json_dict.get("created", None)
            if self.created:
                self.created = datetime.datetime.strptime(self.created, self._CREATED_FORMAT)

    @classmethod
    def listAccounts(cls, api):
        """Return every account visible to `api` as MonzoAccount objects."""
        accounts = []
        accounts_json = api.listAccounts()
        for account_json in accounts_json["accounts"]:
            account = cls(api, account_json)
            accounts.append(account)
        return accounts

    @classmethod
    def getAccount(cls, api, account_id):
        """Return the account with `account_id` (balance pre-loaded), or None."""
        accounts_json = api.listAccounts()
        for account_json in accounts_json["accounts"]:
            account = cls(api, account_json)
            if account.account_id == account_id:
                account.readBalance()
                return account
        return None

    def readBalance(self):
        """Fetch and cache the current balance; returns a MonzoBalance."""
        balance_json = self.api.readBalance(self.account_id)
        self.balance = MonzoBalance(self.api, balance_json)
        return self.balance

    def listTransactionsThisMonth(self, expand=None):
        """List transactions since the start of the current calendar month."""
        now = datetime.datetime.now()
        this_month = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
        page = MonzoPaging()
        page.set_since_date(this_month)
        return self.listTransactions(page, expand)

    def listTransactionsToday(self, expand=None):
        """List transactions since local midnight.

        BUG FIX: the parameter was previously misspelled `expland`, so the
        body's reference to `expand` raised NameError on every call.
        """
        now = datetime.datetime.now()
        today = now.replace(hour=0, minute=0, second=0, microsecond=0)
        page = MonzoPaging()
        page.set_since_date(today)
        return self.listTransactions(page, expand)

    def listTransactionSinceDate(self, from_dt, expand=None):
        """List transactions since `from_dt`."""
        page = MonzoPaging()
        page.set_since_date(from_dt)
        return self.listTransactions(page, expand)

    # Alias with plural naming consistent with the other list* methods;
    # the original name is kept for backward compatibility.
    listTransactionsSinceDate = listTransactionSinceDate

    def listTransactionsSinceTransaction(self, trans_id, expand=None):
        """List transactions recorded after the transaction `trans_id`."""
        page = MonzoPaging()
        page.set_since_trans(trans_id)
        return self.listTransactions(page, expand)

    def listTransactionsBetween(self, since_dt, to_dt, expand=None):
        """List transactions between `since_dt` and `to_dt`."""
        page = MonzoPaging()
        page.set_since_date(since_dt)
        page.set_before(to_dt)
        return self.listTransactions(page, expand)

    def listTransactions(self, pagination=None, expand=None):
        """List this account's transactions as MonzoTransaction objects."""
        transactions = []
        transactions_json = self.api.listTransactions(self.account_id, pagination, expand)
        for transaction_json in transactions_json["transactions"]:
            transaction = MonzoTransaction(self.api, transaction_json)
            transactions.append(transaction)
        return transactions
| 2.53125 | 3 |
learninghouse/api/errors/__init__.py | DerOetzi/learninghouse-core | 1 | 17765 | from typing import Dict, Optional
from fastapi import status, Request
from fastapi.responses import JSONResponse
from fastapi.exceptions import RequestValidationError
from learninghouse.models import LearningHouseErrorMessage
MIMETYPE_JSON = 'application/json'
class LearningHouseException(Exception):
    """Base exception of the service; renders as a uniform JSON error body."""

    STATUS_CODE = status.HTTP_500_INTERNAL_SERVER_ERROR
    UNKNOWN = 'UNKNOWN'
    DESCRIPTION = 'An unknown exception occurred while handling your request.'

    def __init__(self,
                 status_code: Optional[int] = None,
                 key: Optional[str] = None,
                 description: Optional[str] = None):
        super().__init__()
        # Fall back to the class-level defaults for anything not supplied.
        self.http_status_code: int = status_code or self.STATUS_CODE
        self.error: LearningHouseErrorMessage = LearningHouseErrorMessage(
            error=key or self.UNKNOWN,
            description=description or self.DESCRIPTION)

    def response(self) -> JSONResponse:
        """Render this exception as a FastAPI JSONResponse."""
        return JSONResponse(status_code=self.http_status_code,
                            content=self.error.dict())

    @classmethod
    def api_description(cls) -> Dict:
        """OpenAPI 'responses' entry describing this error."""
        example = {'error': cls.UNKNOWN, 'description': cls.DESCRIPTION}
        return {
            'model': LearningHouseErrorMessage,
            'description': 'An exception occured which is not handled by the service now. '
                           'Please write an issue on GitHub.',
            'content': {MIMETYPE_JSON: {'example': example}},
        }
class LearningHouseSecurityException(LearningHouseException):
    """Raised when a request fails a security check (HTTP 403)."""
    STATUS_CODE = status.HTTP_403_FORBIDDEN
    SECURITY_EXCEPTION = 'SECURITY_EXCEPTION'
    DESCRIPTION = 'A security violation occured while handling your request.'

    def __init__(self, description: Optional[str] = None):
        # CONSISTENCY FIX: `description` is now optional with a default of
        # None, matching sibling LearningHouseValidationError; the body
        # already fell back with `description or self.DESCRIPTION`, so the
        # required parameter was an accidental inconsistency. Existing
        # callers passing a string are unaffected.
        super().__init__(self.STATUS_CODE,
                         self.SECURITY_EXCEPTION,
                         description or self.DESCRIPTION)

    @classmethod
    def api_description(cls) -> Dict:
        """OpenAPI 'responses' entry for security failures."""
        return {
            'model': LearningHouseErrorMessage,
            'description': 'The request didn\'t pass security checks.',
            'content': {
                MIMETYPE_JSON: {
                    'example': {
                        'error': cls.SECURITY_EXCEPTION,
                        'description': cls.DESCRIPTION
                    }
                }
            }
        }
class LearningHouseValidationError(LearningHouseException):
    """Raised when request input fails validation (HTTP 422)."""
    STATUS_CODE = status.HTTP_422_UNPROCESSABLE_ENTITY
    VALIDATION_ERROR = 'VALIDATION_ERROR'
    DESCRIPTION = 'A validation error occurred while handling your request.'

    def __init__(self, description: Optional[str] = None):
        super().__init__(self.STATUS_CODE,
                         self.VALIDATION_ERROR,
                         description or self.DESCRIPTION)

    @classmethod
    def api_description(cls) -> Dict:
        """OpenAPI 'responses' entry for validation failures."""
        example = {'error': cls.VALIDATION_ERROR,
                   'description': cls.DESCRIPTION}
        return {'model': LearningHouseErrorMessage,
                'description': 'The request didn\'t pass input validation',
                'content': {MIMETYPE_JSON: {'example': example}}}
# Convert FastAPI's RequestValidationError into the service's uniform error body.
async def validation_error_handler(request: Request, exc: RequestValidationError) -> JSONResponse: # pylint: disable=unused-argument
    return LearningHouseValidationError(str(exc)).response()
# Any LearningHouseException knows how to render itself as a JSONResponse.
async def learninghouse_exception_handler(request: Request, exc: LearningHouseException): # pylint: disable=unused-argument
    return exc.response()
| 2.78125 | 3 |
moda/dataprep/create_dataset.py | Patte1808/moda | 0 | 17766 | <reponame>Patte1808/moda
import pandas as pd
def get_windowed_ts(ranged_ts, window_size, with_actual=True):
    """
    Build a data frame whose rows are sliding windows over the time series.

    Each row holds `window_size` consecutive samples (plus one extra column
    when `with_actual` is True, intended as the forecasting ground truth),
    and each consecutive row is shifted by one timestamp.
    For example: [[1,2,3],[2,3,4],[3,4,5]]

    :param ranged_ts: a pd.DataFrame containing one column for values and one
        pd.DatetimeIndex for dates
    :param window_size: the number of timestamps to be used as features
    :param with_actual: whether to increase window size by one and treat the
        last column as the ground truth (relevant for forecasting scenarios)
    :return: a pd.DataFrame with one column per timestamp offset
    """
    num_shifts = window_size - 1 + int(with_actual)
    base = ranged_ts.copy()
    # Column k is the series shifted k steps into the future.
    columns = [ranged_ts]
    columns.extend(base.shift(-(k + 1)) for k in range(num_shifts))
    return pd.concat(columns, axis=1).dropna(axis=0)
def split_history_and_current(windowed_ts):
    """
    Split a windowed frame into features and target.

    The first n-1 columns become X and the last column becomes y; useful
    mainly for forecasting scenarios.

    :param windowed_ts: a pd.DataFrame with a date index and a column per
        timestamp (see get_windowed_ts)
    :return: an (X, y) tuple of numpy arrays
    """
    values = windowed_ts.values
    return (values[:, :-1], values[:, -1])
if __name__ == "__main__":
ranged_ts = pd.DataFrame({"date": range(6), "value": range(6)})
ranged_ts["date"] = pd.to_datetime(ranged_ts["date"])
ranged_ts = ranged_ts.set_index(pd.DatetimeIndex(ranged_ts["date"]))
ranged_ts = ranged_ts.drop(columns="date")
ranged_ts.head()
windowed_df = get_windowed_ts(ranged_ts, window_size=3, with_actual=False)
| 3.53125 | 4 |
test/test_ufunc.py | tuwien-cms/xprec | 6 | 17767 | <gh_stars>1-10
# Copyright (C) 2021 <NAME> and others
# SPDX-License-Identifier: MIT
import numpy as np
import xprec
def _compare_ufunc(ufunc, *args, ulps=1):
    """Assert double and ddouble evaluations of `ufunc` agree within `ulps`."""
    expected = ufunc(*args)
    widened = tuple(arg.astype(xprec.ddouble) for arg in args)
    actual = ufunc(*widened).astype(float)
    # Ensure relative accuracy of `ulps` units in the last place.
    np.testing.assert_array_almost_equal_nulp(expected, actual, ulps)
def test_log():
    """log over the full positive double range; log(0) overflows to -inf."""
    points = np.geomspace(1e-300, 1e300, 1953)
    _compare_ufunc(np.log, points)
    assert np.isinf(np.log(xprec.ddouble.type(0)))
def test_sqrt():
    """sqrt over the full positive double range."""
    points = np.geomspace(1e-300, 1e300, 1953)
    _compare_ufunc(np.sqrt, points)
def test_exp():
    """exp/expm1 over a symmetric grid around zero."""
    positive = np.geomspace(1e-300, 700, 4953)
    grid = np.concatenate([-positive[::-1], [0], positive])
    _compare_ufunc(np.exp, grid)
    # Unfortunately, on Windows expm1 is less precise, so we need to increase
    # the tolerance slightly
    _compare_ufunc(np.expm1, grid, ulps=2)
def test_cosh():
    """cosh/sinh on a symmetric grid; cosh overflows to inf at +/-1000."""
    positive = np.geomspace(1e-300, 700, 4953)
    grid = np.concatenate([-positive[::-1], [0], positive])
    _compare_ufunc(np.cosh, grid)
    _compare_ufunc(np.sinh, grid)
    thousand = xprec.ddouble.type(1000)
    assert np.isinf(np.cosh(thousand))
    assert np.isinf(np.cosh(-thousand))
def test_hypot():
    """hypot over the outer product of a symmetric grid with itself."""
    base = np.geomspace(1e-300, 1e260, 47)
    grid = np.concatenate([-base[::-1], [0], base])
    _compare_ufunc(np.hypot, grid[:, None], grid[None, :])
| 1.828125 | 2 |
echolect/millstone/__init__.py | ryanvolz/echolect | 1 | 17768 | from .read_hdf5 import *
from .hdf5_api import * | 1 | 1 |
pyptoolz/transforms.py | embedio/pyplinez | 0 | 17769 | from pathlib import Path
from toolz import itertoolz, curried
import vaex
# Convert a pathlib.Path into its POSIX string form.
transform_path_to_posix = lambda path: path.as_posix()

def path_to_posix():
    # Curried mapper: applies the conversion to every value of a dict.
    return curried.valmap(transform_path_to_posix)
# Load a tab-separated file into a vaex DataFrame. (Note: `seperator` is the
# spelling used by vaex's from_ascii API.)
transform_xlsx_to_vaex = lambda path: vaex.from_ascii(path, seperator="\t")

def xlsx_to_vaex():
    # BUG FIX: this previously returned curried.valmap(transform_ascii_to_vaex),
    # silently using the ASCII transform instead of the XLSX transform defined
    # directly above.
    return curried.valmap(transform_xlsx_to_vaex)
# Load a tab-separated ASCII file into a vaex DataFrame.
transform_ascii_to_vaex = lambda path: vaex.from_ascii(path, seperator="\t")

def ascii_to_vaex():
    # Curried mapper over dict values: path -> vaex DataFrame (tab-separated).
    return curried.valmap(transform_ascii_to_vaex)

# Variant using vaex's default separator.
transform_ascii_to_vaex2 = lambda path: vaex.from_ascii(path)

def ascii_to_vaex2():
    # Curried mapper over dict values: path -> vaex DataFrame (default separator).
    return curried.valmap(transform_ascii_to_vaex2)

# Collect the row payloads of a vaex DataFrame; iterrows yields (index, row).
transform_vaex_to_list = lambda df: [itertoolz.second(x) for x in df.iterrows()]

def vaex_rows_to_list():
    # Curried mapper over dict values: vaex DataFrame -> list of rows.
    return curried.valmap(transform_vaex_to_list)

transform_vaex_to_dict = lambda df: df.to_dict()

def vaex_to_dict():
    # Curried mapper over dict values: vaex DataFrame -> plain dict.
    return curried.valmap(transform_vaex_to_dict)
| 2.609375 | 3 |
experiments/s3-image-resize/chalicelib/s3_helpers.py | llamapope/chalice-experiments | 0 | 17770 | <gh_stars>0
import PIL
from PIL import Image
from io import BytesIO
import re
def resize(s3_client, bucket, original_key, width, height, suffix):
    """Resize an image staged under ``__incoming/`` and publish both versions.

    Reads the object at `original_key`, writes a `width` x `height` copy named
    ``<key>-<suffix>.<ext>``, then moves the original out of ``__incoming/``.

    :param s3_client: an S3 client (boto3-style API)
    :param bucket: bucket holding both the staged and the published objects
    :param original_key: key of the staged image under ``__incoming/``
    :param width: target width in pixels
    :param height: target height in pixels
    :param suffix: size label appended to the resized key
    :return: the key of the resized image
    :raises Exception: if the object's content type is not PNG or JPEG
    """
    import logging
    logger = logging.getLogger(__name__)

    obj = s3_client.get_object(Bucket=bucket, Key=original_key)
    # "__incoming/a/b.png" is published at "a/b.png".
    full_size_key = original_key.replace('__incoming/', '')
    ext = re.sub(r'.+\.([^.]+)$', r'\1', full_size_key)
    key = re.sub(r'\.[^.]+$', '', full_size_key)

    content_type = obj['ContentType']
    if content_type == 'image/png':
        image_type = 'PNG'
    elif content_type in ('image/jpg', 'image/jpeg'):
        image_type = 'JPEG'
    else:
        raise Exception(f'Invalid image type: {content_type}')

    obj_body = obj['Body'].read()
    img = Image.open(BytesIO(obj_body))
    img = img.resize((width, height), PIL.Image.ANTIALIAS)
    buffer = BytesIO()
    img.save(buffer, image_type)
    buffer.seek(0)

    resized_key = f"{key}-{suffix}.{ext}"
    # write the resized image
    s3_client.put_object(
        Key=resized_key,
        Bucket=bucket,
        Body=buffer,
        ContentType=content_type,
        ACL='public-read')

    # move the original out of __incoming
    s3_client.copy_object(
        Bucket=bucket,
        Key=full_size_key,
        CopySource=f'{bucket}/{original_key}',
        ACL='public-read')
    s3_client.delete_object(Bucket=bucket, Key=original_key)

    # BUG FIX: this previously called `app.log.debug(...)`, but `app` is not
    # defined anywhere in this module, so every otherwise-successful call
    # ended in a NameError. Returning the key is new but backward compatible,
    # since the function never returned normally before.
    logger.debug("resized: %s, key: %s", bucket, resized_key)
    return resized_key
synapse/handlers/room_member_worker.py | lukaslihotzki/synapse | 9,945 | 17771 | # Copyright 2018-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, List, Optional, Tuple
from synapse.api.errors import SynapseError
from synapse.handlers.room_member import RoomMemberHandler
from synapse.replication.http.membership import (
ReplicationRemoteJoinRestServlet as ReplRemoteJoin,
ReplicationRemoteKnockRestServlet as ReplRemoteKnock,
ReplicationRemoteRejectInviteRestServlet as ReplRejectInvite,
ReplicationRemoteRescindKnockRestServlet as ReplRescindKnock,
ReplicationUserJoinedLeftRoomRestServlet as ReplJoinedLeft,
)
from synapse.types import JsonDict, Requester, UserID
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
class RoomMemberWorkerHandler(RoomMemberHandler):
    """Worker-process RoomMemberHandler: membership operations that must run
    on the main process are forwarded via the replication HTTP clients built
    in ``__init__``."""

    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)

        # One replication client per remote-membership operation.
        self._remote_join_client = ReplRemoteJoin.make_client(hs)
        self._remote_knock_client = ReplRemoteKnock.make_client(hs)
        self._remote_reject_client = ReplRejectInvite.make_client(hs)
        self._remote_rescind_client = ReplRescindKnock.make_client(hs)
        self._notify_change_client = ReplJoinedLeft.make_client(hs)

    async def _remote_join(
        self,
        requester: Requester,
        remote_room_hosts: List[str],
        room_id: str,
        user: UserID,
        content: dict,
    ) -> Tuple[str, int]:
        """Implements RoomMemberHandler._remote_join"""
        if len(remote_room_hosts) == 0:
            raise SynapseError(404, "No known servers")

        ret = await self._remote_join_client(
            requester=requester,
            remote_room_hosts=remote_room_hosts,
            room_id=room_id,
            user_id=user.to_string(),
            content=content,
        )

        return ret["event_id"], ret["stream_id"]

    async def remote_reject_invite(
        self,
        invite_event_id: str,
        txn_id: Optional[str],
        requester: Requester,
        content: dict,
    ) -> Tuple[str, int]:
        """
        Rejects an out-of-band invite received from a remote user

        Implements RoomMemberHandler.remote_reject_invite
        """
        ret = await self._remote_reject_client(
            invite_event_id=invite_event_id,
            txn_id=txn_id,
            requester=requester,
            content=content,
        )
        return ret["event_id"], ret["stream_id"]

    async def remote_rescind_knock(
        self,
        knock_event_id: str,
        txn_id: Optional[str],
        requester: Requester,
        content: JsonDict,
    ) -> Tuple[str, int]:
        """
        Rescinds a local knock made on a remote room

        Args:
            knock_event_id: the knock event
            txn_id: optional transaction ID supplied by the client
            requester: user making the request, according to the access token
            content: additional content to include in the leave event.
               Normally an empty dict.

        Returns:
            A tuple containing (event_id, stream_id of the leave event)
        """
        ret = await self._remote_rescind_client(
            knock_event_id=knock_event_id,
            txn_id=txn_id,
            requester=requester,
            content=content,
        )
        return ret["event_id"], ret["stream_id"]

    async def remote_knock(
        self,
        remote_room_hosts: List[str],
        room_id: str,
        user: UserID,
        content: dict,
    ) -> Tuple[str, int]:
        """Sends a knock to a room.

        Implements RoomMemberHandler.remote_knock
        """
        ret = await self._remote_knock_client(
            remote_room_hosts=remote_room_hosts,
            room_id=room_id,
            user=user,
            content=content,
        )
        return ret["event_id"], ret["stream_id"]

    async def _user_left_room(self, target: UserID, room_id: str) -> None:
        """Implements RoomMemberHandler._user_left_room"""
        # Notify the main process that the user left, via replication.
        await self._notify_change_client(
            user_id=target.to_string(), room_id=room_id, change="left"
        )

    async def forget(self, target: UserID, room_id: str) -> None:
        # Forgetting rooms is not supported on worker processes.
        raise RuntimeError("Cannot forget rooms on workers.")
| 1.554688 | 2 |
win/devkit/other/pymel/extras/completion/py/maya/app/edl/importExport.py | leegoonz/Maya-devkit | 10 | 17772 | <filename>win/devkit/other/pymel/extras/completion/py/maya/app/edl/importExport.py
import tempfile
import maya.OpenMaya as OpenMaya
import maya.OpenMayaRender as OpenMayaRender
import maya.OpenMayaMPx as OpenMayaMPx
import maya.cmds as cmds
import maya
import re
from maya.app.edl.fcp import *
class ImportExport(OpenMayaMPx.MPxCommand):
def __del__(self):
pass
def __init__(self):
pass
class Exporter(ImportExport):
def __init__(self):
pass
def doIt(self, fileName):
pass
def setAllowPlayblast(self, allow):
"""
If true, will re-playblast of all shots whose clips are out of date
or non-existent.
"""
pass
class Importer(ImportExport):
    """Auto-generated completion stub for the EDL importer command."""
    def __init__(self):
        pass
    def doIt(self, fileName):
        """
        Reads an EDL file into Maya. Will generate shots, tracks and audio in Maya that
        corresponds to the tracks and clips in the EDL.
        """
        pass
    def setStartFrameOverride(self, frame):
        pass
# Auto-generated API completion stubs: signatures and docstrings only,
# bodies are intentionally empty.
def _setTimeCode(timecode):
    pass
def doExport(fileName, allowPlayblast):
    """
    Exports the Maya sequence using the EDL Exporter class.
    """
    pass
def doMel(*args, **kwargs):
    """
    Takes as input a string containing MEL code, evaluates it, and returns the result.
    This function takes a string which contains MEL code and evaluates it using
    the MEL interpreter. The result is converted into a Python data type and is
    returned.
    If an error occurs during the execution of the MEL script, a Python exception
    is raised with the appropriate error message.
    """
    pass
def audioClipCompare(a, b):
    pass
def _getValidClipObjectName(clipName, isVideo):
    pass
def doImport(fileName, useStartFrameOverride, startFrame):
    """
    Imports the specified file using the EDL Importer class.
    """
    pass
def _nameToNode(name):
    pass
def getTimeCode():
    pass
def videoClipCompare(a, b):
    pass
def getShotsResolution():
    """
    Returns the video resolution of the sequencer if all the shots have the same resolution
    Otherwise it returns False, 0, 0
    """
    pass
# Populated elsewhere; maps Maya frame-rate identifiers (stub placeholder).
mayaFrameRates = {}
| 2.34375 | 2 |
packages/mcni/python/mcni/instrument_simulator/__init__.py | mcvine/mcvine | 5 | 17773 | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# <NAME>
# California Institute of Technology
# (C) 2006-2010 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
## Note:
## 1. This package depends on dsm
def copyright():
    """Return the copyright notice string for this module."""
    notice = "mcni.instrument_simulators module: Copyright (c) 2006-2010 <NAME>"
    return notice
def simulator(neutron_coordinates_transformer):
    """Build an instrument simulator bound to the given coordinates transformer.

    Creates a fresh subclass of AbstractInstrumentSimulator whose class-level
    ``neutron_coordinates_transformer`` is the supplied transformer, and
    returns an instance of that subclass.
    """
    t = neutron_coordinates_transformer
    from .AbstractInstrumentSimulator import AbstractInstrumentSimulator as base
    class Simulator(base):
        neutron_coordinates_transformer = t
        pass
    return Simulator()
from mcni.neutron_coordinates_transformers import default as default_neutron_coordinates_transformer
# Module-level convenience instance built with the default transformer.
default_simulator = simulator( default_neutron_coordinates_transformer )
# version
__id__ = "$Id$"
# End of file
| 1.859375 | 2 |
todolist/wsgi.py | HangeZoe/django-todo-list | 0 | 17774 | <reponame>HangeZoe/django-todo-list<filename>todolist/wsgi.py
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before constructing the WSGI callable.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'todolist.settings')
# WSGI entry point used by application servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| 1.335938 | 1 |
cern_search_rest_api/modules/cernsearch/cli.py | inveniosoftware-contrib/citadel-search | 6 | 17775 | <filename>cern_search_rest_api/modules/cernsearch/cli.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of CERN Search.
# Copyright (C) 2018-2021 CERN.
#
# Citadel Search is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Click command-line utilities."""
import json
import click
from flask.cli import with_appcontext
from invenio_pidstore.models import PersistentIdentifier, PIDStatus
from invenio_records.models import RecordMetadata
from invenio_search import current_search
from invenio_search.cli import es_version_check
from cern_search_rest_api.modules.cernsearch.indexer import CernSearchRecordIndexer
from cern_search_rest_api.modules.cernsearch.indexer_tasks import process_bulk_queue
def abort_if_false(ctx, param, value):
    """Click option callback: abort the running command when *value* is falsy.

    ``param`` is unused but required by the click callback signature.
    """
    if value:
        return
    ctx.abort()
@click.group()
def utils():
    """Misc management commands."""
@utils.command("runindex")
@click.option("--delayed", "-d", is_flag=True, help="Run indexing in background.")
@click.option(
"--chunk_size",
"-s",
default=500,
type=int,
help="Number of docs in one chunk sent to es (default: 500)",
)
@click.option(
"--max_chunk_bytes",
"-b",
default=int(99.9 * 1024 * 1024),
type=int,
help="The maximum size of the request in bytes (default: 100MB).",
)
@click.option(
"--concurrency",
"-c",
default=1,
type=int,
help="Number of concurrent indexing tasks to start.",
)
@click.option(
"--queue",
"-q",
type=str,
help="Name of the celery queue used to put the tasks into.",
)
@click.option("--version-type", help="Elasticsearch version type to use.")
@click.option(
"--raise-on-error/--skip-errors",
default=True,
help="Controls if Elasticsearch bulk indexing errors raise an exception.",
)
@with_appcontext
def run(
delayed,
chunk_size,
max_chunk_bytes,
concurrency,
queue=None,
version_type=None,
raise_on_error=True,
):
"""Run bulk record indexing."""
es_bulk_kwargs = {
"raise_on_error": raise_on_error,
"chunk_size": chunk_size,
"max_chunk_bytes": max_chunk_bytes,
}
if delayed:
celery_kwargs = {"kwargs": {"version_type": version_type, "es_bulk_kwargs": es_bulk_kwargs}}
click.secho("Starting {0} tasks for indexing records...".format(concurrency), fg="green")
if queue is not None:
celery_kwargs.update({"queue": queue})
for c in range(0, concurrency):
process_bulk_queue.apply_async(**celery_kwargs)
else:
click.secho("Indexing records...", fg="green")
CernSearchRecordIndexer(version_type=version_type).process_bulk_queue(es_bulk_kwargs=es_bulk_kwargs)
@utils.command("reindex")
@click.option(
"--yes-i-know",
is_flag=True,
callback=abort_if_false,
expose_value=False,
prompt="Do you really want to reindex all records?",
)
@click.option("-t", "--pid-type", multiple=True, required=True)
@click.option("-i", "--id", "id_list", help="List of ids.", multiple=True)
@click.option("-d", "--doc-type", required=False)
@with_appcontext
def reindex(pid_type, id_list, doc_type=None):
"""Reindex all records.
:param pid_type: Pid type.
:param id_list: List of ids.
:param doc_type: Doc type
"""
click.secho("Sending records to indexing queue ...", fg="green")
query = id_list
if not query:
query = (
PersistentIdentifier.query.filter_by(object_type="rec", status=PIDStatus.REGISTERED)
.join(RecordMetadata, PersistentIdentifier.object_uuid == RecordMetadata.id)
.filter(PersistentIdentifier.pid_type.in_(pid_type))
)
if doc_type:
query = query.filter(RecordMetadata.json.op("->>")("$schema").contains(doc_type))
query = (x[0] for x in query.yield_per(100).values(PersistentIdentifier.object_uuid))
CernSearchRecordIndexer().bulk_index(query)
click.secho('Execute "run" command to process the queue!', fg="yellow")
@utils.command("index-init")
@click.argument("index_name")
@click.option("-f", "--force", is_flag=True, default=False)
@click.option("-v", "--verbose", is_flag=True, default=False)
@with_appcontext
@es_version_check
def index_init(index_name, force, verbose):
"""Init index by its name."""
results = list(current_search.create(index_list=[index_name], ignore_existing=force))
if verbose:
click.echo(json.dumps(results))
| 2.015625 | 2 |
kasaya/core/backend/redisstore.py | AYAtechnologies/Kasaya-esb | 1 | 17776 | <gh_stars>1-10
__author__ = 'wektor'
from generic import GenericBackend
import redis
class RedisBackend(GenericBackend):
    """Key/value backend storing values as Redis hashes.

    Each key maps to a hash with two fields: "type" (the Python type name of
    the stored value) and "data" (the value itself).  Written in Python 2
    syntax (note the ``print`` statement below).
    """
    def __init__(self):
        # Single local Redis instance, default database.
        pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
        self.store = redis.Redis(connection_pool=pool)
    def get_typecode(self, value):
        # Extracts e.g. "dict" from "<type 'dict'>" / "<class 'dict'>".
        typecode = str(type(value)).split("'")[1]
        return typecode
    def set(self, key, value):
        data = {}
        data["type"] = self.get_typecode(value)
        data["data"] = value
        self.store.hmset(key, data)
#    def update(self, key, value):
    def get(self, key):
        data = self.store.hgetall(key)
        # NOTE(review): debug print left in -- consider removing or using logging.
        print data
        try:
            # SECURITY(review): eval() of data read back from Redis executes
            # arbitrary code if the store can be written by untrusted parties;
            # ast.literal_eval would be a safer choice -- confirm threat model.
            if data["type"] != "str":
                return eval(data["data"])
            else:
                return data["data"]
        except KeyError:
            # Missing key (or missing fields) is reported as an empty dict.
            return {}
    def delete(self, key):
        self.store.delete(key)
venv/lib/python3.6/site-packages/ansible_collections/amazon/aws/plugins/module_utils/rds.py | usegalaxy-no/usegalaxy | 22 | 17777 | <filename>venv/lib/python3.6/site-packages/ansible_collections/amazon/aws/plugins/module_utils/rds.py
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from collections import namedtuple
from time import sleep
try:
from botocore.exceptions import BotoCoreError, ClientError, WaiterError
except ImportError:
pass
from ansible.module_utils._text import to_text
from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
from .ec2 import AWSRetry
from .ec2 import ansible_dict_to_boto3_tag_list
from .ec2 import boto3_tag_list_to_ansible_dict
from .ec2 import compare_aws_tags
from .waiters import get_waiter
Boto3ClientMethod = namedtuple('Boto3ClientMethod', ['name', 'waiter', 'operation_description', 'cluster', 'instance'])
# Whitelist boto3 client methods for cluster and instance resources
cluster_method_names = [
    'create_db_cluster', 'restore_db_cluster_from_db_snapshot', 'restore_db_cluster_from_s3',
    'restore_db_cluster_to_point_in_time', 'modify_db_cluster', 'delete_db_cluster', 'add_tags_to_resource',
    'remove_tags_from_resource', 'list_tags_for_resource', 'promote_read_replica_db_cluster'
]
instance_method_names = [
    'create_db_instance', 'restore_db_instance_to_point_in_time', 'restore_db_instance_from_s3',
    'restore_db_instance_from_db_snapshot', 'create_db_instance_read_replica', 'modify_db_instance',
    'delete_db_instance', 'add_tags_to_resource', 'remove_tags_from_resource', 'list_tags_for_resource',
    'promote_read_replica', 'stop_db_instance', 'start_db_instance', 'reboot_db_instance'
]


def get_rds_method_attribute(method_name, module):
    """Return Boto3ClientMethod metadata for a whitelisted RDS client method.

    The cluster/instance classification depends both on the whitelists above
    and on which ``new_db_*_identifier`` parameter the calling module
    declares.  Raises NotImplementedError for unknown methods.
    """
    readable_op = method_name.replace('_', ' ').replace('db', 'DB')
    is_cluster = method_name in cluster_method_names and 'new_db_cluster_identifier' in module.params
    # Cluster classification wins when a method appears in both whitelists
    # (the shared tag methods), matching the if/elif order of the original.
    is_instance = (not is_cluster
                   and method_name in instance_method_names
                   and 'new_db_instance_identifier' in module.params)
    if is_cluster:
        waiter = 'cluster_deleted' if method_name == 'delete_db_cluster' else 'cluster_available'
    elif is_instance:
        if method_name == 'delete_db_instance':
            waiter = 'db_instance_deleted'
        elif method_name == 'stop_db_instance':
            waiter = 'db_instance_stopped'
        else:
            waiter = 'db_instance_available'
    else:
        raise NotImplementedError("method {0} hasn't been added to the list of accepted methods to use a waiter in module_utils/rds.py".format(method_name))
    return Boto3ClientMethod(name=method_name, waiter=waiter, operation_description=readable_op,
                             cluster=is_cluster, instance=is_instance)
def get_final_identifier(method_name, module):
    """Return the DB cluster/instance identifier after a possible rename.

    When not in check mode and apply_immediately is set, the ``new_*``
    identifier replaces the original one.
    """
    apply_immediately = module.params['apply_immediately']
    if get_rds_method_attribute(method_name, module).cluster:
        identifier = module.params['db_cluster_identifier']
        updated_identifier = module.params['new_db_cluster_identifier']
    elif get_rds_method_attribute(method_name, module).instance:
        identifier = module.params['db_instance_identifier']
        updated_identifier = module.params['new_db_instance_identifier']
    else:
        raise NotImplementedError("method {0} hasn't been added to the list of accepted methods in module_utils/rds.py".format(method_name))
    if not module.check_mode and updated_identifier and apply_immediately:
        identifier = updated_identifier
    return identifier
def handle_errors(module, exception, method_name, parameters):
    """Translate boto errors into module failures or "no change" results.

    Returns False for the known benign cases ("no modifications requested",
    "not a read replica"); every other error fails the module.
    """
    if not isinstance(exception, ClientError):
        module.fail_json_aws(exception, msg="Unexpected failure for method {0} with parameters {1}".format(method_name, parameters))
    changed = True
    error_code = exception.response['Error']['Code']
    if method_name == 'modify_db_instance' and error_code == 'InvalidParameterCombination':
        if 'No modifications were requested' in to_text(exception):
            changed = False
        elif 'ModifyDbCluster API' in to_text(exception):
            module.fail_json_aws(exception, msg='It appears you are trying to modify attributes that are managed at the cluster level. Please see rds_cluster')
        else:
            module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
    elif method_name == 'promote_read_replica' and error_code == 'InvalidDBInstanceState':
        if 'DB Instance is not a read replica' in to_text(exception):
            changed = False
        else:
            module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
    elif method_name == 'create_db_instance' and exception.response['Error']['Code'] == 'InvalidParameterValue':
        accepted_engines = [
            'aurora', 'aurora-mysql', 'aurora-postgresql', 'mariadb', 'mysql', 'oracle-ee', 'oracle-se',
            'oracle-se1', 'oracle-se2', 'postgres', 'sqlserver-ee', 'sqlserver-ex', 'sqlserver-se', 'sqlserver-web'
        ]
        if parameters.get('Engine') not in accepted_engines:
            module.fail_json_aws(exception, msg='DB engine {0} should be one of {1}'.format(parameters.get('Engine'), accepted_engines))
        else:
            module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
    else:
        module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
    return changed
def call_method(client, module, method_name, parameters):
    """Call a whitelisted boto3 method with retries; return (result, changed).

    In check mode nothing is called and ({}, True) is returned.  When
    ``wait`` is requested, blocks until the resource reaches the expected
    state both before (for modify) and after the call.
    """
    result = {}
    changed = True
    if not module.check_mode:
        wait = module.params['wait']
        # TODO: stabilize by adding get_rds_method_attribute(method_name).extra_retry_codes
        method = getattr(client, method_name)
        try:
            if method_name == 'modify_db_instance':
                # check if instance is in an available state first, if possible
                if wait:
                    wait_for_status(client, module, module.params['db_instance_identifier'], method_name)
                result = AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidDBInstanceState'])(method)(**parameters)
            else:
                result = AWSRetry.jittered_backoff()(method)(**parameters)
        except (BotoCoreError, ClientError) as e:
            changed = handle_errors(module, e, method_name, parameters)
        if wait and changed:
            identifier = get_final_identifier(method_name, module)
            wait_for_status(client, module, identifier, method_name)
    return result, changed
def wait_for_instance_status(client, module, db_instance_id, waiter_name):
    """Block until the DB instance reaches the state implied by *waiter_name*.

    Retries the waiter up to 10 times when the instance briefly disappears
    (e.g. during a rename); any other failure fails the module.
    """
    def wait(client, db_instance_id, waiter_name, extra_retry_codes):
        # NOTE(review): 'retry' is computed but never used -- confirm whether
        # the waiter call below was meant to be wrapped in it.
        retry = AWSRetry.jittered_backoff(catch_extra_error_codes=extra_retry_codes)
        try:
            waiter = client.get_waiter(waiter_name)
        except ValueError:
            # using a waiter in module_utils/waiters.py
            waiter = get_waiter(client, waiter_name)
        waiter.wait(WaiterConfig={'Delay': 60, 'MaxAttempts': 60}, DBInstanceIdentifier=db_instance_id)
    waiter_expected_status = {
        'db_instance_deleted': 'deleted',
        'db_instance_stopped': 'stopped',
    }
    expected_status = waiter_expected_status.get(waiter_name, 'available')
    if expected_status == 'available':
        extra_retry_codes = ['DBInstanceNotFound']
    else:
        extra_retry_codes = []
    for attempt_to_wait in range(0, 10):
        try:
            wait(client, db_instance_id, waiter_name, extra_retry_codes)
            break
        except WaiterError as e:
            # Instance may be renamed and AWSRetry doesn't handle WaiterError
            if e.last_response.get('Error', {}).get('Code') == 'DBInstanceNotFound':
                sleep(10)
                continue
            module.fail_json_aws(e, msg='Error while waiting for DB instance {0} to be {1}'.format(db_instance_id, expected_status))
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg='Unexpected error while waiting for DB instance {0} to be {1}'.format(
                db_instance_id, expected_status)
            )
def wait_for_cluster_status(client, module, db_cluster_id, waiter_name):
    """Block until the DB cluster is available/deleted; fail the module on error."""
    try:
        waiter = get_waiter(client, waiter_name).wait(DBClusterIdentifier=db_cluster_id)
    except WaiterError as e:
        if waiter_name == 'cluster_deleted':
            msg = "Failed to wait for DB cluster {0} to be deleted".format(db_cluster_id)
        else:
            msg = "Failed to wait for DB cluster {0} to be available".format(db_cluster_id)
        module.fail_json_aws(e, msg=msg)
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Failed with an unexpected error while waiting for the DB cluster {0}".format(db_cluster_id))
def wait_for_status(client, module, identifier, method_name):
    """Dispatch to the cluster or instance waiter appropriate for *method_name*."""
    waiter_name = get_rds_method_attribute(method_name, module).waiter
    if get_rds_method_attribute(method_name, module).cluster:
        wait_for_cluster_status(client, module, identifier, waiter_name)
    elif get_rds_method_attribute(method_name, module).instance:
        wait_for_instance_status(client, module, identifier, waiter_name)
    else:
        raise NotImplementedError("method {0} hasn't been added to the whitelist of handled methods".format(method_name))
def get_tags(client, module, cluster_arn):
    """Return the resource's tags as an Ansible dict; fail the module on error."""
    try:
        return boto3_tag_list_to_ansible_dict(
            client.list_tags_for_resource(ResourceName=cluster_arn)['TagList']
        )
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Unable to describe tags")
def arg_spec_to_rds_params(options_dict):
    """Convert Ansible snake_case options into RDS CamelCase API parameters.

    Tags and processor_features are passed through untouched; RDS-specific
    capitalisations (DB/IAM/AZ) are fixed up after the generic conversion.
    NOTE(review): assumes 'tags' is always present -- pop() without a default
    raises KeyError otherwise; confirm callers always supply it.
    """
    tags = options_dict.pop('tags')
    has_processor_features = False
    if 'processor_features' in options_dict:
        has_processor_features = True
        processor_features = options_dict.pop('processor_features')
    camel_options = snake_dict_to_camel_dict(options_dict, capitalize_first=True)
    for key in list(camel_options.keys()):
        for old, new in (('Db', 'DB'), ('Iam', 'IAM'), ('Az', 'AZ')):
            if old in key:
                camel_options[key.replace(old, new)] = camel_options.pop(key)
    camel_options['Tags'] = tags
    if has_processor_features:
        camel_options['ProcessorFeatures'] = processor_features
    return camel_options
def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags):
    """Reconcile the resource's tags with *tags*; return True if anything changed.

    ``tags is None`` means "leave tags alone".  With purge_tags, tags not in
    *tags* are removed.
    """
    if tags is None:
        return False
    tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, tags, purge_tags)
    changed = bool(tags_to_add or tags_to_remove)
    if tags_to_add:
        call_method(
            client, module, method_name='add_tags_to_resource',
            parameters={'ResourceName': resource_arn, 'Tags': ansible_dict_to_boto3_tag_list(tags_to_add)}
        )
    if tags_to_remove:
        call_method(
            client, module, method_name='remove_tags_from_resource',
            parameters={'ResourceName': resource_arn, 'TagKeys': tags_to_remove}
        )
    return changed
| 1.609375 | 2 |
week06/lecture/examples/src6/2/uppercase0.py | uldash/CS50x | 0 | 17778 | <reponame>uldash/CS50x
# Uppercases string one character at a time
from cs50 import get_string
s = get_string("Before: ")
print("After: ", end="")
for c in s:
print(c.upper(), end="")
print()
| 3.453125 | 3 |
tests/test_kobo.py | Donearm/kobuddy | 75 | 17779 | from datetime import datetime
from pathlib import Path
import pytz
import kobuddy
def get_test_db():
    """Return the path to the checked-in KoboReader test database."""
    test_dir = Path(__file__).absolute().parent
    return test_dir / 'data' / 'kobo_notes' / 'input' / 'KoboReader.sqlite'
# Module-level setup: point kobuddy at the test database before importing
# its query helpers, since they read from the configured database.
kobuddy.set_databases(get_test_db())
from kobuddy import _iter_events_aux, get_events, get_books_with_highlights, _iter_highlights
def test_events():
    """Smoke test: iterating raw events does not raise."""
    for e in _iter_events_aux():
        print(e)
def test_hls():
    """Smoke test: iterating highlights does not raise."""
    for h in _iter_highlights():
        print(h)
def test_get_all():
    """The fixture database yields a non-trivial number of events."""
    events = get_events()
    assert len(events) > 50
    for d in events:
        print(d)
def test_books_with_highlights():
    """Checks bookmark/highlight/annotation parsing against known fixture data."""
    pages = get_books_with_highlights()
    g = pages[0]
    assert 'Essentialism' in g.book
    hls = g.highlights
    assert len(hls) == 273
    [b] = [h for h in hls if h.eid == '520b7b13-dbef-4402-9a81-0f4e0c4978de']
    # TODO wonder if there might be any useful info? StartContainerPath, EndContainerPath
    assert b.kind == 'bookmark'
    # TODO move to a more specific test?
    # TODO assert sorted by date or smth?
    assert hls[0].kind == 'highlight'
    # TODO assert highlights got no annotation? not sure if it's even necessary to distinguish..
    [ann] = [h for h in hls if h.annotation is not None and len(h.annotation) > 0]
    assert ann.eid == 'eb264817-9a06-42fd-92ff-7bd38cd9ca79'
    assert ann.kind == 'annotation'
    assert ann.text == 'He does this by finding which machine has the biggest queue of materials waiting behind it and finds a way to increase its efficiency.'
    assert ann.annotation == 'Bottleneck'
    assert ann.dt == datetime(year=2017, month=8, day=12, hour=3, minute=49, second=13, microsecond=0, tzinfo=pytz.utc)
    assert ann.book.author == '<NAME>'
    assert len(pages) == 7
def test_history():
    """Smoke test: progress printing does not raise."""
    kobuddy.print_progress()
def test_annotations():
    """Smoke test: annotation printing does not raise."""
    kobuddy.print_annotations()
def test_books():
    """Smoke test: book printing does not raise."""
    kobuddy.print_books()
| 2.171875 | 2 |
tanks/views.py | BArdelean/djangostuff | 0 | 17780 | from django.shortcuts import render
from .models import Tank
from django.db import models
from django.http import HttpResponse
from django.views import View
# Create your views here.
# The view for the created model Tank
def tank_view(request):
    """Render the tank battle page with every Tank in the database."""
    tanks = Tank.objects.all()
    return render(request, "tankbattle.html", {'object': tanks})
def _tank_detail(request, pk):
    """Render the tank battle page for the Tank with the given primary key.

    Shared helper extracted from the four copy-pasted tank_N views.
    """
    queryset = Tank.objects.get(pk=pk)
    context = {
        'object': queryset
    }
    return render(request, 'tankbattle.html', context)


# NOTE(review): each view receives ``pk`` from the URL but ignores it and
# renders a hard-coded tank, exactly as the original code did. Confirm
# whether the views should honour ``pk`` instead (a single _tank_detail
# route would then replace all four).
def tank_1(request, pk):
    return _tank_detail(request, 1)


def tank_2(request, pk):
    return _tank_detail(request, 2)


def tank_3(request, pk):
    return _tank_detail(request, 3)


def tank_4(request, pk):
    return _tank_detail(request, 4)
| 2.234375 | 2 |
VQVAE/main.py | bipashasen/How2Sign-Blob | 0 | 17781 | import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import argparse
from tqdm import tqdm
import sys
import distributed as dist
import utils
from models.vqvae import VQVAE, VQVAE_Blob2Full
from models.discriminator import discriminator
# Directory where reconstruction samples are written during training/testing.
visual_folder = '/home2/bipasha31/python_scripts/CurrentWork/samples/VQVAE'
os.makedirs(visual_folder, exist_ok=True)
verbose = False
# Running counter used to name saved validation samples.
save_idx_global = 0
# Save/visualise every `save_at` updates.
save_at = 100
did = 0
# Selects which training loop main() runs.
models = {
    'gan': 0,
    'vae': 1
}
model_to_train = models['vae']
# Mutable training history shared by the train loops and checkpointing.
results = {
    'n_updates': 0,
    'recon_errors': [],
    'loss_vals': [],
    'perplexities': [],
    'd_loss': []
}
device = 'cuda:0'
def main(args):
    """
    Set up VQ-VAE model with components defined in ./models/ folder
    """
    model = VQVAE(args.n_hiddens, args.n_residual_hiddens,
                  args.n_residual_layers, args.n_embeddings,
                  args.embedding_dim, args.beta, device)
    # Optionally resume from a checkpoint before moving to the device.
    if args.ckpt:
        model.load_state_dict(torch.load(args.ckpt)['model'])
    model = model.to(device)
    # Test-only mode: run inference over the test loader and exit.
    if args.test:
        loader = utils.load_data_and_data_loaders(args.dataset, args.batch_size, test=True)
        test(loader, model)
        return
    """
    Load data and define batch data loaders
    """
    items = utils.load_data_and_data_loaders(args.dataset, args.batch_size)
    training_loader, validation_loader = items[2], items[3]
    x_train_var = items[4]
    """
    Set up optimizer and training loop
    """
    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate, amsgrad=True)
    model.train()
    # Dispatch on the module-level model_to_train switch (VAE vs VQ-GAN).
    if model_to_train == models['gan']:
        train_vqgan(args, training_loader, validation_loader, x_train_var, model, optimizer)
    else:
        train(args, training_loader, validation_loader, x_train_var, model, optimizer)
def test(loader, model):
    """Run the model over *loader* without gradients, saving visual samples."""
    for i, data in enumerate(tqdm(loader)):
        x, _ = data
        x = x.to(device)
        with torch.no_grad():
            _ = model(x, save_idx=f'{i}', visual_folder=visual_folder)
def train(args, training_loader, validation_loader, x_train_var, model, optimizer):
    """Plain VQ-VAE training loop: reconstruction + codebook loss.

    Every `save_at` updates, runs 10 validation batches and saves their
    visualisations; every `log_interval` updates, checkpoints and logs.
    """
    global save_idx_global
    for i in range(args.n_updates):
        (x, _) = next(iter(training_loader))
        x = x.to(device)
        optimizer.zero_grad()
        save_idx = None
        embedding_loss, x_hat, perplexity = model(x)
        # Reconstruction error normalised by the training-set variance.
        recon_loss = torch.mean((x_hat - x)**2) / x_train_var
        loss = recon_loss + embedding_loss
        loss.backward()
        optimizer.step()
        results["recon_errors"].append(recon_loss.cpu().detach().numpy())
        results["perplexities"].append(perplexity.cpu().detach().numpy())
        results["loss_vals"].append(loss.cpu().detach().numpy())
        results["n_updates"] = i
        if i % save_at == 0:
            save_idx = save_idx_global
            save_idx_global += 1
            model.eval()
            with torch.no_grad():
                for vi in tqdm(range(10)):
                    (x, _) = next(iter(validation_loader))
                    x = x.to(device)
                    _, _, _ = model(x, verbose=verbose, save_idx=f'{save_idx}_{vi}', visual_folder=visual_folder)
            model.train()
        if i % args.log_interval == 0 and dist.is_primary():
            """
            save model and print values
            """
            if args.save:
                hyperparameters = args.__dict__
                utils.save_model_and_results(
                    model, optimizer, results, hyperparameters, args.filename)
            print('Update #', i, 'Recon Error:',
                  np.mean(results["recon_errors"][-args.log_interval:]),
                  'Loss', np.mean(results["loss_vals"][-args.log_interval:]),
                  'Perplexity:', np.mean(results["perplexities"][-args.log_interval:]))
def train_vqgan(args, training_loader, validation_loader, x_train_var, model, optimizer):
    """VQ-GAN-style training loop: VQ-VAE loss plus a patch discriminator.

    Generator (even steps) and discriminator (odd steps) updates alternate;
    the discriminator classifies 16x16 patch grids as real/fake via MSE.
    """
    global save_idx_global
    c_mse = nn.MSELoss()
    disc = discriminator().to(device)
    optim_D = optim.Adam(disc.parameters(), lr=args.learning_rate, amsgrad=True)
    for i in range(args.n_updates):
        (x, _) = next(iter(training_loader))
        x = x.to(device)
        optimizer.zero_grad()
        optim_D.zero_grad()
        save_idx = None
        if i % save_at == 0 and i > 0:
            save_idx = save_idx_global
            save_idx_global += 1
        embedding_loss, x_hat, perplexity = \
            model(x, verbose=verbose, save_idx=save_idx, visual_folder=visual_folder)
        recon_loss = torch.mean((x_hat - x)**2) / x_train_var
        loss = recon_loss + embedding_loss
        '''
        adding the perceptual loss here - patch loss of real and fake
        '''
        B = args.batch_size
        # 16x16 = number of discriminator output patches per image.
        D = 16 * 16
        ones = torch.ones((B, D), dtype=torch.float32, device=device)
        zeros = torch.zeros((B, D), dtype=torch.float32, device=device)
        if i % 2 == 0:
            # Generator step: push discriminator output on fakes towards "real".
            fake = disc(x_hat).view(B, D)
            loss += c_mse(fake, ones)
        else:
            # Discriminator step: detach x_hat so only disc receives gradients.
            fake = disc(x_hat.clone().detach()).view(B, D)
            real = disc(x).view(B, D)
            d_loss = c_mse(real, ones) + c_mse(fake, zeros)
            results["d_loss"].append(d_loss.cpu().detach().numpy())
            d_loss.backward()
            optim_D.step()
        loss.backward()
        optimizer.step()
        results["recon_errors"].append(recon_loss.cpu().detach().numpy())
        results["perplexities"].append(perplexity.cpu().detach().numpy())
        results["loss_vals"].append(loss.cpu().detach().numpy())
        results["n_updates"] = i
        if i % args.log_interval == 0:
            """
            save model and print values
            """
            if args.save:
                hyperparameters = args.__dict__
                utils.save_model_and_results(
                    model, optimizer, results, hyperparameters, args.filename)
            print('Update #', i, 'Recon Error:',
                  np.mean(results["recon_errors"][-args.log_interval:]),
                  'Loss', np.mean(results["loss_vals"][-args.log_interval:]),
                  'Discriminator Loss', np.mean(results['d_loss'][-args.log_interval:]),
                  'Perplexity:', np.mean(results["perplexities"][-args.log_interval:]), flush=True)
if __name__ == "__main__":
# train_vqgan()
# train_blob2full()
parser = argparse.ArgumentParser()
"""
Hyperparameters
"""
timestamp = utils.readable_timestamp()
parser.add_argument("--batch_size", type=int, default=64)
parser.add_argument("--n_updates", type=int, default=50000)
parser.add_argument("--n_hiddens", type=int, default=128)
parser.add_argument("--n_residual_hiddens", type=int, default=32)
parser.add_argument("--n_residual_layers", type=int, default=2)
parser.add_argument("--embedding_dim", type=int, default=64)
parser.add_argument("--n_embeddings", type=int, default=512)
parser.add_argument("--beta", type=float, default=.25)
parser.add_argument("--learning_rate", type=float, default=3e-4)
parser.add_argument("--ckpt", type=str)
parser.add_argument("--log_interval", type=int, default=3)
parser.add_argument("--save_at", type=int, default=100)
parser.add_argument("--device_id", type=int, default=0)
parser.add_argument("--dataset", type=str, default='HandGestures')
parser.add_argument("--test", action='store_true')
# whether or not to save model
parser.add_argument("-save", action="store_true")
parser.add_argument("--filename", type=str, default=timestamp)
args = parser.parse_args()
args.save = True
if args.save and dist.is_primary():
print('Results will be saved in ./results/vqvae_' + args.filename + '.pth')
args.n_gpu = torch.cuda.device_count()
port = (
2 ** 15
+ 2 ** 14
+ hash(os.getuid() if sys.platform != "win32" else 1) % 2 ** 14
)+1
print(f'port: {port}')
print(args)
dist.launch(main, args.n_gpu, 1, 0, f"tcp://127.0.0.1:{port}", args=(args,))
| 2.15625 | 2 |
api/voters/tests/test_models.py | citizenlabsgr/voter-engagement | 6 | 17782 | # pylint: disable=unused-variable,unused-argument,expression-not-assigned
from django.forms.models import model_to_dict
import arrow
import pytest
from expecter import expect
from api.elections.models import Election
from .. import models
@pytest.fixture
def info():
    """A sample Identity used to build voters."""
    return models.Identity(
        first_name="John",
        last_name="Doe",
        birth_date=arrow.get("1985-06-19"),
    )
@pytest.fixture
def voter(info):
    """A Voter built from the sample identity's fields."""
    return models.Voter(
        email="<EMAIL>",
        **model_to_dict(info),
    )
@pytest.fixture
def status(voter):
    """A Status linking the sample voter to a sample election."""
    return models.Status(
        voter=voter,
        election=Election(name="Sample Election"),
    )
# pytest-describe style: nested describe_* groups, inner functions are tests.
def describe_registration_info():
    def describe_birth_month():
        def is_parsed_from_date(info):
            expect(info.birth_month) == "June"
    def describe_birth_year():
        def is_parsed_from_date(info):
            expect(info.birth_year) == 1985
def describe_voter():
    def describe_str():
        def is_based_on_name(voter):
            expect(str(voter)) == "<NAME>"
def describe_status():
    def describe_str():
        def is_based_on_voter_and_election(status):
            expect(str(status)) == "Sample Election: <NAME>"
| 2.15625 | 2 |
leetcode/easy/strobogrammatic-number.py | vtemian/interviews-prep | 8 | 17783 | class Solution:
def isStrobogrammatic(self, num: str) -> bool:
strobogrammatic = {
'1': '1',
'0': '0',
'6': '9',
'9': '6',
'8': '8'
}
for idx, digit in enumerate(num):
if digit not in strobogrammatic or strobogrammatic[digit] != num[len(num) - idx -1]:
return False
return True
| 3.625 | 4 |
app/main/helpers/direct_award_helpers.py | uk-gov-mirror/alphagov.digitalmarketplace-buyer-frontend | 4 | 17784 | <filename>app/main/helpers/direct_award_helpers.py
from operator import itemgetter
def is_direct_award_project_accessible(project, user_id):
    """Return True when *user_id* is one of the project's users."""
    member_ids = (user['id'] for user in project['users'])
    return user_id in member_ids
def get_direct_award_projects(data_api_client, user_id, return_type="all", sort_by_key=None, latest_first=None):
    """Fetch the user's direct award projects split into open and closed lists.

    A project is "open" until it has a ``lockedAt`` value.  With
    ``return_type="all"`` a dict containing both lists is returned; otherwise
    only the named list ("open_projects" or "closed_projects").
    ``sort_by_key`` optionally sorts the returned list(s) by that key.
    """
    response = data_api_client.find_direct_award_projects(user_id, latest_first=latest_first)
    buckets = {"open_projects": [], "closed_projects": []}
    for project in response.get('projects', []):
        bucket = "open_projects" if project['lockedAt'] is None else "closed_projects"
        buckets[bucket].append(project)
    targets = ["open_projects", "closed_projects"] if return_type == "all" else [return_type]
    if sort_by_key:
        for name in targets:
            buckets[name].sort(key=itemgetter(sort_by_key))
    return buckets if return_type == "all" else buckets[return_type]
| 2.390625 | 2 |
403-Frog-Jump/solution.py | Tanych/CodeTracking | 0 | 17785 | <filename>403-Frog-Jump/solution.py<gh_stars>0
class Solution(object):
    def dfs(self, stones, graph, curpos, lastjump, memo=None):
        """Return True if the frog can reach the last stone from *curpos*.

        ``lastjump`` is the size of the jump that landed on ``curpos``; the
        next jump must be lastjump - 1, lastjump or lastjump + 1, forward
        only.  ``memo`` caches (position, last-jump) states so each state is
        explored at most once (the original memo-less DFS was exponential).
        """
        if memo is None:
            memo = {}
        state = (curpos, lastjump)
        if state in memo:
            return memo[state]
        if curpos == stones[-1]:
            return True
        # Forward-only candidate landing positions; never stay on the spot.
        rstart = max(curpos + lastjump - 1, curpos + 1)
        rend = min(curpos + lastjump + 1, stones[-1]) + 1
        reachable = False
        # range (not Python-2-only xrange) works on both Python 2 and 3.
        for nextpos in range(rstart, rend):
            if nextpos in graph and self.dfs(stones, graph, nextpos, nextpos - curpos, memo):
                reachable = True
                break
        memo[state] = reachable
        return reachable

    def canCross(self, stones):
        """Return True if the frog can cross by landing only on stones.

        :type stones: List[int]
        :rtype: bool
        """
        # No stones, or a single stone, means the frog is already across
        # (the original raised IndexError on a single-stone input).
        if not stones or len(stones) == 1:
            return True
        # The first jump is always of size 1, so the second stone must be at 1.
        if stones[1] != 1:
            return False
        graph = {val: idx for idx, val in enumerate(stones)}
        return self.dfs(stones, graph, 1, 1, {})
| 3.09375 | 3 |
daemon/api/endpoints/partial/pod.py | vishalbelsare/jina | 2 | 17786 | <reponame>vishalbelsare/jina
from typing import Optional, Dict, Any
from fastapi import APIRouter
from jina.helper import ArgNamespace
from jina.parsers import set_pod_parser
from ....excepts import PartialDaemon400Exception
from ....models import PodModel
from ....models.partial import PartialStoreItem
from ....stores import partial_store as store
router = APIRouter(prefix='/pod', tags=['pod'])
@router.get(
    path='',
    summary='Get status of a running Pod',
    response_model=PartialStoreItem,
)
async def _status():
    """Return the item currently held by the partial pod store.

    .. #noqa: DAR101
    .. #noqa: DAR201"""
    return store.item
@router.post(
    path='',
    summary='Create a Pod',
    description='Create a Pod and add it to the store',
    status_code=201,
    response_model=PartialStoreItem,
)
async def _create(pod: 'PodModel'):
    """Convert the request model into parser args and add the Pod to the store.

    .. #noqa: DAR101
    .. #noqa: DAR201"""
    try:
        args = ArgNamespace.kwargs2namespace(pod.dict(), set_pod_parser())
        return store.add(args)
    except Exception as ex:
        # Surface any failure as a daemon-specific 400 while keeping the cause.
        raise PartialDaemon400Exception from ex
@router.put(
    path='/rolling_update',
    summary='Run a rolling_update operation on the Pod object',
    response_model=PartialStoreItem,
)
async def rolling_update(uses_with: Optional[Dict[str, Any]] = None):
    """Trigger a rolling update of the managed Pod.

    :param uses_with: optional override kwargs forwarded to the Pod's
        executors during the update
    """
    try:
        return await store.rolling_update(uses_with=uses_with)
    except ValueError as ex:
        raise PartialDaemon400Exception from ex
@router.put(
    path='/scale',
    summary='Run a scale operation on the Pod object',
    response_model=PartialStoreItem,
)
async def scale(replicas: int):
    """Scale the managed Pod to ``replicas`` replicas.

    Invalid replica counts surface as a 400 via ``PartialDaemon400Exception``.
    """
    try:
        return await store.scale(replicas=replicas)
    except ValueError as ex:
        raise PartialDaemon400Exception from ex
@router.delete(
    path='',
    summary='Terminate the running Pod',
    description='Terminate a running Pod and release its resources',
)
async def _delete():
    """Terminate the managed Pod and release its resources."""
    try:
        store.delete()
    except Exception as ex:
        raise PartialDaemon400Exception from ex
@router.on_event('shutdown')
def _shutdown():
    """On app shutdown, make sure the managed Pod is cleaned up."""
    store.delete()
| 2.234375 | 2 |
runehistory_api/app/config.py | RuneHistory/runehistory-api | 0 | 17787 | import yaml
class Config:
    """Typed accessor over a YAML configuration file.

    Parses the file at construction time; each property reads from the
    parsed mapping, supplying defaults where sensible.
    """

    def __init__(self, path: str):
        self.path = path
        self.cfg = {}
        self.parse()

    def parse(self):
        """Load the YAML file at ``self.path`` into ``self.cfg``.

        Uses ``yaml.safe_load`` instead of ``yaml.load`` (no Loader), which
        would allow arbitrary object construction from untrusted files.
        An empty file parses to ``None``; fall back to ``{}`` so the
        ``.get`` based properties keep working.
        """
        with open(self.path, 'r') as f:
            self.cfg = yaml.safe_load(f) or {}

    @property
    def secret(self) -> str:
        return self.cfg.get('secret')

    @property
    def db_connection_string(self) -> str:
        return self.cfg.get('db_connection_string')

    @property
    def db_host(self) -> str:
        return self.cfg.get('db_host', '127.0.0.1')

    @property
    def db_port(self) -> int:
        return self.cfg.get('db_port', 27017)

    @property
    def db_name(self) -> str:
        # annotation fixed: the default (and expected value) is a string
        return self.cfg.get('db_name', 'runehistory')
| 2.828125 | 3 |
csv_filter/__init__.py | mooore-digital/csv_filter | 1 | 17788 | #!/usr/bin/env python3
import argparse
import csv
import logging
import os
import re
import sys
DELIMITER = ','


class CsvFilter:
    """Filter/deduplicate a CSV file and write the result next to the source.

    Bug fixed vs. the original: when only ``deduplicate`` was given, ``valid``
    was never set to True, so every data row was dropped; and when both
    options were given, duplicate rows that matched the filter were still
    emitted. A row is now kept iff it survives deduplication (when enabled)
    AND matches the filter (when enabled).
    """

    def __init__(
        self,
        file=None,
        deduplicate=False,
        filter_query=None,
        filter_inverse=False,
        ignore_case=False,
        verbose=False,
        delimiter=DELIMITER
    ):
        self.file = file                    # path relative to the CWD
        self.deduplicate = deduplicate      # column name used as dedup key, or False
        self.filter = filter_query          # "column=regex" string, or falsy
        self.filter_inverse = filter_inverse
        self.ignore_case = ignore_case
        self.verbose = verbose
        self.delimiter = delimiter

        self.logger = logging.getLogger('deduplicate')
        if self.verbose:
            self.logger.setLevel(logging.DEBUG)

    def apply(self):
        """Filter ``self.file`` and write the result as ``<name>.filtered.csv``."""
        base_path = os.getcwd()
        source_path = base_path + '/' + self.file
        destination_path = source_path.replace('.csv', '.filtered.csv')

        filtered_items = self.filter_items(source_path)
        self.store_items(destination_path, filtered_items)

    def filter_items(self, file_path):
        """Read ``file_path`` and return the rows (header first) that pass.

        The header row is always kept; it is also used to resolve the
        deduplication and filter column names to indices.
        """
        result = []
        seen_keys = set()
        dedup_index = None
        filter_column = None
        filter_index = None
        filter_pattern = None
        row_count = 0

        re_flags = re.IGNORECASE if self.ignore_case else 0

        if self.verbose:
            print('* Filtering file', file_path)

        if self.filter:
            # expected shape: "<column>=<regex>"
            filter_match = re.match('^(.+)=(.+)$', self.filter)
            if filter_match:
                filter_column = filter_match.group(1)
                filter_pattern = filter_match.group(2)

        with open(file_path, 'rt') as csv_file:
            for row in csv.reader(csv_file, delimiter=self.delimiter):
                if row_count == 0:
                    # header: resolve column names, always keep the row
                    if self.deduplicate:
                        dedup_index = row.index(self.deduplicate)
                    if filter_column:
                        filter_index = row.index(filter_column)
                    row_count += 1
                    result.append(row)
                    continue

                valid = True

                if dedup_index is not None:
                    value = row[dedup_index]
                    if self.ignore_case:
                        value = value.lower()
                    if value in seen_keys:
                        valid = False
                    else:
                        # first occurrence of this key: remember it, keep row
                        seen_keys.add(value)

                if valid and filter_index is not None:
                    value = row[filter_index]
                    # keep iff (regex matched) differs from filter_inverse
                    if bool(re.match(filter_pattern, value, re_flags)) == self.filter_inverse:
                        valid = False

                if valid:
                    result.append(row)
                row_count += 1

        if self.verbose:
            print('* Filtered', row_count, 'items to', len(result))

        return result

    def store_items(self, file_path, items):
        """Write ``items`` to ``file_path`` as CSV."""
        if self.verbose:
            print('* Storing items to', file_path)

        # newline='' is required so csv handles line endings itself
        with open(file_path, 'wt', newline='') as csv_file:
            writer = csv.writer(csv_file, delimiter=self.delimiter)
            for row in items:
                writer.writerow(row)
def parse_arguments(argv=None):
    """Parse command-line options.

    :param argv: optional explicit argument list; defaults to ``sys.argv[1:]``
        (the default keeps the original CLI behavior while making the
        function testable)
    :returns: the parsed ``argparse.Namespace``
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--file', '-f', help='File to filter')
    parser.add_argument('--deduplicate', help='Deduplication column to be applied', default=False)
    parser.add_argument('--filter', help='Filter to be applied', default=False)
    parser.add_argument('--filter_inverse', action='store_true', help='Inverse filter matches', default=False)
    parser.add_argument('--ignore_case', '-i', action='store_true', help='Match values case insensitive', default=False)
    parser.add_argument('--verbose', '-v', action='store_true', help='Enable verbose')
    return parser.parse_args(argv)
def main():
    """Command-line entry point: parse options, run the filter, return 0."""
    options = parse_arguments()
    csv_filter = CsvFilter(
        file=options.file,
        deduplicate=options.deduplicate,
        filter_query=options.filter,
        filter_inverse=options.filter_inverse,
        ignore_case=options.ignore_case,
        verbose=options.verbose,
    )
    csv_filter.apply()
    return 0
if __name__ == '__main__':
sys.exit(main())
| 3.015625 | 3 |
scientist/__init__.py | boxed/scientist | 0 | 17789 | <reponame>boxed/scientist
def check_candidate(a, candidate, callback_when_different, *args, **kwargs):
    """Run control ``a`` and ``candidate`` with the same arguments and compare.

    If the two disagree (results, or raising behavior, or exception
    type/args), ``callback_when_different`` is invoked with keyword
    arguments describing both outcomes and a human-readable ``reason``.
    The control's outcome always wins: its exception is re-raised, or its
    result is returned.
    """
    control_result = None
    candidate_result = None
    control_exception = None
    candidate_exception = None

    try:
        control_result = a(*args, **kwargs)
    except BaseException as exc:
        control_exception = exc

    try:
        candidate_result = candidate(*args, **kwargs)
    except BaseException as exc:
        candidate_exception = exc

    # Work out whether (and why) the two implementations disagree.
    reason = None
    if candidate_exception is None:
        if control_exception is not None:
            reason = 'old code raised, new did not'
        elif control_result != candidate_result:
            reason = 'different results'
    else:
        if control_exception is None:
            reason = 'new code raised, old did not'
        elif type(control_exception) != type(candidate_exception):
            reason = 'new and old both raised exception, but different types'
        elif control_exception.args != candidate_exception.args:
            reason = 'new and old both raised exception, but with different data'

    if reason is not None:
        callback_when_different(
            control_result=control_result,
            candidate_result=candidate_result,
            control_exception=control_exception,
            candidate_exception=candidate_exception,
            reason=reason,
        )

    if control_exception is not None:
        raise control_exception
    return control_result
| 2.859375 | 3 |
bluebottle/impact/tests/test_api.py | terrameijar/bluebottle | 10 | 17790 | # coding=utf-8
from builtins import str
import json
from django.contrib.auth.models import Group, Permission
from django.urls import reverse
from rest_framework import status
from bluebottle.impact.models import ImpactGoal
from bluebottle.impact.tests.factories import (
ImpactTypeFactory, ImpactGoalFactory
)
from bluebottle.time_based.tests.factories import DateActivityFactory
from bluebottle.members.models import MemberPlatformSettings
from bluebottle.test.factory_models.accounts import BlueBottleUserFactory
from bluebottle.test.utils import BluebottleTestCase, JSONAPITestClient
class ImpactTypeListAPITestCase(BluebottleTestCase):
    """API tests for the impact-type list endpoint.

    Covers the resource shape, anonymous access, the active-only filter,
    closed-platform behavior, and that POST is rejected.
    """

    def setUp(self):
        super(ImpactTypeListAPITestCase, self).setUp()
        self.client = JSONAPITestClient()

        self.types = ImpactTypeFactory.create_batch(10)
        self.url = reverse('impact-type-list')
        self.user = BlueBottleUserFactory()

    def test_get(self):
        response = self.client.get(self.url, user=self.user)

        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.json()['data']), len(self.types))

        # every impact type exposes these attributes
        resource = response.json()['data'][0]['attributes']
        self.assertTrue('slug' in resource)
        self.assertTrue('name' in resource)
        self.assertTrue('unit' in resource)
        self.assertTrue('text' in resource)
        self.assertTrue('text-with-target' in resource)
        self.assertTrue('text-passed' in resource)

        resource_type = response.json()['data'][0]['type']
        self.assertEqual(resource_type, 'activities/impact-types')

    def test_get_anonymous(self):
        response = self.client.get(self.url)

        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.json()['data']), len(self.types))

    def test_get_only_active(self):
        # deactivated types must not be listed
        self.types[0].active = False
        self.types[0].save()
        response = self.client.get(self.url)

        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.json()['data']), len(self.types) - 1)

    def test_get_closed(self):
        MemberPlatformSettings.objects.update(closed=True)
        response = self.client.get(self.url, user=self.user)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_get_closed_anonymous(self):
        # closed platform: anonymous users are rejected
        MemberPlatformSettings.objects.update(closed=True)
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_post(self):
        response = self.client.post(self.url, user=self.user)
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
class ImpactGoalListAPITestCase(BluebottleTestCase):
    """API tests for creating impact goals (JSON:API POST to the list URL)."""

    def setUp(self):
        super(ImpactGoalListAPITestCase, self).setUp()
        self.client = JSONAPITestClient()

        self.activity = DateActivityFactory.create()
        self.type = ImpactTypeFactory.create()

        self.url = reverse('impact-goal-list')

        # valid JSON:API payload linking a goal to the activity and type
        self.data = {
            'data': {
                'type': 'activities/impact-goals',
                'attributes': {
                    'target': 1.5
                },
                'relationships': {
                    'activity': {
                        'data': {
                            'type': 'activities/time-based/dates',
                            'id': self.activity.pk
                        },
                    },
                    'type': {
                        'data': {
                            'type': 'activities/impact-types',
                            'id': self.type.pk
                        },
                    }
                }
            }
        }

    def test_create(self):
        response = self.client.post(
            self.url,
            json.dumps(self.data),
            user=self.activity.owner
        )

        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

        resource_type = response.json()['data']['type']
        self.assertEqual(resource_type, 'activities/impact-goals')

        goal = ImpactGoal.objects.get(pk=response.json()['data']['id'])

        self.assertEqual(
            goal.target, self.data['data']['attributes']['target']
        )
        self.assertEqual(goal.type, self.type)
        self.assertEqual(goal.activity, self.activity)

    def test_create_no_target(self):
        # target is optional; the goal is created with target == None
        del self.data['data']['attributes']['target']
        response = self.client.post(
            self.url,
            json.dumps(self.data),
            user=self.activity.owner
        )

        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

        resource_type = response.json()['data']['type']
        self.assertEqual(resource_type, 'activities/impact-goals')

        goal = ImpactGoal.objects.get(pk=response.json()['data']['id'])

        self.assertEqual(
            goal.target, None
        )
        self.assertEqual(goal.type, self.type)
        self.assertEqual(goal.activity, self.activity)

    def test_create_non_owner(self):
        # only the activity owner may attach goals
        response = self.client.post(
            self.url,
            json.dumps(self.data),
            user=BlueBottleUserFactory.create()
        )

        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_create_anonymous(self):
        response = self.client.post(
            self.url,
            json.dumps(self.data),
        )

        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class ImpactGoalDetailsAPITestCase(BluebottleTestCase):
    """API tests for the impact-goal detail endpoint (GET/PATCH/DELETE).

    Read access is public; mutations are restricted to the activity owner.
    """

    def setUp(self):
        super(ImpactGoalDetailsAPITestCase, self).setUp()
        self.client = JSONAPITestClient()

        self.activity = DateActivityFactory.create()
        self.type = ImpactTypeFactory.create()
        self.goal = ImpactGoalFactory(type=self.type, activity=self.activity)

        self.url = reverse('impact-goal-details', args=(self.goal.pk, ))

        self.data = {
            'data': {
                'type': 'activities/impact-goals',
                'id': self.goal.pk,
                'attributes': {
                    'target': 1.5
                },
            }
        }

    def test_get(self):
        response = self.client.get(
            self.url,
            user=self.activity.owner
        )

        self.assertEqual(response.status_code, status.HTTP_200_OK)

        data = response.json()['data']
        self.assertEqual(data['type'], 'activities/impact-goals')
        self.assertEqual(
            data['attributes']['target'], self.goal.target
        )
        # relationship ids come back as strings in JSON:API
        self.assertEqual(
            data['relationships']['type']['data']['id'],
            str(self.goal.type.pk)
        )
        self.assertEqual(
            data['relationships']['activity']['data']['id'],
            str(self.goal.activity.pk)
        )

    def test_get_incomplete(self):
        # a goal without a target is still considered complete (no required fields)
        self.goal.target = None
        self.goal.save()

        response = self.client.get(
            self.url,
            user=self.activity.owner
        )

        self.assertEqual(response.status_code, status.HTTP_200_OK)

        data = response.json()['data']
        self.assertEqual(data['meta']['required'], [])

    def test_get_non_owner(self):
        response = self.client.get(
            self.url,
            user=BlueBottleUserFactory.create()
        )

        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_get_anonymous(self):
        response = self.client.get(self.url)

        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_get_closed_anonymous(self):
        # revoke the anonymous read permission and close the platform
        anonymous = Group.objects.get(name='Anonymous')
        anonymous.permissions.remove(
            Permission.objects.get(codename='api_read_dateactivity')
        )
        MemberPlatformSettings.objects.update(closed=True)
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_update(self):
        response = self.client.patch(
            self.url,
            data=json.dumps(self.data),
            user=self.activity.owner
        )

        self.assertEqual(response.status_code, status.HTTP_200_OK)

        data = response.json()['data']
        self.assertEqual(data['type'], 'activities/impact-goals')
        self.assertEqual(
            data['attributes']['target'],
            self.data['data']['attributes']['target']
        )

        self.goal.refresh_from_db()

        self.assertEqual(
            self.goal.target,
            self.data['data']['attributes']['target']
        )

    def test_update_other_user(self):
        response = self.client.patch(
            self.url,
            data=json.dumps(self.data),
            user=BlueBottleUserFactory.create()
        )

        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_update_anonymous(self):
        response = self.client.patch(
            self.url,
            data=json.dumps(self.data)
        )

        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_delete(self):
        response = self.client.delete(
            self.url,
            user=self.activity.owner
        )

        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)

        with self.assertRaises(ImpactGoal.DoesNotExist):
            ImpactGoal.objects.get(pk=self.goal.pk)

    def test_delete_other_user(self):
        response = self.client.delete(
            self.url,
            user=BlueBottleUserFactory.create()
        )

        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_delete_anonymous(self):
        response = self.client.delete(self.url)

        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
| 2.140625 | 2 |
dotmotif/parsers/v2/test_v2_parser.py | aplbrain/dotmotif | 28 | 17791 | <filename>dotmotif/parsers/v2/test_v2_parser.py
from . import ParserV2
import dotmotif
import unittest
_THREE_CYCLE = """A -> B\nB -> C\nC -> A\n"""
_THREE_CYCLE_NEG = """A !> B\nB !> C\nC !> A\n"""
_THREE_CYCLE_INH = """A -| B\nB -| C\nC -| A\n"""
_THREE_CYCLE_NEG_INH = """A !| B\nB !| C\nC !| A\n"""
_ABC_TO_D = """\nA -> D\nB -> D\nC -> D\n"""
_THREE_CYCLE_CSV = """\nA,B\nB,C\nC,A\n"""
_THREE_CYCLE_NEG_CSV = """\nA,B\nB,C\nC,A\n"""
class TestDotmotif_Parserv2_DM(unittest.TestCase):
    """Parser v2 smoke tests: edge/node counts, edge actions, existence flags."""

    def test_sanity(self):
        self.assertEqual(1, 1)

    def test_dm_parser(self):
        dm = dotmotif.Motif(_THREE_CYCLE)
        self.assertEqual(len(dm._g.edges()), 3)
        self.assertEqual(len(dm._g.nodes()), 3)

    def test_dm_parser_actions(self):
        # -> edges carry action SYN; -| edges carry action INH
        dm = dotmotif.Motif(_THREE_CYCLE)
        self.assertEqual([e[2]["action"] for e in dm._g.edges(data=True)], ["SYN"] * 3)
        dm = dotmotif.Motif(_THREE_CYCLE_INH)
        self.assertEqual([e[2]["action"] for e in dm._g.edges(data=True)], ["INH"] * 3)

    def test_dm_parser_edge_exists(self):
        # !> / !| edges are negated: exists == False
        dm = dotmotif.Motif(_THREE_CYCLE)
        self.assertEqual([e[2]["exists"] for e in dm._g.edges(data=True)], [True] * 3)
        dm = dotmotif.Motif(_THREE_CYCLE_NEG)
        self.assertEqual([e[2]["exists"] for e in dm._g.edges(data=True)], [False] * 3)
        dm = dotmotif.Motif(_THREE_CYCLE_NEG_INH)
        self.assertEqual([e[2]["exists"] for e in dm._g.edges(data=True)], [False] * 3)
class TestDotmotif_Parserv2_DM_Macros(unittest.TestCase):
    """Tests for DSL macros: definition, invocation, nesting, errors, comments."""

    def test_macro_not_added(self):
        # defining a macro without calling it adds no edges
        exp = """\
        edge(A, B) {
            A -> B
        }
        """
        dm = dotmotif.Motif(exp)
        self.assertEqual(len(dm._g.edges()), 0)

    def test_simple_macro(self):
        exp = """\
        edge(A, B) {
            A -> B
        }
        edge(C, D)
        """
        dm = dotmotif.Motif(exp)
        self.assertEqual(len(dm._g.edges()), 1)

    def test_simple_macro_construction(self):
        # the call-site argument names become the motif node names
        exp = """\
        edge(A, B) {
            A -> B
        }
        edge(C, D)
        """
        dm = dotmotif.Motif(exp)
        exp_edge = list(dm._g.edges(data=True))[0]
        self.assertEqual(exp_edge[0], "C")
        self.assertEqual(exp_edge[1], "D")

    def test_multiline_macro_construction(self):
        exp = """\
        dualedge(A, B) {
            A -> B
            B -> A
        }
        dualedge(C, D)
        """
        dm = dotmotif.Motif(exp)
        exp_edge = list(dm._g.edges(data=True))[0]
        self.assertEqual(exp_edge[0], "C")
        self.assertEqual(exp_edge[1], "D")

    def test_undefined_macro(self):
        # calling an undefined macro raises
        exp = """\
        dualedge(A, B) {
            A -> B
            B -> A
        }
        foo(C, D)
        """
        # with self.assertRaises(ValueError):
        with self.assertRaises(Exception):
            dotmotif.Motif(exp)

    def test_wrong_args_macro(self):
        # arity mismatch at the call site raises
        exp = """\
        edge(A, B) {
            A -> B
            B -> A
        }
        edge(C, D, E)
        """
        # with self.assertRaises(ValueError):
        with self.assertRaises(Exception):
            dotmotif.Motif(exp)

    def test_more_complex_macro(self):
        exp = """\
        tri(A, B, C) {
            A -> B
            B -> C
            C -> A
        }
        tri(C, D, E)
        """
        dm = dotmotif.Motif(exp)
        edges = list(dm._g.edges(data=True))
        self.assertEqual(len(edges), 3)

    def test_macro_reuse(self):
        exp = """\
        tri(A, B, C) {
            A -> B
            B -> C
            C -> A
        }
        tri(C, D, E)
        tri(F, G, H)
        """
        dm = dotmotif.Motif(exp)
        edges = list(dm._g.edges(data=True))
        self.assertEqual(len(edges), 6)

    def test_conflicting_macro_invalid_edge_throws(self):
        # the same edge asserted both existing and not existing must fail validation
        exp = """\
        tri(A, B, C) {
            A -> B
            B -> C
            C -> A
        }
        nontri(A, B, C) {
            A !> B
            B !> C
            C !> A
        }
        tri(C, D, E)
        nontri(D, E, F)
        """
        # with self.assertRaises(dotmotif.validators.DisagreeingEdgesValidatorError):
        with self.assertRaises(Exception):
            dotmotif.Motif(exp)

    def test_nested_macros(self):
        exp = """\
        dualedge(A, B) {
            A -> B
            B -> A
        }
        dualtri(A, B, C) {
            dualedge(A, B)
            dualedge(B, C)
            dualedge(C, A)
        }
        dualtri(foo, bar, baz)
        """
        dm = dotmotif.Motif(exp)
        edges = list(dm._g.edges(data=True))
        self.assertEqual(len(edges), 6)

    def test_deeply_nested_macros(self):
        # macros calling macros calling macros
        exp = """\
        edge(A, B) {
            A -> B
        }
        dualedge(A, B) {
            edge(A, B)
            edge(B, A)
        }
        dualtri(A, B, C) {
            dualedge(A, B)
            dualedge(B, C)
            dualedge(C, A)
        }
        dualtri(foo, bar, baz)
        """
        dm = dotmotif.Motif(exp)
        edges = list(dm._g.edges(data=True))
        self.assertEqual(len(edges), 6)

    def test_clustercuss_macros_no_repeats(self):
        # two overlapping invocations share two edges: 6 + 6 - 2 duplicates = 10
        exp = """\
        edge(A, B) {
            A -> B
        }
        dualedge(A, B) {
            edge(A, B)
            edge(B, A)
        }
        dualtri(A, B, C) {
            dualedge(A, B)
            dualedge(B, C)
            dualedge(C, A)
        }
        dualtri(foo, bar, baz)
        dualtri(foo, bar, baf)
        """
        dm = dotmotif.Motif(exp)
        edges = list(dm._g.edges(data=True))
        self.assertEqual(len(edges), 10)

    def test_comment_in_macro(self):
        exp = """\
        # Outside comment
        edge(A, B) {
            # Inside comment
            A -> B
        }
        dualedge(A, B) {
            # Nested-inside comment
            edge(A, B)
            edge(B, A)
        }
        dualedge(foo, bar)
        """
        dm = dotmotif.Motif(exp)
        edges = list(dm._g.edges(data=True))
        self.assertEqual(len(edges), 2)

    def test_combo_macro(self):
        # macro bodies may mix macro calls and plain edges
        exp = """\
        edge(A, B) {
            A -> B
        }
        dualedge(A, B) {
            # Nested-inside comment!
            edge(A, B)
            B -> A
        }
        dualedge(foo, bar)
        """
        dm = dotmotif.Motif(exp)
        edges = list(dm._g.edges(data=True))
        self.assertEqual(len(edges), 2)

    def test_comment_macro_inline(self):
        exp = """\
        edge(A, B) {
            A -> B
        }
        dualedge(A, B) {
            # Nested-inside comment!
            edge(A, B) # inline comment
            B -> A
        }
        dualedge(foo, bar) # inline comment
        # standalone comment
        foo -> bar # inline comment
        """
        dm = dotmotif.Motif(exp)
        edges = list(dm._g.edges(data=True))
        self.assertEqual(len(edges), 2)

    def test_alphanumeric_variables(self):
        # identifiers may contain digits and underscores; node names stay str
        exp = """\
        edge(A, B) {
            A -> B
        }
        dualedge(A1, B) {
            # Nested-inside comment!
            edge(A1, B) # inline comment
            B -> A1
        }
        dualedge(foo_1, bar_2) # inline comment
        # standalone comment
        foo_1 -> bar_2 # inline comment
        """
        dm = dotmotif.Motif(exp)
        edges = list(dm._g.edges(data=True))
        self.assertEqual(len(edges), 2)
        self.assertEqual(list(dm._g.nodes()), ["foo_1", "bar_2"])
        self.assertEqual(type(list(dm._g.nodes())[0]), str)
        new_exp = """
        L1 -> Mi1
        L1 -> Tm3
        L3 -> Mi9
        """
        dm = dotmotif.Motif(new_exp)
        self.assertEqual(list(dm._g.nodes()), ["L1", "Mi1", "Tm3", "L3", "Mi9"])
class TestDotmotif_Parserv2_DM_EdgeAttributes(unittest.TestCase):
    """Tests for edge attribute constraints (``[attr op value]``) in the DSL."""

    def test_basic_edge_attr(self):
        exp = """\
        Aa -> Ba [type == 1]
        """
        dm = dotmotif.Motif(exp)
        self.assertEqual(len(dm._g.edges()), 1)
        # NOTE(review): the nbunch lists "Bb" but the motif node is "Ba";
        # this only works because "Aa" is present — confirm intent.
        u, v, d = list(dm._g.edges(["Aa", "Bb"], data=True))[0]
        self.assertEqual(type(list(dm._g.nodes())[0]), str)
        self.assertEqual(type(list(dm._g.nodes())[1]), str)
        self.assertEqual(d["constraints"]["type"], {"==": [1]})

    def test_edge_multi_attr(self):
        # repeated constraints on one attribute accumulate under one operator
        exp = """\
        Aa -> Ba [type != 1, type != 12]
        """
        dm = dotmotif.Motif(exp)
        self.assertEqual(len(dm._g.edges()), 1)
        u, v, d = list(dm._g.edges(data=True))[0]
        self.assertEqual(d["constraints"]["type"], {"!=": [1, 12]})

    def test_edge_macro_attr(self):
        # edge constraints survive macro expansion
        exp = """\
        macro(Aa, Ba) {
            Aa -> Ba [type != 1, type != 12]
        }
        macro(X, Y)
        """
        dm = dotmotif.Motif(exp)
        self.assertEqual(len(dm._g.edges()), 1)
        u, v, d = list(dm._g.edges(data=True))[0]
        self.assertEqual(d["constraints"]["type"], {"!=": [1, 12]})
class TestDotmotif_Parserv2_DM_NodeAttributes(unittest.TestCase):
    """Tests for node attribute constraints (``Node.attr op value``) in the DSL."""

    def test_basic_node_attr(self):
        exp = """\
        Aa -> Ba
        Aa.type = "excitatory"
        """
        dm = dotmotif.Motif(exp)
        self.assertEqual(len(dm.list_node_constraints()), 1)
        self.assertEqual(list(dm.list_node_constraints().keys()), ["Aa"])

    def test_node_multi_attr(self):
        exp = """\
        Aa -> Ba
        Aa.type = "excitatory"
        Aa.size = 4.5
        """
        dm = dotmotif.Motif(exp)
        self.assertEqual(len(dm.list_node_constraints()), 1)
        self.assertEqual(len(dm.list_node_constraints()["Aa"]), 2)
        self.assertEqual(dm.list_node_constraints()["Aa"]["type"]["="], ["excitatory"])
        self.assertEqual(dm.list_node_constraints()["Aa"]["size"]["="], [4.5])
        self.assertEqual(list(dm.list_node_constraints().keys()), ["Aa"])

    def test_multi_node_attr(self):
        exp = """\
        Aa -> Ba
        Aa.type = "excitatory"
        Ba.size=4.0
        """
        dm = dotmotif.Motif(exp)
        self.assertEqual(len(dm.list_node_constraints()), 2)
        self.assertEqual(list(dm.list_node_constraints().keys()), ["Aa", "Ba"])

    def test_node_macro_attr(self):
        # node constraints declared in a macro attach to the call-site node
        exp = """\
        macro(A) {
            A.type = "excitatory"
            A.size >= 4.0
        }
        Aaa -> Ba
        macro(Aaa)
        """
        dm = dotmotif.Motif(exp)
        self.assertEqual(len(dm.list_node_constraints()), 1)
        self.assertEqual(list(dm.list_node_constraints().keys()), ["Aaa"])

        exp = """\
        macro(A) {
            A.type = "excitatory"
            A.size >= 4.0
        }
        Aaa -> Ba
        macro(Aaa)
        macro(Ba)
        """
        dm = dotmotif.Motif(exp)
        self.assertEqual(len(dm.list_node_constraints()), 2)
        self.assertEqual(list(dm.list_node_constraints().keys()), ["Aaa", "Ba"])
class TestDynamicNodeConstraints(unittest.TestCase):
    """Tests for node-to-node ("dynamic") constraints such as A.x < B.x."""

    def test_dynamic_constraints(self):
        """
        Test that comparisons may be made between variables, e.g.:

        A.type != B.type
        """
        exp = """\
        A -> B
        A.radius < B.radius
        """
        dm = dotmotif.Motif(exp)
        self.assertEqual(len(dm.list_dynamic_node_constraints()), 1)

    def test_dynamic_constraints_in_macro(self):
        """
        Test that comparisons may be made between variables in a macro, e.g.:

        A.type != B.type
        """
        exp = """\
        macro(A, B) {
            A.radius > B.radius
        }
        macro(A, B)
        A -> B
        """
        dm = dotmotif.Motif(exp)
        self.assertEqual(len(dm.list_dynamic_node_constraints()), 1)
| 2.6875 | 3 |
examples/classification_mnist/main.py | yassersouri/fandak | 15 | 17792 | <gh_stars>10-100
from typing import List
import click
import torch
from fandak.utils import common_config
from fandak.utils import set_seed
from fandak.utils.config import update_config
from proj.config import get_config_defaults
from proj.datasets import MNISTClassification
from proj.evaluators import ValidationEvaluator
from proj.models import MLPModel
from proj.trainers import SimpleTrainer
@click.command()
@common_config
@click.option("--exp-name", default="")
def main(file_configs: List[str], set_configs: List[str], exp_name: str):
    """CLI entry point: merge configs, build dataset/model/trainer, and train.

    ``--exp-name`` overrides ``cfg.experiment_name`` when non-empty.
    """
    cfg = update_config(
        default_config=get_config_defaults(),
        file_configs=file_configs,
        set_configs=set_configs,
    )
    if exp_name != "":
        # the merged config is frozen; thaw briefly to apply the CLI override
        cfg.defrost()
        cfg.experiment_name = exp_name
        cfg.freeze()
    print(cfg)

    # set_seed(cfg.system.seed)  # NOTE(review): seeding is disabled, so runs are non-deterministic

    device = torch.device(cfg.system.device)

    train_db = MNISTClassification(cfg, train=True)
    test_db = MNISTClassification(cfg, train=False)
    if cfg.model.name == "MLP":
        model = MLPModel(cfg)
    else:
        raise Exception("Invalid model name (%s)" % cfg.model.name)
    # evaluation on the test split runs through the trainer's evaluator hook
    evaluators = [ValidationEvaluator(cfg, test_db, model, device)]
    trainer = SimpleTrainer(
        cfg, cfg.experiment_name, train_db, model, device, evaluators
    )
    trainer.train()
    trainer.save_training()
trainer.save_training()
if __name__ == "__main__":
main()
| 2.21875 | 2 |
tests/test_problem_solving_algorithms_sorting.py | mxdzi/hackerrank | 0 | 17793 | from problem_solving.algorithms.sorting import *
def test_q1_big_sorting(capsys, monkeypatch):
    """q1: feed sample numbers via stubbed input() and expect big-int sort order."""
    inputs = ["6",
              "31415926535897932384626433832795",
              "1",
              "3",
              "10",
              "3",
              "5"]
    # each input() call pops the next scripted line
    monkeypatch.setattr('builtins.input', lambda: inputs.pop(0))
    q1_big_sorting.main()
    captured = capsys.readouterr()
    output = ("1\n"
              "3\n"
              "3\n"
              "5\n"
              "10\n"
              "31415926535897932384626433832795\n")
    assert captured.out == output
def test_q12_find_the_median(capsys, monkeypatch):
    """q12: the median of [0..6] (odd length) is 3."""
    inputs = ["7",
              "0 1 2 4 6 5 3"]
    monkeypatch.setattr('builtins.input', lambda: inputs.pop(0))
    q12_find_the_median.main()
    captured = capsys.readouterr()
    output = "3\n"
    assert captured.out == output
| 3.03125 | 3 |
runOtakuBot.py | Eagleheardt/otakuBot | 0 | 17794 | <reponame>Eagleheardt/otakuBot<filename>runOtakuBot.py
import sqlite3
from sqlite3 import Error
import os
import time
import datetime
import re
import random
import schedule
import cryptography
from apscheduler.schedulers.background import BackgroundScheduler
from slackclient import SlackClient
from cryptography.fernet import Fernet
# Module-level SQLite connection shared by every DB helper below
conn = sqlite3.connect('/home/ubuntu/otakuBot/data/anime.db')
serverCursor = conn.cursor()

# Load the Fernet key and decrypt the Slack API token from disk
keyFile = open('/home/ubuntu/otakuBot/data/otakubot_token.key', 'rb')
key = keyFile.read()
keyFile.close()

f = Fernet(key)

encryptedTokenFile = open('/home/ubuntu/otakuBot/data/otakubot_token.encrypted', 'rb')
encryptedToken = encryptedTokenFile.read()
# NOTE(review): encryptedTokenFile is never closed
decryptedToken = f.decrypt(encryptedToken)

SLACK_BOT_TOKEN = decryptedToken.decode()

# instantiate Slack client
slack_client = SlackClient(SLACK_BOT_TOKEN)
# starterbot's user ID in Slack: value is assigned after the bot starts up
otakuBotID = None

# constants
RTM_READ_DELAY = 0.5 # 0.5 second delay in reading events
def stdOut(s):
    """Append a timestamped message to today's log file.

    :param s: message text (written as ``HH:MM:SS: <s>``)
    """
    curDate = datetime.datetime.today().strftime('%Y-%m-%d')
    curTime = datetime.datetime.now().strftime('%H:%M:%S')
    # 'with' guarantees the handle is closed even if the write fails
    with open("/home/ubuntu/logs/{0}.log".format(curDate), "a") as logFile:
        logFile.write("{0}: {1}\n".format(curTime, s))
    return
def logIt():
    """Scheduled heartbeat: log the 15-minute check-in message.

    Delegates to ``stdOut`` instead of duplicating the date/time/open/write
    logic; the resulting log line is byte-identical to the original.
    """
    stdOut("Otaku 15 minute check in!")
    return
schedule.every(15).minutes.do(logIt)
def SQLReturn(aConn, sqlCmd):
    """Execute ``sqlCmd`` on ``aConn`` and return all fetched rows.

    The cursor is closed in a ``finally`` block so it is not leaked when
    ``execute`` raises (the original leaked it on error).
    """
    reportCur = aConn.cursor()
    try:
        reportCur.execute(sqlCmd)
        SQLResults = reportCur.fetchall()
    finally:
        reportCur.close()
    return SQLResults
def insertQuote(aUser, theQuote):
    """Store a quote for a user in the Quotes table.

    Uses a parameterized query: the original interpolated Slack-supplied
    text straight into the SQL, which is injectable and crashes on quotes
    containing an apostrophe.
    """
    newCur = conn.cursor()
    newCur.execute(
        "INSERT INTO Quotes (User, Words) VALUES (?, ?);",
        (aUser, theQuote),
    )
    newCur.close()
    conn.commit()
    return
def insertAniMusic(aUser, theLink):
    """Store an anime (Japanese) music link in the Music table.

    Parameterized to prevent SQL injection from user-supplied links.
    """
    newCur = conn.cursor()
    newCur.execute(
        "INSERT INTO Music (Category, User, Link) VALUES ('Anime', ?, ?);",
        (aUser, theLink),
    )
    newCur.close()
    conn.commit()
    return
def insertEngMusic(aUser, theLink):
    """Store an English anime music link in the Music table.

    Parameterized to prevent SQL injection from user-supplied links.
    """
    newCur = conn.cursor()
    newCur.execute(
        "INSERT INTO Music (Category, User, Link) VALUES ('English', ?, ?);",
        (aUser, theLink),
    )
    newCur.close()
    conn.commit()
    return
def insertIcon(aUser, theLink):
    """Store an iconic-moment link in the Music table (Category 'Iconic').

    Parameterized to prevent SQL injection from user-supplied links.
    """
    newCur = conn.cursor()
    newCur.execute(
        "INSERT INTO Music (Category, User, Link) VALUES ('Iconic', ?, ?);",
        (aUser, theLink),
    )
    newCur.close()
    conn.commit()
    return
def deleteQuote(quoteID):
    """Delete the quote with the given ID from the Quotes table.

    Parameterized: the original formatted the ID straight into the SQL.
    """
    newCur = conn.cursor()
    newCur.execute(
        "DELETE FROM Quotes WHERE ID == ?;",
        (quoteID,),
    )
    newCur.close()
    conn.commit()
    return
def getQuote(aConn):
    """Return a random row (1-tuple of Words) from the Quotes table.

    ``SQLReturn`` already yields a list, so the original element-by-element
    copy before ``random.choice`` was redundant.
    """
    results = SQLReturn(aConn, "SELECT Words FROM Quotes;")
    return random.choice(results)
def getAniMusic(aConn):
    """Return a random anime-music row (1-tuple of Link) from the Music table."""
    results = SQLReturn(aConn, "SELECT Link FROM Music WHERE Category = 'Anime';")
    return random.choice(results)
def getEngMusic(aConn):
    """Return a random English-music row (1-tuple of Link) from the Music table."""
    results = SQLReturn(aConn, "SELECT Link FROM Music WHERE Category = 'English';")
    return random.choice(results)
def getIconic(aConn):
    """Return a random iconic-moment row (1-tuple of Link) from the Music table."""
    results = SQLReturn(aConn, "SELECT Link FROM Music WHERE Category = 'Iconic';")
    return random.choice(results)
def getAllQuotes(aConn):
    """Return a human-readable listing of every stored quote (ID + text).

    Rows are unpacked directly instead of walking each tuple with a
    positional counter; the emitted text is unchanged.
    """
    results = SQLReturn(aConn, "SELECT ID, Words FROM Quotes;")
    newStr = "All the Quotes\n"
    for quoteID, words in results:
        newStr += "ID: " + str(quoteID) + "\n"
        newStr += "Words: " + str(words) + "\n\n"
    return newStr
def EODReportRange(date1, date2):
    """Summarize VM status counts reported between date1 and date2 (inclusive).

    :param date1: start date, 'YYYY-MM-DD'
    :param date2: end date, 'YYYY-MM-DD'
    :returns: multi-line report string, one line per (server, status) pair

    NOTE(review): the dates are interpolated directly into the SQL; callers
    must pass trusted date strings (they come from internal scheduling, but
    this is worth parameterizing if that ever changes).
    """
    cmd = ("""
    SELECT
        ServerNumber as [Server]
        , ServerStatus as [Status]
        , count(ServerStatus) as [Amount]
    FROM
        Status
    WHERE
        date(TimeStamp) BETWEEN '{0}' AND '{1}'
        AND ServerNumber IN('1','2','3','4','17')
    GROUP BY
        ServerNumber
        ,ServerStatus
    """).format(date1, date2)
    results = SQLReturn(conn, cmd)
    newStr = "Report for: " + date1 + " to " + date2 + "\n"
    for server, vmStatus, amount in results:
        newStr += "VM" + str(server) + " - "
        newStr += "Status: " + str(vmStatus) + " - "
        if amount != 1:
            newStr += "Reported: " + str(amount) + " times"
        else:
            newStr += "Reported: " + str(amount) + " time"
        newStr += "\n"
    return newStr
def parseSlackInput(aText):
    """Extract [message, channel, user, timestamp] from a Slack RTM event list.

    Returns four strings when the first event carries a 'text' key, and
    ``[None, None, None, None]`` otherwise. The original fell through and
    implicitly returned ``None`` when the first event had no 'text' key,
    which broke callers that unpack four values.
    """
    if aText and len(aText) > 0:
        item = aText[0]
        if 'text' in item:
            msg = item['text'].strip(' ')
            chn = item['channel']
            usr = item['user']
            stp = item['ts']
            return [str(msg), str(chn), str(usr), str(stp)]
    return [None, None, None, None]
def inChannelResponse(channel,response):
    """Post ``response`` to ``channel`` as the bot, via the module-level client."""
    slack_client.api_call(
        "chat.postMessage",
        channel=channel,
        text=response,
        as_user=True
    )
    return
def threadedResponse(channel,response,stamp):
    """Post ``response`` as a threaded reply to the message at ``stamp``."""
    slack_client.api_call(
        "chat.postMessage",
        channel=channel,
        text=response,
        thread_ts=stamp,
        as_user=True
    )
    return
def directResponse(someUser,text):
    """Send ``text`` directly to ``someUser`` (a user/IM channel id)."""
    slack_client.api_call(
        "chat.postMessage",
        channel=someUser,
        text=text,
        as_user=True
    )
    return
def parseQuote(someMsg):
    """Return everything after the first space (drops the command keyword).

    Uses ``str.partition`` so a message with no space yields '' instead of
    raising ValueError as the original ``split(' ', 1)`` unpacking did.
    """
    starter, _, theQuote = someMsg.partition(' ')
    return theQuote
def handle_command(command, channel, aUser, tStamp):
    """
    Executes bot command if the command is known.

    Dispatches on the leading "!command" keyword of *command* and replies in
    *channel* (sometimes threaded on *tStamp*). Unknown commands are ignored.
    User UC176R92M is the hard-coded admin for !delquote / !getquotes.
    """
    #command = command.lower()
    response = None
    # This is where you start to implement more commands!
    if command.lower().startswith("!help"):
        response = """I'm Otaku Bot!
I don't do a lot yet. But watch out! I'm just getting started!
!addquote[SPACE][A quote of your choice!] - I will remember your quote!
!quote - I will reply with a random quote!
!addAniMusic[SPACE][Link to a Japanese anime song] - I will remember your music!
!addEngMusic[SPACE][Link to an English anime song] - I will remember your music!
!addIconic[SPACE][Link to an iconic anime moment] - I will remember your moment!
!animusic - I will reply with a Japanese anime song from memory!
!engmusic - I will reply with an English anime song from memory!
!iconic - I will show you an iconic anime moment!
"""
        inChannelResponse(channel,response)
        return
    if command.lower().startswith("!addquote"):
        # Slice off "!addquote " (10 characters) to keep only the quote body.
        newQuote = str(command[10:])
        insertQuote(aUser,newQuote)
        threadedResponse(channel,"I'll try to remember: " + newQuote ,tStamp)
        stdOut("Quote Added: " + newQuote)
        return
    if command.lower().startswith("!quote"):
        aQuote = getQuote(conn)
        inChannelResponse(channel,aQuote)
        return
    if command.lower().startswith("!animusic"):
        aQuote = getAniMusic(conn)
        inChannelResponse(channel,aQuote)
        return
    if command.lower().startswith("!engmusic"):
        aQuote = getEngMusic(conn)
        inChannelResponse(channel,aQuote)
        return
    if command.lower().startswith("!iconic"):
        aQuote = getIconic(conn)
        inChannelResponse(channel,aQuote)
        return
    if command.lower().startswith("!onepunch"):
        inChannelResponse(channel,"https://www.youtube.com/watch?v=_TUTJ0klnKk")
        return
    if command.lower().startswith("!addanimusic"):
        # Slice off "!addanimusic " (13 characters).
        newQuote = str(command[13:])
        insertAniMusic(aUser,newQuote)
        threadedResponse(channel,"I'll add this to the Anime music section: " + newQuote ,tStamp)
        stdOut("Anime Music Added: " + newQuote)
        return
    if command.lower().startswith("!addengmusic"):
        # Slice off "!addengmusic " (13 characters).
        newQuote = str(command[13:])
        insertEngMusic(aUser,newQuote)
        threadedResponse(channel,"I'll add this to the English music section: " + newQuote ,tStamp)
        stdOut("English Music Added: " + newQuote)
        return
    if command.lower().startswith("!addiconic"):
        # Slice off "!addiconic " (11 characters).
        newQuote = str(command[11:])
        insertIcon(aUser,newQuote)
        threadedResponse(channel,"I'll add this to the Iconic moments section: " + newQuote ,tStamp)
        stdOut("Iconic Moment Added: " + newQuote)
        return
    if command.lower().startswith("!delquote"):
        # Admin-only: delete a quote by its numeric ID.
        if aUser == "UC176R92M":
            num = command[10:]
            deleteQuote(num)
            inChannelResponse(channel,"You have removed a quote.")
        else:
            inChannelResponse(channel,"You don't have permission to do that!")
        return
    if command.lower().startswith("!getquotes"):
        # Admin-only: dump all stored quotes with their IDs.
        if aUser == "UC176R92M":
            inChannelResponse(channel,getAllQuotes(conn))
        else:
            inChannelResponse(channel,"You don't have permission to do that!")
        return
    if command.startswith("!test"):
        # Bug fix: a stray bare "return" here made the debug echo below
        # unreachable dead code; !test now actually reports the event fields.
        response = (("""Text:{0}
Channel:{1}
TS:{2}
User:{3}
""").format(command,channel,tStamp,aUser))
        inChannelResponse(channel,response)
        return
    return
# Main loop: connect to Slack RTM, poll for events, dispatch commands,
# and run any scheduled jobs between polls.
if __name__ == "__main__":
    if slack_client.rtm_connect(with_team_state=False):
        stdOut("Otaku Bot connected and running!")
        # Read bot's user ID by calling Web API method `auth.test`
        otakuBotID = slack_client.api_call("auth.test")["user_id"]
        while True:
            try:
                command, channel, usr, stp = parseSlackInput(slack_client.rtm_read())
                if command:
                    handle_command(command, channel, usr, stp)
            except Exception as err:
                # Bug fix: the previous bare "except: pass" also swallowed
                # KeyboardInterrupt (blocking Ctrl+C) and silently hid every
                # programming error. Log the failure and keep polling.
                stdOut("Error while handling Slack event: " + str(err))
            schedule.run_pending()
            time.sleep(RTM_READ_DELAY)
    else:
        stdOut("Connection failed. Exception traceback printed above.")
| 2.375 | 2 |
ocr.py | tunc2112/uet-img-processing | 0 | 17795 | <gh_stars>0
from PIL import Image
import cv2
import pytesseract
import tesserocr
from pyocr import pyocr
from pyocr import builders
import sys
import os
def get_image_filename(img_id):
    """Return the relative path of source image *img_id*, e.g. "img_src/src007.png".

    Tries the extensions .png, .jpg, .jpeg (in that order) under img_src/
    relative to the current working directory.

    Raises:
        FileNotFoundError: if no matching file exists. (Previously the
        function silently returned None, which made callers crash later
        with a cryptic TypeError on "'./' + None".)
    """
    filename = "img_src/src{0:0>3}".format(img_id)
    for ext in (".png", ".jpg", ".jpeg"):
        candidate = filename + ext
        if os.path.exists(os.path.join(os.getcwd(), candidate)):
            return candidate
    raise FileNotFoundError(
        "no source image found for id {0}: {1}.(png|jpg|jpeg)".format(img_id, filename))
def write_output(libname, img_id, text):
    """Write OCR *text* to ./output/output_<libname>_<NNN>.txt.

    Robustness fix: creates the ./output directory if missing (previously
    open() raised FileNotFoundError on a fresh checkout).
    """
    os.makedirs("./output", exist_ok=True)
    filename = "./output/output_{1}_{0:0>3}.txt".format(img_id, libname)
    with open(filename, "w") as f:
        f.write(text)
def ocr_pytesseract(img_id):
    """Run pytesseract OCR on source image *img_id* and save the extracted text."""
    img_filename = './' + get_image_filename(img_id)
    img = cv2.imread(img_filename, cv2.IMREAD_COLOR)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Denoise while keeping character edges sharp before OCR.
    gray = cv2.bilateralFilter(gray, 11, 17, 17)
    original = pytesseract.image_to_string(gray, config='')
    # Bug fix: was write_output(img, original) -- wrong arity, and it passed
    # the image array where the library name belongs. write_output takes
    # (libname, img_id, text).
    write_output("pytesseract", img_id, original)
def ocr_pyocr(img_id):
    """Run OCR on source image *img_id* via pyocr's first available tool and
    language, and save the extracted text.

    Exits with status 1 if no OCR backend is installed.
    """
    img_filename = './' + get_image_filename(img_id)
    tools = pyocr.get_available_tools()
    if len(tools) == 0:
        print("No OCR tool found")
        sys.exit(1)
    tool = tools[0]
    langs = tool.get_available_languages()
    lang = langs[0]
    txt = tool.image_to_string(
        Image.open(img_filename),
        lang=lang,
        builder=pyocr.tesseract.builders.TextBuilder()
    )
    # Bug fix: write_output takes (libname, img_id, text); the libname
    # argument was missing, so this call raised TypeError at runtime.
    # Also removed an unused cv2.imread() of the same file.
    write_output("pyocr", img_id, txt)
def ocr_tesseract(img_id):
    """Run OCR on source image *img_id* via tesserocr and save the extracted text."""
    img_filename = './' + get_image_filename(img_id)
    txt = tesserocr.image_to_text(Image.open(img_filename))
    # Bug fix: write_output takes (libname, img_id, text); the libname
    # argument was missing, so this call raised TypeError at runtime.
    write_output("tesserocr", img_id, txt)
| 2.75 | 3 |
object_detection/det_heads/retinaNet_head/retinanet_head.py | no-name-xiaosheng/PaddleViT | 993 | 17796 | <reponame>no-name-xiaosheng/PaddleViT<gh_stars>100-1000
# Copyright (c) 2021 PPViT Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import paddle
import paddle.nn as nn
from paddle.nn.initializer import Normal, Constant
from retinanet_loss import RetinaNetLoss
from post_process import RetinaNetPostProcess
from det_utils.generator_utils import AnchorGenerator
class RetinaNetHead(nn.Layer):
    '''
    The head used in RetinaNet for object classification and box regression.
    It has two subnets for the two tasks, with a common structure but separate parameters.
    '''
    def __init__(self, config):
        '''
        Args:
            input_shape (List[ShapeSpec]): input shape.
            num_classes (int): number of classes. Used to label background proposals.
            num_anchors (int): number of generated anchors.
            conv_dims (List[int]): dimensions for each convolution layer.
            norm (str or callable):
                Normalization for conv layers except for the two output layers.
                See :func:`detectron2.layers.get_norm` for supported types.
            loss_func (class): the class is used to compute loss.
            prior_prob (float): Prior weight for computing bias.
        '''
        super(RetinaNetHead, self).__init__()
        # All hyper-parameters come from the RETINANET section of the config.
        num_convs = config.RETINANET.NUM_CONVS
        input_channels = config.RETINANET.INPUT_CHANNELS
        norm = config.RETINANET.NORM
        prior_prob = config.RETINANET.PRIOR_PROB
        self.num_classes = config.RETINANET.NUM_CLASSES
        # Training-time loss: focal loss (classification) + smooth-L1 (box regression),
        # with anchor-to-gt matching thresholds taken from the config.
        self.get_loss = RetinaNetLoss(
            focal_loss_alpha=config.RETINANET.FOCAL_LOSS_ALPHA,
            focal_loss_gamma=config.RETINANET.FOCAL_LOSS_GAMMA,
            smoothl1_loss_delta=config.RETINANET.SMOOTHL1_LOSS_DELTA,
            positive_thresh=config.RETINANET.POSITIVE_THRESH,
            negative_thresh=config.RETINANET.NEGATIVE_THRESH,
            allow_low_quality=config.RETINANET.ALLOW_LOW_QUALITY,
            num_classes=config.RETINANET.NUM_CLASSES,
            weights=config.RETINANET.WEIGHTS
        )
        # Inference-time decoding: score filtering + NMS over the predicted boxes.
        self.postprocess = RetinaNetPostProcess(
            score_threshold=config.RETINANET.SCORE_THRESH,
            keep_top_k=config.RETINANET.KEEP_TOPK,
            nms_top_k=config.RETINANET.NMS_TOPK,
            nms_threshold=config.RETINANET.NMS_THRESH,
            bbox_reg_weights=config.RETINANET.WEIGHTS
        )
        # One set of anchors per FPN level; strides/sizes/ratios from the config.
        self.anchor_generator = AnchorGenerator(anchor_sizes=config.RETINANET.ANCHOR_SIZE,
                                                aspect_ratios=config.RETINANET.ASPECT_RATIOS,
                                                strides=config.RETINANET.STRIDES,
                                                offset=config.RETINANET.OFFSET)
        num_anchors = self.anchor_generator.num_anchors
        conv_dims = [input_channels] * num_convs
        # Build the two towers (classification and regression): same structure,
        # separate parameters. Each is num_convs x [conv3x3 -> (optional BN) -> ReLU].
        cls_net = []
        reg_net = []
        for in_channels, out_channels in zip(
            [input_channels] + list(conv_dims), conv_dims
        ):
            cls_net.append(
                nn.Conv2D(in_channels, out_channels, kernel_size=3, stride=1, padding=1,
                          weight_attr=paddle.ParamAttr(initializer=Normal(mean=0., std=0.01)))
            )
            if norm == "bn":
                cls_net.append(nn.BatchNorm2D(out_channels))
            cls_net.append(nn.ReLU())
            reg_net.append(
                nn.Conv2D(in_channels, out_channels, kernel_size=3, stride=1, padding=1,
                          weight_attr=paddle.ParamAttr(initializer=Normal(mean=0., std=0.01)))
            )
            if norm == "bn":
                reg_net.append(nn.BatchNorm2D(out_channels))
            reg_net.append(nn.ReLU())
        self.cls_net = nn.Sequential(*cls_net)
        self.reg_net = nn.Sequential(*reg_net)
        # Bias the classification output so that the initial predicted foreground
        # probability is ~prior_prob (the RetinaNet focal-loss initialisation trick).
        bias_value = -math.log((1 - prior_prob) / prior_prob)
        # Final prediction convs: per-anchor class scores and 4 box deltas.
        self.cls_score = nn.Conv2D(
            conv_dims[-1], num_anchors * self.num_classes, kernel_size=3, stride=1, padding=1,
            weight_attr=paddle.ParamAttr(initializer=Normal(mean=0., std=0.01)),
            bias_attr=paddle.ParamAttr(initializer=Constant(bias_value))
        )
        self.bbox_pred = nn.Conv2D(
            conv_dims[-1], num_anchors * 4, kernel_size=3, stride=1, padding=1,
            weight_attr=paddle.ParamAttr(initializer=Normal(mean=0., std=0.01))
        )
    def forward(self, feats, inputs):
        '''
        Args:
            feats: list of FPN feature maps, one per pyramid level.
            inputs: dict of batch data; at inference time it is expected to
                contain "imgs_shape" and "scale_factor_wh" (schema defined by
                the surrounding pipeline -- confirm against the dataloader).
        Returns:
            loss_dict (dict) | pred_result(tensor), bbox_num(tensor):
                loss_dict: contains cls_losses and reg_losses.
                pred_result: the shape is [M, 6], M is the number of final preds,
                    Each row has 6 values: [label, score, xmin, ymin, xmax, ymax]
                bbox_num: the shape is [N], N is the num of batch_size,
                    bbox_num[i] means the i'th img have bbox_num[i] boxes.
        '''
        anchors = self.anchor_generator(feats)
        # Run both towers on every pyramid level.
        pred_scores = []
        pred_boxes = []
        for feat in feats:
            pred_scores.append(self.cls_score(self.cls_net(feat)))
            pred_boxes.append(self.bbox_pred(self.reg_net(feat)))
        # Flatten each level's map from (BS, A*K, H, W) to (BS, H*W*A, K).
        pred_scores_list = [
            transpose_to_bs_hwa_k(s, self.num_classes) for s in pred_scores
        ]
        pred_boxes_list = [
            transpose_to_bs_hwa_k(s, 4) for s in pred_boxes
        ]
        if self.training:
            anchors = paddle.concat(anchors)
            loss_dict = self.get_loss(anchors, [pred_scores_list, pred_boxes_list], inputs)
            return loss_dict
        else:
            # Build per-image [w, h] tensors for clipping/rescaling in postprocess.
            img_whwh = paddle.concat([inputs["imgs_shape"][:, 1:2],
                                      inputs["imgs_shape"][:, 0:1]], axis=-1)
            pred_result, bbox_num = self.postprocess(
                pred_scores_list,
                pred_boxes_list,
                anchors,
                inputs["scale_factor_wh"],
                img_whwh
            )
            return pred_result, bbox_num
def transpose_to_bs_hwa_k(tensor, k):
    """Flatten a (BS, A*K, H, W) prediction map into shape (BS, H*W*A, K),
    so every row holds the K values predicted for one anchor at one location."""
    assert tensor.dim() == 4
    batch, _, height, width = tensor.shape
    # (BS, A*K, H, W) -> (BS, A, K, H, W) -> (BS, H, W, A, K) -> (BS, H*W*A, K)
    unpacked = tensor.reshape([batch, -1, k, height, width])
    reordered = unpacked.transpose([0, 3, 4, 1, 2])
    return reordered.reshape([batch, -1, k])
| 2.59375 | 3 |
brp/formutils.py | chop-dbhi/biorepo-portal | 6 | 17797 | <reponame>chop-dbhi/biorepo-portal<filename>brp/formutils.py
from django import template
from django.forms import widgets
register = template.Library()


@register.inclusion_tag('formfield.html')
def formfield(field):
    """Render a bound form field via formfield.html with a widget-type hint.

    Returns template context: the field, its form, and ``type`` -- one of
    'checkbox', 'radio', 'textarea', 'select', 'input', or None.
    """
    widget = field.field.widget
    type_ = None
    # Bug fix: CheckboxInput subclasses Input, so the generic Input check must
    # come *last* -- previously checkboxes always matched the first branch and
    # were reported as 'input', making the 'checkbox' branch unreachable.
    if isinstance(widget, widgets.CheckboxInput):
        type_ = 'checkbox'
    elif isinstance(widget, widgets.RadioInput):
        type_ = 'radio'
    elif isinstance(widget, widgets.Textarea):
        type_ = 'textarea'
    elif isinstance(widget, widgets.Select):
        type_ = 'select'
    elif isinstance(widget, widgets.Input):
        type_ = 'input'
    return {'field': field, 'form': field.form, 'type': type_}
| 1.882813 | 2 |
learning/modules/visitation_softmax.py | esteng/guiding-multi-step | 0 | 17798 | <reponame>esteng/guiding-multi-step
import torch
import torch.nn as nn
import numpy as np
class VisitationSoftmax(nn.Module):
    """Softmax over spatial visitation/goal score maps.

    Turns a Bx2xHxW tensor of scores (channel 0: visitation scores,
    channel 1: goal scores) into (log-)probability distributions over the
    H*W spatial locations, optionally reserving probability mass for the
    goal lying outside the observed map region.
    """
    def __init__(self, log=False):
        """
        Args:
            log: if True, forward() returns log-probabilities instead of
                probabilities.
        """
        super(VisitationSoftmax, self).__init__()
        self.log = log
        # Bug fix: forward() reads self.log_softmax, but this attribute was
        # previously created as self.logsoftmax, so log=True always raised
        # AttributeError. Also pass dim=1 explicitly: both softmaxes are
        # applied to a (batch, width*height) view, and a dim-less
        # nn.LogSoftmax is deprecated.
        self.log_softmax = nn.LogSoftmax(dim=1)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, visitation_distributions, goal_outside_score=None):
        """
        Applies softmax on visitation distributions, while handling the case where we assign additional
        probability of the goal being outside of the observed map region.

        Args:
            visitation_distributions: Bx2xHxW score tensor (visitation scores
                in channel 0, goal scores in channel 1).
            goal_outside_score: optional (B,) tensor of scores for the goal
                being outside the observed region.
        Returns:
            If goal_outside_score is None: a Bx2xHxW tensor of (log-)probabilities,
            each channel normalized over its H*W locations.
            Otherwise: a tuple (Bx2xHxW tensor, (B,) tensor) where the goal
            channel and the out-of-bounds value jointly normalize to 1.
        """
        batch_size = visitation_distributions.size(0)
        num_channels = visitation_distributions.size(1)
        assert num_channels == 2, "Must have 2 channels: visitation distribution scores and goal distribution scores"
        height = visitation_distributions.size(2)
        width = visitation_distributions.size(3)

        visitation_dist_scores = visitation_distributions[:, 0, :, :]
        goal_inside_dist_scores = visitation_distributions[:, 1, :, :]

        softmax_func = self.log_softmax if self.log else self.softmax

        # Visitation distribution: flatten, softmax over locations, reshape back.
        visitation_dist = softmax_func(visitation_dist_scores.view(batch_size, width*height)).view(visitation_dist_scores.size())

        # We are modelling OOB probability
        if goal_outside_score is not None:
            # Goal distribution: flatten, append outside score, softmax,
            # split off outside score, reshape back.
            goal_scores_full = torch.cat([goal_inside_dist_scores.view(batch_size, width*height), goal_outside_score[:, np.newaxis]], dim=1)
            goal_dist_full = softmax_func(goal_scores_full)
            goal_inside_partial_dist = goal_dist_full[:, :-1].view(goal_inside_dist_scores.size())
            goal_outside_prob_or_logprob = goal_dist_full[:, -1]

            # Re-assemble back into the Bx2xHxW tensor representation
            visitation_prob_or_log_prob_out = torch.stack([visitation_dist, goal_inside_partial_dist], dim=1)
            return visitation_prob_or_log_prob_out, goal_outside_prob_or_logprob
        else:
            goal_dist = softmax_func(goal_inside_dist_scores.view(batch_size, width * height)).view(
                goal_inside_dist_scores.size())

            # Re-assemble back into the Bx2xHxW tensor representation
            visitation_prob_or_log_prob_out = torch.stack([visitation_dist, goal_dist], dim=1)
            return visitation_prob_or_log_prob_out
baekjoon/easy-math/17362-finger.py | honux77/algorithm | 2 | 17799 | <gh_stars>1-10
# Counting back and forth across five fingers repeats with period 8:
# counts 1..8 land on fingers 1,2,3,4,5,4,3,2 and then the cycle restarts.
# Index the precomputed cycle by (count mod 8).
n = int(input()) % 8
finger_cycle = [2, 1, 2, 3, 4, 5, 4, 3]
print(finger_cycle[n])
| 2.875 | 3 |